improve
@@ -27,9 +27,17 @@ def run_chat():
    crew_tool_schema = generate_crew_tool_schema(crew_chat_inputs)
    system_message = build_system_message(crew_chat_inputs)

    # Call the LLM to generate the introductory message
    introductory_message = chat_llm.call(
        messages=[{"role": "system", "content": system_message}]
    )
    click.secho(f"\nAssistant: {introductory_message}\n", fg="green")

    messages = [
        {"role": "system", "content": system_message},
        {"role": "assistant", "content": introductory_message},
    ]

    available_functions = {
        crew_chat_inputs.crew_name: create_tool_function(crew, messages),
    }
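
For orientation, a minimal sketch of what `create_tool_function` might return — an assumption, since its definition is not shown in this diff. It would close over the crew and the shared message list so the LLM's tool call can kick off the crew, with the chat transcript forwarded as `crew_chat_messages` (consumed in the Task hunk further down):

import json

def create_tool_function(crew, messages):
    """Hypothetical sketch: wrap a crew so an LLM tool call can run it."""

    def run_crew(**kwargs):
        # Forward the chat transcript so the task can fold it into its
        # description (see interpolate_inputs_and_add_conversation_history below).
        kwargs["crew_chat_messages"] = json.dumps(messages)
        result = crew.kickoff(inputs=kwargs)
        return str(result)

    return run_crew
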
@@ -77,6 +85,8 @@ def build_system_message(crew_chat_inputs: ChatInputs) -> str:
        "If a user asks a question outside the crew's scope, provide a brief answer and remind them of the crew's purpose. "
        "After calling the tool, be prepared to take user feedback and make adjustments as needed. "
        "If you are ever unsure about a user's request or need clarification, ask the user for more information. "
        "Before doing anything else, introduce yourself with a friendly message like: 'Hey! I'm here to help you with [crew's purpose]. Could you please provide me with [inputs] so we can get started?' "
        "For example: 'Hey! I'm here to help you with uncovering and reporting cutting-edge developments through thorough research and detailed analysis. Could you please provide me with a topic you're interested in? This will help us generate a comprehensive research report and detailed analysis.'"
        f"\nCrew Name: {crew_chat_inputs.crew_name}"
        f"\nCrew Description: {crew_chat_inputs.crew_description}"
    )

@@ -1,86 +0,0 @@
import json
import subprocess
from typing import Optional

import click
from packaging import version

from crewai.cli.utils import read_toml
from crewai.cli.version import get_crewai_version
from crewai.types.crew_chat import ChatInputs


def fetch_crew_inputs() -> Optional[ChatInputs]:
    """
    Fetch the crew's ChatInputs (a structure containing crew_description and input fields)
    by running "uv run fetch_chat_inputs", which prints JSON representing a ChatInputs object.

    This function parses that JSON and returns a ChatInputs instance.
    If the output is empty or invalid, None is returned.
    """
    command = ["uv", "run", "fetch_chat_inputs"]
    crewai_version = get_crewai_version()
    min_required_version = "0.87.0"

    pyproject_data = read_toml()
    crew_name = pyproject_data.get("project", {}).get("name", None)

    # If you're on an older poetry-based setup and version < min_required_version
    if pyproject_data.get("tool", {}).get("poetry") and (
        version.parse(crewai_version) < version.parse(min_required_version)
    ):
        click.secho(
            f"You are running an older version of crewAI ({crewai_version}) that uses poetry pyproject.toml.\n"
            f"Please run `crewai update` to update your pyproject.toml to use uv.",
            fg="red",
        )

    try:
        result = subprocess.run(command, capture_output=True, text=True, check=True)
        stdout_lines = result.stdout.strip().splitlines()

        # Find the line that contains the JSON data
        json_line = next(
            (
                line
                for line in stdout_lines
                if line.startswith("{") and line.endswith("}")
            ),
            None,
        )

        if not json_line:
            click.echo(
                "No valid JSON output received from `fetch_chat_inputs` command.",
                err=True,
            )
            return None

        try:
            raw_data = json.loads(json_line)
            chat_inputs = ChatInputs(**raw_data)
            if crew_name:
                chat_inputs.crew_name = crew_name
            return chat_inputs
        except json.JSONDecodeError as e:
            click.echo(
                f"Unable to parse JSON from `fetch_chat_inputs` output: {e}\nOutput: {repr(json_line)}",
                err=True,
            )
            return None

    except subprocess.CalledProcessError as e:
        click.echo(f"An error occurred while fetching chat inputs: {e}", err=True)
        click.echo(e.output, err=True, nl=True)

        if pyproject_data.get("tool", {}).get("poetry"):
            click.secho(
                "It's possible that you are using an old version of crewAI that uses poetry.\n"
                "Please run `crewai update` to update your pyproject.toml to use uv.",
                fg="yellow",
            )
    except Exception as e:
        click.echo(f"An unexpected error occurred: {e}", err=True)

    return None

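For reference, a hypothetical example of the single JSON line the scan above looks for. The field names are inferred from how ChatInputs is used in this deleted file (crew_name is overridden from pyproject.toml when present); the exact schema is not shown in this diff:

# Hypothetical stdout from `uv run fetch_chat_inputs`; only the JSON line is consumed.
example_json_line = (
    '{"crew_name": "crew", '
    '"crew_description": "Researches a topic and writes a detailed report", '
    '"inputs": [{"name": "topic", "description": "The subject to research"}]}'
)
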
@@ -1,10 +1,8 @@
#!/usr/bin/env python
import sys
import json
import warnings

from {{folder_name}}.crew import {{crew_name}}
from crewai.utilities.llm_utils import create_llm

warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")

@@ -15,13 +13,10 @@ warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")

def run():
    """
    Run the crew, allowing CLI overrides for required inputs.
    Usage example:
        uv run run_crew -- --topic="New Topic" --some_other_field="Value"
    Run the crew.
    """
    inputs = {
        'topic': 'AI LLMs'
        # Add any other default fields here
    }

    try:

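The removed docstring lines referenced CLI overrides; a minimal sketch of how such `--key="value"` arguments could be merged over the defaults — an assumption, since the parsing code itself is not part of this diff:

# Hypothetical sketch: merge CLI overrides such as --topic="New Topic" into inputs.
for arg in sys.argv[1:]:
    if arg.startswith("--") and "=" in arg:
        key, value = arg[2:].split("=", 1)
        inputs[key] = value
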
@@ -209,10 +209,6 @@ class Crew(BaseModel):
        default=None,
        description="LLM used to handle chatting with the crew.",
    )
    chat_inputs: Optional[ChatInputs] = Field(
        default=None,
        description="Holds descriptions of the crew as well as named inputs for chat usage.",
    )
    _knowledge: Optional[Knowledge] = PrivateAttr(
        default=None,
    )

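A hypothetical illustration of how these fields might be populated; the agent and task names are invented, and only `chat_llm` and `chat_inputs` come from this diff:

crew = Crew(
    agents=[researcher],
    tasks=[research_task],
    chat_llm="gpt-4o",  # resolved to an LLM instance via create_llm
)
crew.chat_inputs = ChatInputs(
    crew_name="research_crew",
    crew_description="Researches a topic and writes a report",
)
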
@@ -146,7 +146,6 @@ class LLM:
        self.callbacks = callbacks
        self.context_window_size = 0

        # For safety, we disable passing init params to next calls
        litellm.drop_params = True

        self.set_callbacks(callbacks)

@@ -247,40 +246,36 @@ class LLM:
            function_name = tool_call.function.name

            if function_name in available_functions:
                # Parse arguments
                try:
                    function_args = json.loads(tool_call.function.arguments)
                except json.JSONDecodeError as e:
                    logging.warning(f"Failed to parse function arguments: {e}")
                    return text_response  # Fallback to text response
                    return text_response

                fn = available_functions[function_name]
                try:
                    # Call the actual tool function
                    result = fn(**function_args)

                    # Return the result directly
                    return result

                except Exception as e:
                    logging.error(
                        f"Error executing function '{function_name}': {e}"
                    )
                    return text_response  # Fallback to text response
                    return text_response

            else:
                logging.warning(
                    f"Tool call requested unknown function '{function_name}'"
                )
                return text_response  # Fallback to text response
                return text_response

        except Exception as e:
            # Check if context length was exceeded, otherwise log
            if not LLMContextLengthExceededException(
                str(e)
            )._is_context_limit_error(str(e)):
                logging.error(f"LiteLLM call failed: {str(e)}")
            # Re-raise the exception
            raise

    def supports_function_calling(self) -> bool:

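Concretely, `tool_call.function.arguments` arrives as a JSON-encoded string in the OpenAI tool-calling shape, so the happy path above reduces to something like the following (the registered name and arguments are hypothetical):

import json

arguments = '{"topic": "AI LLMs"}'        # tool_call.function.arguments
function_args = json.loads(arguments)      # {"topic": "AI LLMs"}
fn = available_functions["research_crew"]  # hypothetical registered crew name
result = fn(**function_args)               # i.e. fn(topic="AI LLMs")
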
@@ -393,7 +393,7 @@ class Task(BaseModel):
            self.retry_count += 1
            context = self.i18n.errors("validation_error").format(
                guardrail_result_error=guardrail_result.error,
                task_output=task_output.raw
                task_output=task_output.raw,
            )
            printer = Printer()
            printer.print(

@@ -431,9 +431,7 @@ class Task(BaseModel):
            content = (
                json_output
                if json_output
                else pydantic_output.model_dump_json()
                if pydantic_output
                else result
                else pydantic_output.model_dump_json() if pydantic_output else result
            )
            self._save_file(content)

@@ -453,11 +451,12 @@ class Task(BaseModel):
        tasks_slices = [self.description, output]
        return "\n".join(tasks_slices)


    def interpolate_inputs_and_add_conversation_history(self, inputs: Dict[str, Union[str, int, float]]) -> None:
    def interpolate_inputs_and_add_conversation_history(
        self, inputs: Dict[str, Union[str, int, float]]
    ) -> None:
        """Interpolate inputs into the task description, expected output, and output file path.
        Add conversation history if present.

        Args:
            inputs: Dictionary mapping template variables to their values.
                Supported value types are strings, integers, and floats.

@@ -497,16 +496,15 @@ class Task(BaseModel):
                input_string=self._original_output_file, inputs=inputs
            )
        except (KeyError, ValueError) as e:
            raise ValueError(f"Error interpolating output_file path: {str(e)}") from e
            raise ValueError(
                f"Error interpolating output_file path: {str(e)}"
            ) from e

        if "crew_chat_messages" in inputs and inputs["crew_chat_messages"]:
            # Fetch the conversation history instruction using self.i18n.slice
            conversation_instruction = self.i18n.slice(
                "conversation_history_instruction"
            )
            print("crew_chat_messages:", inputs["crew_chat_messages"])

            # Ensure that inputs["crew_chat_messages"] is a string
            crew_chat_messages_json = str(inputs["crew_chat_messages"])

            try:

@@ -515,15 +513,15 @@ class Task(BaseModel):
                print("An error occurred while parsing crew chat messages:", e)
                raise

            # Process the messages to build conversation history
            conversation_history = "\n".join(
                f"{msg['role'].capitalize()}: {msg['content']}"
                for msg in crew_chat_messages
                if isinstance(msg, dict) and "role" in msg and "content" in msg
            )

            # Add the instruction and conversation history to the description
            self.description += f"\n\n{conversation_instruction}\n\n{conversation_history}"
            self.description += (
                f"\n\n{conversation_instruction}\n\n{conversation_history}"
            )

    def interpolate_only(
        self, input_string: Optional[str], inputs: Dict[str, Union[str, int, float]]

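To make the transformation concrete, a hypothetical `crew_chat_messages` payload and the history string the code above builds from it (the message contents are invented):

import json

crew_chat_messages_json = json.dumps([
    {"role": "assistant", "content": "Hey! What topic should we research?"},
    {"role": "user", "content": "AI LLMs"},
])
crew_chat_messages = json.loads(crew_chat_messages_json)

conversation_history = "\n".join(
    f"{msg['role'].capitalize()}: {msg['content']}"
    for msg in crew_chat_messages
    if isinstance(msg, dict) and "role" in msg and "content" in msg
)
# conversation_history == "Assistant: Hey! What topic should we research?\nUser: AI LLMs"
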
@@ -34,7 +34,6 @@ def create_llm(
    if isinstance(llm_value, str):
        try:
            created_llm = LLM(model=llm_value)
            print(f"LLM created with model='{llm_value}'")
            return created_llm
        except Exception as e:
            print(f"Failed to instantiate LLM with model='{llm_value}': {e}")

@@ -197,7 +196,6 @@ def _llm_via_environment_or_fallback() -> Optional[LLM]:
    # Try creating the LLM
    try:
        new_llm = LLM(**llm_params)
        print(f"LLM created with model='{model_name}'")
        return new_llm
    except Exception as e:
        print(