Merge branch 'main' into lg-guardrail-llm

Lucas Gomide authored 2025-04-29 11:01:20 -03:00; committed by GitHub
50 changed files with 4288 additions and 2298 deletions

View File

@@ -17,7 +17,7 @@ warnings.filterwarnings(
     category=UserWarning,
     module="pydantic.main",
 )
-__version__ = "0.114.0"
+__version__ = "0.117.1"
 __all__ = [
     "Agent",
     "Crew",

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<3.13"
 dependencies = [
-    "crewai[tools]>=0.114.0,<1.0.0"
+    "crewai[tools]>=0.117.1,<1.0.0"
 ]

 [project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<3.13"
 dependencies = [
-    "crewai[tools]>=0.114.0,<1.0.0",
+    "crewai[tools]>=0.117.1,<1.0.0",
 ]

 [project.scripts]

View File

@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
 readme = "README.md"
 requires-python = ">=3.10,<3.13"
 dependencies = [
-    "crewai[tools]>=0.114.0"
+    "crewai[tools]>=0.117.1"
 ]

 [tool.crewai]
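All three scaffolding templates pin crewai with a PEP 440 range. A hedged sketch of checking an installed version against that range (the helper name and the use of the packaging library are assumptions for illustration, not part of this commit):

```python
# Hypothetical check, not part of this commit: confirm the installed crewai
# satisfies the version range the scaffolded templates now require.
from importlib.metadata import version          # stdlib, Python 3.8+
from packaging.specifiers import SpecifierSet   # from the `packaging` package

TEMPLATE_SPEC = SpecifierSet(">=0.117.1,<1.0.0")

def crewai_matches_template() -> bool:
    # SpecifierSet supports `in` with a version string.
    return version("crewai") in TEMPLATE_SPEC

print(crewai_matches_template())
```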

View File

@@ -65,7 +65,7 @@ class FilteredStream:
         if (
             "Give Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new"
             in s
-            or "LiteLLM.Info: If you need to debug this error, use `litellm.set_verbose=True`"
+            or "LiteLLM.Info: If you need to debug this error, use `litellm._turn_on_debug()`"
             in s
         ):
             return 0
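This hunk tracks LiteLLM's renamed debug hint (`litellm.set_verbose=True` became `litellm._turn_on_debug()`) so the stdout noise filter keeps matching. A minimal standalone sketch of the same suppression pattern; the wrapper shape here is assumed, only the matched strings come from this diff:

```python
import sys

# Known LiteLLM banner fragments to swallow (taken from the hunk above).
SUPPRESSED_SNIPPETS = (
    "Give Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new",
    "LiteLLM.Info: If you need to debug this error, use `litellm._turn_on_debug()`",
)

class FilteredStream:
    def __init__(self, wrapped):
        self._wrapped = wrapped

    def write(self, s: str) -> int:
        # Drop writes containing known noise; pass everything else through.
        if any(snippet in s for snippet in SUPPRESSED_SNIPPETS):
            return 0
        return self._wrapped.write(s)

    def flush(self) -> None:
        self._wrapped.flush()

sys.stdout = FilteredStream(sys.stdout)
```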

View File

@@ -88,7 +88,9 @@ class Mem0Storage(Storage):
         }

-        if params:
-            self.memory.add(value, **params | {"output_format": "v1.1"})
+        if isinstance(self.memory, MemoryClient):
+            params["output_format"] = "v1.1"
+
+        self.memory.add(value, **params)

     def search(
         self,
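The save path now sets `output_format="v1.1"` only when the backend is mem0's hosted `MemoryClient`; as the later hunk's `del params["output_format"]` implies, the local `Memory` class does not take that kwarg. A hedged sketch of the dispatch (the stub classes are illustrative, not mem0's real API surface):

```python
# Illustrative stubs only; the real classes are mem0's Memory and MemoryClient.
class Memory:                       # local, in-process store
    def add(self, value, **params): ...

class MemoryClient:                 # hosted API client; accepts output_format
    def add(self, value, **params): ...

def save(memory, value, params):
    # Only the hosted client understands the v1.1 payload format.
    if isinstance(memory, MemoryClient):
        params["output_format"] = "v1.1"
    memory.add(value, **params)

save(MemoryClient(), "user prefers dark mode", {"user_id": "u1"})
```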
@@ -96,7 +98,7 @@ class Mem0Storage(Storage):
         limit: int = 3,
         score_threshold: float = 0.35,
     ) -> List[Any]:
-        params = {"query": query, "limit": limit}
+        params = {"query": query, "limit": limit, "output_format": "v1.1"}

         if user_id := self._get_user_id():
             params["user_id"] = user_id
@@ -116,8 +118,11 @@ class Mem0Storage(Storage):

         # Discard the filters for now since we create the filters
         # automatically when the crew is created.
+        if isinstance(self.memory, Memory):
+            del params["metadata"], params["output_format"]
+
         results = self.memory.search(**params)
-        return [r for r in results if r["score"] >= score_threshold]
+        return [r for r in results["results"] if r["score"] >= score_threshold]

     def _get_user_id(self) -> str:
         return self._get_config().get("user_id", "")
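Both search hunks follow from mem0's v1.1 response shape: the hits come back wrapped in a dict under "results" rather than as a bare list. The shapes below are assumed from what this diff implies, simplified:

```python
# Assumed response shapes, simplified from what this diff implies.
legacy_response = [
    {"memory": "user prefers dark mode", "score": 0.91},
]
v1_1_response = {
    "results": [
        {"memory": "user prefers dark mode", "score": 0.91},
    ]
}

score_threshold = 0.35
# Old code iterated the bare list; new code unwraps the "results" key first.
hits = [r for r in v1_1_response["results"] if r["score"] >= score_threshold]
print(hits)
```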

View File

@@ -54,10 +54,12 @@ class Prompts(BaseModel):
         response_template=None,
     ) -> str:
         """Constructs a prompt string from specified components."""
-        if not system_template and not prompt_template:
+        if not system_template or not prompt_template:
+            # If any of the required templates are missing, fall back to the default format
             prompt_parts = [self.i18n.slice(component) for component in components]
             prompt = "".join(prompt_parts)
         else:
+            # All templates are provided, use them
             prompt_parts = [
                 self.i18n.slice(component)
                 for component in components
@@ -67,8 +69,12 @@ class Prompts(BaseModel):
             prompt = prompt_template.replace(
                 "{{ .Prompt }}", "".join(self.i18n.slice("task"))
             )
-            response = response_template.split("{{ .Response }}")[0]
-            prompt = f"{system}\n{prompt}\n{response}"
+            # Handle missing response_template
+            if response_template:
+                response = response_template.split("{{ .Response }}")[0]
+                prompt = f"{system}\n{prompt}\n{response}"
+            else:
+                prompt = f"{system}\n{prompt}"

         prompt = (
             prompt.replace("{goal}", self.agent.goal)
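Taken together, the two Prompts hunks change the guard from `and` to `or` (fall back to the default format unless both templates are present) and stop assuming `response_template` exists. A self-contained sketch of the corrected control flow; the i18n slice lookups are replaced with literals for illustration:

```python
# Simplified re-creation of the fixed logic; slice lookups replaced by literals.
def build_prompt(system_template=None, prompt_template=None, response_template=None):
    if not system_template or not prompt_template:
        # Either required template missing: fall back to the default format.
        return "default role + task prompt"
    system = system_template.replace("{{ .System }}", "You are a helpful agent.")
    prompt = prompt_template.replace("{{ .Prompt }}", "Do the task.")
    if response_template:
        response = response_template.split("{{ .Response }}")[0]
        return f"{system}\n{prompt}\n{response}"
    return f"{system}\n{prompt}"

# Before the fix, passing only system_template crashed on prompt_template=None
# (the old `and` guard sent it to the else branch); now it falls back cleanly.
print(build_prompt(system_template="SYSTEM: {{ .System }}"))
```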