Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-16 11:38:31 +00:00)

Merge remote-tracking branch 'upstream/main'

# Conflicts:
#	poetry.lock
#	src/crewai/tools/tool_usage.py
@@ -88,9 +88,6 @@ class Agent(BaseModel):
         default=None,
         description="Maximum number of requests per minute for the agent execution to be respected.",
     )
-    memory: bool = Field(
-        default=False, description="Whether the agent should have memory or not"
-    )
     verbose: bool = Field(
         default=False, description="Verbose mode for the Agent Execution"
     )
@@ -101,7 +98,11 @@ class Agent(BaseModel):
         default_factory=list, description="Tools at agents disposal"
     )
     max_iter: Optional[int] = Field(
-        default=15, description="Maximum iterations for an agent to execute a task"
+        default=25, description="Maximum iterations for an agent to execute a task"
     )
+    max_execution_time: Optional[int] = Field(
+        default=None,
+        description="Maximum execution time for an agent to execute a task",
+    )
     agent_executor: InstanceOf[CrewAgentExecutor] = Field(
         default=None, description="An instance of the CrewAgentExecutor class."
@@ -170,9 +171,12 @@ class Agent(BaseModel):
     def set_agent_executor(self) -> "Agent":
         """set agent executor is set."""
         if hasattr(self.llm, "model_name"):
-            self.llm.callbacks = [
-                TokenCalcHandler(self.llm.model_name, self._token_process)
-            ]
+            token_handler = TokenCalcHandler(self.llm.model_name, self._token_process)
+            if isinstance(self.llm.callbacks, list):
+                self.llm.callbacks.append(token_handler)
+            else:
+                self.llm.callbacks = [token_handler]
+
         if not self.agent_executor:
             if not self.cache_handler:
                 self.cache_handler = CacheHandler()
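The hunk above stops overwriting `self.llm.callbacks` and instead appends the token-counting handler when a callbacks list already exists. A minimal, self-contained sketch of that append-or-create pattern, using stand-in classes rather than crewAI's real `TokenCalcHandler` or LLM wrapper:

```python
from typing import Any, List, Optional


class FakeTokenHandler:
    """Stand-in for crewAI's TokenCalcHandler, used only for illustration."""

    def __init__(self, model_name: str) -> None:
        self.model_name = model_name


class FakeLLM:
    """Stand-in LLM object that may or may not already carry callbacks."""

    def __init__(self, model_name: str, callbacks: Optional[List[Any]] = None) -> None:
        self.model_name = model_name
        self.callbacks = callbacks


def attach_token_handler(llm: FakeLLM) -> None:
    # Append to an existing callbacks list instead of replacing it,
    # so handlers registered elsewhere are preserved.
    handler = FakeTokenHandler(llm.model_name)
    if isinstance(llm.callbacks, list):
        llm.callbacks.append(handler)
    else:
        llm.callbacks = [handler]


llm = FakeLLM("gpt-4", callbacks=[object()])
attach_token_handler(llm)
assert len(llm.callbacks) == 2  # the pre-existing callback survives
```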
@@ -205,14 +209,15 @@ class Agent(BaseModel):
                 task=task_prompt, context=context
             )

-        if self.crew and self.memory:
+        if self.crew and self.crew.memory:
             contextual_memory = ContextualMemory(
                 self.crew._short_term_memory,
                 self.crew._long_term_memory,
                 self.crew._entity_memory,
             )
             memory = contextual_memory.build_context_for_task(task, context)
-            task_prompt += self.i18n.slice("memory").format(memory=memory)
+            if memory.strip() != "":
+                task_prompt += self.i18n.slice("memory").format(memory=memory)

         tools = tools or self.tools
         parsed_tools = self._parse_tools(tools)
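The added `if memory.strip() != "":` guard keeps empty recall results from being stitched into the task prompt. A small sketch of the same idea, assuming a plain-string memory blob and an invented prompt suffix rather than crewAI's i18n `memory` slice:

```python
def add_memory_to_prompt(task_prompt: str, memory: str) -> str:
    # Only extend the prompt when the recalled context is non-empty,
    # mirroring the `if memory.strip() != "":` guard in the diff.
    if memory.strip() != "":
        task_prompt += f"\n\n# Useful context:\n{memory}"
    return task_prompt


print(add_memory_to_prompt("Write a report.", ""))             # prompt unchanged
print(add_memory_to_prompt("Write a report.", "Prior notes"))  # context appended
```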
@@ -286,6 +291,7 @@ class Agent(BaseModel):
             "original_tools": tools,
             "handle_parsing_errors": True,
             "max_iterations": self.max_iter,
+            "max_execution_time": self.max_execution_time,
             "step_callback": self.step_callback,
             "tools_handler": self.tools_handler,
             "function_calling_llm": self.function_calling_llm,
@@ -53,7 +53,8 @@ class CrewAgentExecutor(AgentExecutor):

     def _create_short_term_memory(self, output) -> None:
         if (
-            self.crew_agent.memory
+            self.crew
+            and self.crew.memory
             and "Action: Delegate work to co-worker" not in output.log
         ):
             memory = ShortTermMemoryItem(
@@ -66,7 +67,7 @@ class CrewAgentExecutor(AgentExecutor):
             self.crew._short_term_memory.save(memory)

     def _create_long_term_memory(self, output) -> None:
-        if self.crew_agent.memory:
+        if self.crew and self.crew.memory:
             ltm_agent = TaskEvaluator(self.crew_agent)
             evaluation = ltm_agent.evaluate(self.task, output.log)

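Both memory writers now key off the crew-level flag (`self.crew and self.crew.memory`) rather than a per-agent `memory` attribute. A compact sketch of that gate, with toy dataclasses standing in for the crew and its short-term memory store:

```python
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class FakeShortTermMemory:
    items: List[str] = field(default_factory=list)

    def save(self, item: str) -> None:
        self.items.append(item)


@dataclass
class FakeCrew:
    memory: bool = False
    short_term_memory: Optional[FakeShortTermMemory] = None


def maybe_save(crew: Optional[FakeCrew], log: str) -> None:
    # Skip writes when there is no crew, crew memory is disabled,
    # or the step is a delegation action (as in the diff's guard).
    if (
        crew
        and crew.memory
        and "Action: Delegate work to co-worker" not in log
    ):
        crew.short_term_memory.save(log)


crew = FakeCrew(memory=True, short_term_memory=FakeShortTermMemory())
maybe_save(crew, "Thought: summarize findings")
maybe_save(None, "Thought: no crew attached")  # safely ignored
assert crew.short_term_memory.items == ["Thought: summarize findings"]
```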
src/crewai/cli/templates/.gitignore (vendored, 1 line changed)
@@ -1,3 +1,2 @@
 .env
-.db
 __pycache__/
@@ -23,7 +23,7 @@ poetry install
 ```

 ### Customizing

-**Add you `OPENAI_API_KEY` on the `.env` file**
+**Add your `OPENAI_API_KEY` into the `.env` file**

 - Modify `src/{{folder_name}}/config/agents.yaml` to define your agents
 - Modify `src/{{folder_name}}/config/tasks.yaml` to define your tasks
@@ -6,7 +6,7 @@ authors = ["Your Name <you@example.com>"]

 [tool.poetry.dependencies]
 python = ">=3.10,<=3.13"
-crewai = {extras = ["tools"], version = "^0.22.2"}
+crewai = {extras = ["tools"], version = "^0.27.0"}

 [tool.poetry.scripts]
 {{folder_name}} = "{{folder_name}}.main:run"
@@ -1,8 +1,5 @@
 import json
-import subprocess
-import sys
 import uuid
-from pathlib import Path
 from typing import Any, Dict, List, Optional, Union

 from langchain_core.callbacks import BaseCallbackHandler
@@ -73,7 +70,7 @@ class Crew(BaseModel):
     process: Process = Field(default=Process.sequential)
     verbose: Union[int, bool] = Field(default=0)
     memory: bool = Field(
-        default=True,
+        default=False,
         description="Whether the crew should use memory to store memories of it's execution",
     )
     embedder: Optional[dict] = Field(
@@ -161,10 +158,6 @@
     def create_crew_memory(self) -> "Crew":
         """Set private attributes."""
         if self.memory:
-            storage_dir = Path(".db")
-            storage_dir.mkdir(exist_ok=True)
-            if sys.platform.startswith("win"):
-                subprocess.call(["attrib", "+H", str(storage_dir)])
             self._long_term_memory = LongTermMemory()
             self._short_term_memory = ShortTermMemory(embedder_config=self.embedder)
             self._entity_memory = EntityMemory(embedder_config=self.embedder)
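With the default flipped to `False` and the local `.db` directory handling removed, crew memory is now opt-in, and its on-disk location is resolved by `db_storage_path` (changed further down in this diff). A hedged usage sketch; the exact `Agent`/`Task` fields shown are illustrative and may differ between crewAI versions:

```python
# Illustrative only: these constructor arguments reflect common crewAI usage
# around this point in its history, not a guaranteed-stable signature.
from crewai import Agent, Crew, Task

researcher = Agent(
    role="Researcher",
    goal="Collect background material",
    backstory="A meticulous analyst.",
)
task = Task(description="Summarize recent findings.", agent=researcher)

crew = Crew(
    agents=[researcher],
    tasks=[task],
    memory=True,  # opt in explicitly now that the default is False
)
```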
@@ -286,9 +279,9 @@
             task.tools += AgentTools(agents=agents_for_delegation).tools()

             role = task.agent.role if task.agent is not None else "None"
-            self._logger.log("debug", f"== Working Agent: {role}", color="bold_yellow")
+            self._logger.log("debug", f"== Working Agent: {role}", color="bold_purple")
             self._logger.log(
-                "info", f"== Starting Task: {task.description}", color="bold_yellow"
+                "info", f"== Starting Task: {task.description}", color="bold_purple"
             )

             output = task.execute(context=task_output)
@@ -1,10 +1,12 @@
 import contextlib
 import io
 import logging
+import os
 from typing import Any, Dict

 from embedchain import App
 from embedchain.llm.base import BaseLlm
+from embedchain.vectordb.chroma import InvalidDimensionException

 from crewai.memory.storage.interface import Storage
 from crewai.utilities.paths import db_storage_path
@@ -37,6 +39,11 @@ class RAGStorage(Storage):

     def __init__(self, type, allow_reset=True, embedder_config=None):
         super().__init__()
+        if (
+            not os.getenv("OPENAI_API_KEY")
+            and not os.getenv("OPENAI_BASE_URL") == "https://api.openai.com/v1"
+        ):
+            os.environ["OPENAI_API_KEY"] = "fake"
         config = {
             "app": {
                 "config": {"name": type, "collect_metrics": False, "log_level": "ERROR"}
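The added block injects a placeholder `OPENAI_API_KEY` when no key is set and `OPENAI_BASE_URL` is not the official endpoint, so embedchain can still initialize when a non-OpenAI embedder is configured. A standalone sketch of that environment guard (the function name is mine, not crewAI's):

```python
import os


def ensure_placeholder_openai_key() -> None:
    # If no API key is set and the configured base URL is not the official
    # OpenAI endpoint, inject a dummy key so client libraries that insist on
    # a key at initialization time do not fail.
    official = "https://api.openai.com/v1"
    if not os.getenv("OPENAI_API_KEY") and os.getenv("OPENAI_BASE_URL") != official:
        os.environ["OPENAI_API_KEY"] = "fake"


os.environ.pop("OPENAI_API_KEY", None)  # demo: simulate a missing key
ensure_placeholder_openai_key()
print(os.environ["OPENAI_API_KEY"])  # "fake"
```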
@@ -76,11 +83,15 @@ class RAGStorage(Storage):
         score_threshold: float = 0.35,
     ) -> Dict[str, Any]:
         with suppress_logging():
-            results = (
-                self.app.search(query, limit, where=filter)
-                if filter
-                else self.app.search(query, limit)
-            )
+            try:
+                results = (
+                    self.app.search(query, limit, where=filter)
+                    if filter
+                    else self.app.search(query, limit)
+                )
+            except InvalidDimensionException:
+                self.app.reset()
+                return []
         return [r for r in results if r["metadata"]["score"] >= score_threshold]

     def _generate_embedding(self, text: str, metadata: Dict[str, Any]) -> Any:
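The search path now survives a Chroma collection whose stored vectors were created with a different embedder: on `InvalidDimensionException` the store is reset and an empty list is returned instead of raising. A generic sketch of that reset-on-mismatch pattern, with a local placeholder exception and a toy store instead of embedchain's real classes:

```python
from typing import Any, Dict, List


class DimensionMismatchError(Exception):
    """Placeholder for embedchain's InvalidDimensionException."""


class TinyVectorStore:
    """Toy store used only to demonstrate the reset-on-mismatch pattern."""

    def __init__(self, broken: bool = False) -> None:
        self.broken = broken

    def search(self, query: str, limit: int) -> List[Dict[str, Any]]:
        if self.broken:
            raise DimensionMismatchError("embedding dimension changed")
        return [{"text": query, "metadata": {"score": 0.9}}][:limit]

    def reset(self) -> None:
        self.broken = False  # dropping the old collection clears the mismatch


def safe_search(store: TinyVectorStore, query: str, limit: int = 3,
                score_threshold: float = 0.35) -> List[Dict[str, Any]]:
    try:
        results = store.search(query, limit)
    except DimensionMismatchError:
        store.reset()
        return []  # nothing usable this time; future queries work again
    return [r for r in results if r["metadata"]["score"] >= score_threshold]


store = TinyVectorStore(broken=True)
print(safe_search(store, "project status"))  # [] on the first, mismatched call
print(safe_search(store, "project status"))  # normal results after the reset
```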
@@ -221,6 +221,16 @@ class Task(BaseModel):

         if self.output_pydantic or self.output_json:
             model = self.output_pydantic or self.output_json
+
+            # try to convert task_output directly to pydantic/json
+            try:
+                exported_result = model.model_validate_json(result)
+                if self.output_json:
+                    return exported_result.model_dump()
+                return exported_result
+            except Exception:
+                pass
+
             llm = self.agent.function_calling_llm or self.agent.llm

             if not self._is_gpt(llm):
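The new block first tries to parse the raw output straight into the declared Pydantic model with `model_validate_json`, and only falls back to the LLM-based conversion when that fails. A self-contained sketch of the fast path, using a toy model in place of a user-defined `output_pydantic`/`output_json` class and a `None` return where crewAI would invoke the converter:

```python
from typing import Optional, Union

from pydantic import BaseModel


class Report(BaseModel):
    title: str
    score: float


def export_result(raw: str, as_json: bool = False) -> Optional[Union[Report, dict]]:
    # Fast path: the model may already have emitted valid JSON for the schema.
    try:
        parsed = Report.model_validate_json(raw)
        return parsed.model_dump() if as_json else parsed
    except Exception:
        # Fallback: in crewAI this is where the function-calling LLM
        # conversion would run; here we simply signal "not parseable".
        return None


print(export_result('{"title": "Q1", "score": 0.87}'))           # Report instance
print(export_result('{"title": "Q1", "score": 0.87}', True))     # plain dict
print(export_result("Sure! Here is the report you asked for."))  # None -> fallback
```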
@@ -85,7 +85,6 @@ class ToolUsage:
             self._printer.print(content=f"\n\n{error}\n", color="red")
             self.task.increment_tools_errors()
             return error
-
         try:
             tool = self._select_tool(calling.tool_name)
         except Exception as e:
@@ -107,7 +106,7 @@ class ToolUsage:
             result = self._i18n.errors("task_repeated_usage").format(
                 tool_names=self.tools_names
             )
-            self._printer.print(content=f"\n\n{result}\n", color="yellow")
+            self._printer.print(content=f"\n\n{result}\n", color="purple")
             self._telemetry.tool_repeated_usage(
                 llm=self.function_calling_llm,
                 tool_name=tool.name,
@@ -186,7 +185,7 @@ class ToolUsage:
                 calling=calling, output=result, should_cache=should_cache
             )

-            self._printer.print(content=f"\n\n{result}\n", color="yellow")
+            self._printer.print(content=f"\n\n{result}\n", color="purple")
             if agentops:
                 agentops.record(tool_event)
             self._telemetry.tool_usage(
@@ -4,9 +4,15 @@ import appdirs


 def db_storage_path():
-    app_name = "crewai"
+    app_name = get_project_directory_name()
     app_author = "CrewAI"

     data_dir = Path(appdirs.user_data_dir(app_name, app_author))
     data_dir.mkdir(parents=True, exist_ok=True)
     return data_dir
+
+
+def get_project_directory_name():
+    cwd = Path.cwd()
+    project_directory_name = cwd.name
+    return project_directory_name
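`db_storage_path` now derives the storage directory from the current project's folder name instead of a fixed `"crewai"` app name, so each project gets its own memory database under the platform's user-data directory. A standalone version of the same logic (requires the `appdirs` package):

```python
from pathlib import Path

import appdirs


def db_storage_path() -> Path:
    # e.g. ~/.local/share/<project folder name> on Linux,
    # keyed by the directory you launch the crew from.
    app_name = Path.cwd().name
    app_author = "CrewAI"

    data_dir = Path(appdirs.user_data_dir(app_name, app_author))
    data_dir.mkdir(parents=True, exist_ok=True)
    return data_dir


print(db_storage_path())
```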
@@ -1,24 +1,24 @@
 class Printer:
     def print(self, content: str, color: str):
-        if color == "yellow":
-            self._print_yellow(content)
+        if color == "purple":
+            self._print_purple(content)
         elif color == "red":
             self._print_red(content)
         elif color == "bold_green":
             self._print_bold_green(content)
-        elif color == "bold_yellow":
-            self._print_bold_yellow(content)
+        elif color == "bold_purple":
+            self._print_bold_purple(content)
         else:
             print(content)

-    def _print_bold_yellow(self, content):
-        print("\033[1m\033[93m {}\033[00m".format(content))
+    def _print_bold_purple(self, content):
+        print("\033[1m\033[95m {}\033[00m".format(content))

     def _print_bold_green(self, content):
         print("\033[1m\033[92m {}\033[00m".format(content))

-    def _print_yellow(self, content):
-        print("\033[93m {}\033[00m".format(content))
+    def _print_purple(self, content):
+        print("\033[95m {}\033[00m".format(content))

     def _print_red(self, content):
         print("\033[91m {}\033[00m".format(content))