Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-10 16:48:30 +00:00
* Adding tool caching and loop execution prevention. This adds guardrails that both prevent the same tool from being used consecutively and cache tool results across the entire crew, cutting down execution time and unnecessary LLM calls. This plays a big role for smaller open-source models, which often fall into these behavior patterns. It also includes smaller improvements to the tool prompt and agent tools, all with the same intention of guiding models to better conform to agent instructions.
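The commit pairs the cache handler shown below with a guardrail against back-to-back repeats of the same tool call. That guard lives elsewhere in the crew's agent execution loop and is not shown in this file; the following is only a minimal sketch under that assumption, and ToolRepetitionGuard is a hypothetical name, not the repository's actual implementation.

from typing import Optional, Tuple


class ToolRepetitionGuard:
    """Hypothetical guard: remember the last tool/input pair and flag an
    immediate repeat so the agent can be nudged away from looping."""

    def __init__(self):
        self._last_call: Optional[Tuple[str, str]] = None

    def is_repeated(self, tool: str, input: str) -> bool:
        # Compare against the previous call, then record this one.
        call = (tool, input.strip())
        repeated = call == self._last_call
        self._last_call = call
        return repeated


guard = ToolRepetitionGuard()
assert guard.is_repeated("search", "crewAI docs") is False
assert guard.is_repeated("search", "crewAI docs") is True  # same call twice in a row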
21 lines · 457 B · Python
from typing import Optional

from pydantic import PrivateAttr


class CacheHandler:
    """Cache handler for tool usage results."""

    _cache: PrivateAttr = {}

    def __init__(self):
        self._cache = {}

    def add(self, tool, input, output):
        # Key entries by tool name and stripped input so a repeated call with
        # the same arguments can be served from the cache.
        input = input.strip()
        self._cache[f"{tool}-{input}"] = output

    def read(self, tool, input) -> Optional[str]:
        # Return the cached output for this tool/input pair, or None on a miss.
        input = input.strip()
        return self._cache.get(f"{tool}-{input}")
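A short usage sketch of the handler above; the tool name, input, and output strings are illustrative, not taken from the repository.

cache = CacheHandler()
cache.add(tool="search", input=" crewAI docs ", output="CrewAI is a framework for orchestrating agents.")

# Inputs are stripped, so an equivalent query hits the same cache entry and
# the tool (and any LLM round trip behind it) is not re-executed.
assert cache.read(tool="search", input="crewAI docs") == "CrewAI is a framework for orchestrating agents."
assert cache.read(tool="search", input="other query") is None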