mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-01-08 15:48:29 +00:00
* patching for non-gpt model * removal of json_object tool name assignment * fixed issue for smaller models due to instructions prompt * fixing for ollama llama3 models * WIP: generated summary from documents split, could also create memgpt approach * WIP: need tests but user-inputted summarization strategy implemented - handling context window exceeding errors * rm extra line * removed type ignores * added tests * handling n to summarize prompt * code cleanup, using click for cli asker * rm not used class * better refactor * reverted poetry lock * reverted poetry.lock * improved context window exceeding exception class
27 lines
921 B
Python
27 lines
921 B
Python
class LLMContextLengthExceededException(Exception):
    """Raised when an LLM call fails because the prompt exceeds the model's context window.

    The original provider error text is preserved on
    ``original_error_message``; the exception message shown to the user wraps
    it with a hint on how to recover (smaller input or a text-splitting
    strategy).
    """

    # Substrings (matched case-insensitively) that providers commonly use to
    # signal a context-window overflow.
    CONTEXT_LIMIT_ERRORS = [
        "maximum context length",
        "context length exceeded",
        "context_length_exceeded",
        "context window full",
        "too many tokens",
        "input is too long",
        "exceeds token limit",
    ]

    def __init__(self, error_message: str):
        """Store the raw provider message and build the wrapped exception text.

        Args:
            error_message: The raw error text returned by the LLM provider.
        """
        self.original_error_message = error_message
        super().__init__(self._get_error_message(error_message))

    def _is_context_limit_error(self, error_message: str) -> bool:
        """Return True if *error_message* matches a known context-limit phrase."""
        # Lowercase the message once instead of once per phrase.
        lowered = error_message.lower()
        return any(phrase.lower() in lowered for phrase in self.CONTEXT_LIMIT_ERRORS)

    def _get_error_message(self, error_message: str) -> str:
        """Wrap the original provider error with actionable recovery advice."""
        return (
            f"LLM context length exceeded. Original error: {error_message}\n"
            "Consider using a smaller input or implementing a text splitting strategy."
        )