fix: Improve knowledge context formatting for better LLM effectiveness

- Make knowledge context more explicit and instructive for LLM
- Add clear instructions to use provided context
- Add comprehensive test coverage for knowledge utils

Fixes #2269

Co-Authored-By: Joe Moura <joao@crewai.com>
Author: Devin AI
Date: 2025-03-04 04:16:26 +00:00
Parent: 00eede0d5d
Commit: 978d3a19c6
2 changed files with 49 additions and 2 deletions

@@ -2,11 +2,19 @@ from typing import Any, Dict, List
 def extract_knowledge_context(knowledge_snippets: List[Dict[str, Any]]) -> str:
-    """Extract knowledge from the task prompt."""
+    """Extract knowledge from the task prompt and format it for effective LLM usage."""
     valid_snippets = [
         result["context"]
         for result in knowledge_snippets
         if result and result.get("context")
     ]
+    if not valid_snippets:
+        return ""
     snippet = "\n".join(valid_snippets)
-    return f"Additional Information: {snippet}" if valid_snippets else ""
+    return (
+        "Important Context (You MUST use this information to complete your task "
+        "accurately and effectively):\n"
+        f"{snippet}\n\n"
+        "Make sure to incorporate the above context into your response."
+    )