Merge branch 'main' into feat/joao-flow-improvement-requests

João Moura authored on 2024-12-24 18:52:59 -03:00, committed by GitHub
10 changed files with 1257 additions and 48 deletions

View File

@@ -1,10 +1,12 @@
"""Test Knowledge creation and querying functionality."""
from pathlib import Path
from typing import List, Union
from unittest.mock import patch
import pytest
from crewai.knowledge.source.crew_docling_source import CrewDoclingSource
from crewai.knowledge.source.csv_knowledge_source import CSVKnowledgeSource
from crewai.knowledge.source.excel_knowledge_source import ExcelKnowledgeSource
from crewai.knowledge.source.json_knowledge_source import JSONKnowledgeSource
@@ -200,7 +202,7 @@ def test_single_short_file(mock_vector_db, tmpdir):
        f.write(content)
    file_source = TextFileKnowledgeSource(
-        file_path=file_path, metadata={"preference": "personal"}
+        file_paths=[file_path], metadata={"preference": "personal"}
    )
    mock_vector_db.sources = [file_source]
    mock_vector_db.query.return_value = [{"context": content, "score": 0.9}]
@@ -242,7 +244,7 @@ def test_single_2k_character_file(mock_vector_db, tmpdir):
        f.write(content)
    file_source = TextFileKnowledgeSource(
-        file_path=file_path, metadata={"preference": "personal"}
+        file_paths=[file_path], metadata={"preference": "personal"}
    )
    mock_vector_db.sources = [file_source]
    mock_vector_db.query.return_value = [{"context": content, "score": 0.9}]
@@ -279,7 +281,7 @@ def test_multiple_short_files(mock_vector_db, tmpdir):
        file_paths.append((file_path, item["metadata"]))
    file_sources = [
-        TextFileKnowledgeSource(file_path=path, metadata=metadata)
+        TextFileKnowledgeSource(file_paths=[path], metadata=metadata)
        for path, metadata in file_paths
    ]
    mock_vector_db.sources = file_sources
@@ -352,7 +354,7 @@ def test_multiple_2k_character_files(mock_vector_db, tmpdir):
        file_paths.append(file_path)
    file_sources = [
-        TextFileKnowledgeSource(file_path=path, metadata={"preference": "personal"})
+        TextFileKnowledgeSource(file_paths=[path], metadata={"preference": "personal"})
        for path in file_paths
    ]
    mock_vector_db.sources = file_sources
@@ -399,7 +401,7 @@ def test_hybrid_string_and_files(mock_vector_db, tmpdir):
        file_paths.append(file_path)
    file_sources = [
-        TextFileKnowledgeSource(file_path=path, metadata={"preference": "personal"})
+        TextFileKnowledgeSource(file_paths=[path], metadata={"preference": "personal"})
        for path in file_paths
    ]
@@ -424,7 +426,7 @@ def test_pdf_knowledge_source(mock_vector_db):
    # Create a PDFKnowledgeSource
    pdf_source = PDFKnowledgeSource(
-        file_path=pdf_path, metadata={"preference": "personal"}
+        file_paths=[pdf_path], metadata={"preference": "personal"}
    )
    mock_vector_db.sources = [pdf_source]
    mock_vector_db.query.return_value = [
@@ -461,7 +463,7 @@ def test_csv_knowledge_source(mock_vector_db, tmpdir):
    # Create a CSVKnowledgeSource
    csv_source = CSVKnowledgeSource(
-        file_path=csv_path, metadata={"preference": "personal"}
+        file_paths=[csv_path], metadata={"preference": "personal"}
    )
    mock_vector_db.sources = [csv_source]
    mock_vector_db.query.return_value = [
@@ -496,7 +498,7 @@ def test_json_knowledge_source(mock_vector_db, tmpdir):
    # Create a JSONKnowledgeSource
    json_source = JSONKnowledgeSource(
-        file_path=json_path, metadata={"preference": "personal"}
+        file_paths=[json_path], metadata={"preference": "personal"}
    )
    mock_vector_db.sources = [json_source]
    mock_vector_db.query.return_value = [
@@ -529,7 +531,7 @@ def test_excel_knowledge_source(mock_vector_db, tmpdir):
    # Create an ExcelKnowledgeSource
    excel_source = ExcelKnowledgeSource(
-        file_path=excel_path, metadata={"preference": "personal"}
+        file_paths=[excel_path], metadata={"preference": "personal"}
    )
    mock_vector_db.sources = [excel_source]
    mock_vector_db.query.return_value = [
@@ -543,3 +545,42 @@ def test_excel_knowledge_source(mock_vector_db, tmpdir):
    # Assert that the correct information is retrieved
    assert any("30" in result["context"] for result in results)
    mock_vector_db.query.assert_called_once()
+
+
+def test_docling_source(mock_vector_db):
+    docling_source = CrewDoclingSource(
+        file_paths=[
+            "https://lilianweng.github.io/posts/2024-11-28-reward-hacking/",
+        ],
+    )
+    mock_vector_db.sources = [docling_source]
+    mock_vector_db.query.return_value = [
+        {
+            "context": "Reward hacking is a technique used to improve the performance of reinforcement learning agents.",
+            "score": 0.9,
+        }
+    ]
+    # Perform a query
+    query = "What is reward hacking?"
+    results = mock_vector_db.query(query)
+    assert any("reward hacking" in result["context"].lower() for result in results)
+    mock_vector_db.query.assert_called_once()
+
+
+def test_multiple_docling_sources():
+    urls: List[Union[Path, str]] = [
+        "https://lilianweng.github.io/posts/2024-11-28-reward-hacking/",
+        "https://lilianweng.github.io/posts/2024-07-07-hallucination/",
+    ]
+    docling_source = CrewDoclingSource(file_paths=urls)
+    assert docling_source.file_paths == urls
+    assert docling_source.content is not None
+
+
+def test_docling_source_with_local_file():
+    current_dir = Path(__file__).parent
+    pdf_path = current_dir / "crewai_quickstart.pdf"
+    docling_source = CrewDoclingSource(file_paths=[pdf_path])
+    assert docling_source.file_paths == [pdf_path]
+    assert docling_source.content is not None

View File

@@ -736,6 +736,48 @@ def test_interpolate_inputs():
    assert task.expected_output == "Bullet point list of 5 interesting ideas about ML."
+
+
+def test_interpolate_only():
+    """Test the interpolate_only method for various scenarios including JSON structure preservation."""
+    task = Task(
+        description="Unused in this test",
+        expected_output="Unused in this test"
+    )
+
+    # Test JSON structure preservation
+    json_string = '{"info": "Look at {placeholder}", "nested": {"val": "{nestedVal}"}}'
+    result = task.interpolate_only(
+        input_string=json_string,
+        inputs={"placeholder": "the data", "nestedVal": "something else"}
+    )
+    assert '"info": "Look at the data"' in result
+    assert '"val": "something else"' in result
+    assert "{placeholder}" not in result
+    assert "{nestedVal}" not in result
+
+    # Test normal string interpolation
+    normal_string = "Hello {name}, welcome to {place}!"
+    result = task.interpolate_only(
+        input_string=normal_string,
+        inputs={"name": "John", "place": "CrewAI"}
+    )
+    assert result == "Hello John, welcome to CrewAI!"
+
+    # Test empty string
+    result = task.interpolate_only(
+        input_string="",
+        inputs={"unused": "value"}
+    )
+    assert result == ""
+
+    # Test string with no placeholders
+    no_placeholders = "Hello, this is a test"
+    result = task.interpolate_only(
+        input_string=no_placeholders,
+        inputs={"unused": "value"}
+    )
+    assert result == no_placeholders


def test_task_output_str_with_pydantic():
    from crewai.tasks.output_format import OutputFormat