diff --git a/docs/concepts/knowledge.mdx b/docs/concepts/knowledge.mdx
index 06c2eb947..16bb9efb1 100644
--- a/docs/concepts/knowledge.mdx
+++ b/docs/concepts/knowledge.mdx
@@ -48,7 +48,6 @@ from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSourc
 content = "Users name is John. He is 30 years old and lives in San Francisco."
 string_source = StringKnowledgeSource(
     content=content,
-    metadata={"preference": "personal"}
 )
 
 # Create an LLM with a temperature of 0 to ensure deterministic outputs
@@ -76,7 +75,6 @@ crew = Crew(
     process=Process.sequential,
     knowledge={
         "sources": [string_source],
-        "metadata": {"preference": "personal"}
     }, # Enable knowledge by adding the sources here. You can also add more sources to the sources list.
 )
 
@@ -85,17 +83,6 @@ result = crew.kickoff(inputs={"question": "What city does John live in and how o
 
 ## Knowledge Configuration
 
-### Metadata and Filtering
-
-Knowledge sources support metadata for better organization and filtering. Metadata is used to filter the knowledge sources when querying the knowledge store.
-
-```python Code
-knowledge_source = StringKnowledgeSource(
-    content="Users name is John. He is 30 years old and lives in San Francisco.",
-    metadata={"preference": "personal"} # Metadata is used to filter the knowledge sources
-)
-```
-
 ### Chunking Configuration
 
 Control how content is split for processing by setting the chunk size and overlap.
@@ -116,13 +103,11 @@ You can also configure the embedder for the knowledge store. This is useful if y
 ...
 string_source = StringKnowledgeSource(
     content="Users name is John. He is 30 years old and lives in San Francisco.",
-    metadata={"preference": "personal"}
 )
 crew = Crew(
     ...
     knowledge={
         "sources": [string_source],
-        "metadata": {"preference": "personal"},
         "embedder_config": {
             "provider": "openai", # Default embedder provider; can be "ollama", "gemini", e.t.c.
             "config": {"model": "text-embedding-3-small"} # Default embedder model; can be "mxbai-embed-large", "nomic-embed-tex", e.t.c.
@@ -326,4 +311,4 @@ recent_news = SpaceNewsKnowledgeSource(
 - Configure appropriate embedding models
 - Consider using local embedding providers for faster processing
-
\ No newline at end of file
+
diff --git a/src/crewai/knowledge/knowledge.py b/src/crewai/knowledge/knowledge.py
index d7729e9b7..f9f55a517 100644
--- a/src/crewai/knowledge/knowledge.py
+++ b/src/crewai/knowledge/knowledge.py
@@ -5,7 +5,6 @@
 from pydantic import BaseModel, ConfigDict, Field
 
 from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
 from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
-from crewai.utilities.constants import DEFAULT_SCORE_THRESHOLD
 
 os.environ["TOKENIZERS_PARALLELISM"] = "false" # removes logging from fastembed
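
For reference, a minimal sketch of what the documented usage looks like after this change, assembled from the context lines in the docs diff above: the `StringKnowledgeSource` and the `knowledge={...}` block on `Crew` are taken from the patched docs, while the `LLM`, `Agent`, and `Task` details (model name, role, goal, backstory) are illustrative placeholders, not part of this patch.

```python
# Sketch of post-change usage, assuming the API shown in the updated docs above.
from crewai import Agent, Crew, LLM, Process, Task
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

# Knowledge sources are now declared without the removed `metadata` argument.
content = "Users name is John. He is 30 years old and lives in San Francisco."
string_source = StringKnowledgeSource(
    content=content,
)

# Illustrative LLM/agent/task setup; temperature 0 for deterministic outputs,
# as the docs example suggests. The model name is a placeholder.
llm = LLM(model="gpt-4o-mini", temperature=0)

agent = Agent(
    role="About User",
    goal="Answer questions about the user.",
    backstory="You know the user's personal details.",
    llm=llm,
)

task = Task(
    description="Answer the following question about the user: {question}",
    expected_output="An answer to the question.",
    agent=agent,
)

crew = Crew(
    agents=[agent],
    tasks=[task],
    process=Process.sequential,
    knowledge={
        "sources": [string_source],  # no top-level "metadata" key anymore
        "embedder_config": {
            "provider": "openai",
            "config": {"model": "text-embedding-3-small"},
        },
    },
)

result = crew.kickoff(
    inputs={"question": "What city does John live in and how old is he?"}
)
```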