Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-09 08:08:32 +00:00
fix linting
@@ -48,7 +48,6 @@ from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSourc
 content = "Users name is John. He is 30 years old and lives in San Francisco."
 string_source = StringKnowledgeSource(
     content=content,
-    metadata={"preference": "personal"}
 )
 
 # Create an LLM with a temperature of 0 to ensure deterministic outputs
@@ -76,7 +75,6 @@ crew = Crew(
     process=Process.sequential,
     knowledge={
         "sources": [string_source],
-        "metadata": {"preference": "personal"}
     }, # Enable knowledge by adding the sources here. You can also add more sources to the sources list.
 )
 
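For reference, a minimal, self-contained sketch of the knowledge setup as it reads after this change. The knowledge dictionary and StringKnowledgeSource usage follow the context lines above; the agent, task, model name, and kickoff question are illustrative assumptions, not part of the diff.

```python
from crewai import Agent, Crew, LLM, Process, Task
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

# Knowledge source as shown in the updated docs: content only, no per-source metadata.
content = "Users name is John. He is 30 years old and lives in San Francisco."
string_source = StringKnowledgeSource(content=content)

# An LLM with temperature 0 for deterministic outputs (model name is a placeholder).
llm = LLM(model="gpt-4o-mini", temperature=0)

# Illustrative agent and task, assumed only to make the example runnable.
agent = Agent(
    role="User Assistant",
    goal="Answer questions about the user.",
    backstory="You know everything about the user.",
    llm=llm,
)
task = Task(
    description="Answer this question about the user: {question}",
    expected_output="A concise answer.",
    agent=agent,
)

crew = Crew(
    agents=[agent],
    tasks=[task],
    process=Process.sequential,
    knowledge={"sources": [string_source]},  # Knowledge is now enabled via the sources list alone.
)

result = crew.kickoff(inputs={"question": "What city does John live in and how old is he?"})
```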
@@ -85,17 +83,6 @@ result = crew.kickoff(inputs={"question": "What city does John live in and how o
 
 ## Knowledge Configuration
 
-### Metadata and Filtering
-
-Knowledge sources support metadata for better organization and filtering. Metadata is used to filter the knowledge sources when querying the knowledge store.
-
-```python Code
-knowledge_source = StringKnowledgeSource(
-    content="Users name is John. He is 30 years old and lives in San Francisco.",
-    metadata={"preference": "personal"} # Metadata is used to filter the knowledge sources
-)
-```
-
 ### Chunking Configuration
 
 Control how content is split for processing by setting the chunk size and overlap.
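The chunking section is only referenced by context lines in this hunk. As a hedged illustration, a knowledge source's chunking can be controlled through chunk_size and chunk_overlap parameters; the values below are assumed defaults, not taken from the diff.

```python
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

# Hedged sketch: chunk_size caps how large each chunk gets, chunk_overlap controls
# how many characters adjacent chunks share so context is not cut mid-thought.
source = StringKnowledgeSource(
    content="A long document whose content will be split into chunks...",
    chunk_size=4000,    # assumed default maximum characters per chunk
    chunk_overlap=200,  # assumed default overlap between adjacent chunks
)
```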
@@ -116,13 +103,11 @@ You can also configure the embedder for the knowledge store. This is useful if y
 ...
 string_source = StringKnowledgeSource(
     content="Users name is John. He is 30 years old and lives in San Francisco.",
-    metadata={"preference": "personal"}
 )
 crew = Crew(
     ...
     knowledge={
         "sources": [string_source],
-        "metadata": {"preference": "personal"},
         "embedder_config": {
             "provider": "openai", # Default embedder provider; can be "ollama", "gemini", etc.
             "config": {"model": "text-embedding-3-small"} # Default embedder model; can be "mxbai-embed-large", "nomic-embed-text", etc.
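Because this hunk shows the embedder configuration only as fragments around `...` placeholders, here is a hedged sketch that assembles the visible pieces into one snippet. Everything shown is taken from the context lines above except the standalone `knowledge` variable, which is introduced here for illustration and would be passed to the crew as Crew(..., knowledge=knowledge).

```python
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

string_source = StringKnowledgeSource(
    content="Users name is John. He is 30 years old and lives in San Francisco.",
)

# Knowledge dictionary with an explicit embedder configuration, assembled from the
# fragments in the hunk above; pass it to the crew as Crew(..., knowledge=knowledge).
knowledge = {
    "sources": [string_source],
    "embedder_config": {
        "provider": "openai",                           # default provider
        "config": {"model": "text-embedding-3-small"},  # default embedding model
    },
}
```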
@@ -326,4 +311,4 @@ recent_news = SpaceNewsKnowledgeSource(
 - Configure appropriate embedding models
 - Consider using local embedding providers for faster processing
 </Accordion>
 </AccordionGroup>
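The best-practice bullet above suggests local embedding providers. A hedged variant of the embedder configuration, using only the provider and model names already mentioned in the diff's own comments (ollama, mxbai-embed-large); whether extra settings such as a server URL are required depends on the local Ollama setup and is not covered here.

```python
# Hedged sketch of a local embedder configuration, reusing the alternatives named in
# the diff's own comments. Assumes a local Ollama server with the model already pulled.
local_embedder_config = {
    "provider": "ollama",
    "config": {"model": "mxbai-embed-large"},
}

# Use it in place of the default inside the crew's knowledge dictionary:
# knowledge={"sources": [string_source], "embedder_config": local_embedder_config}
```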
@@ -5,7 +5,6 @@ from pydantic import BaseModel, ConfigDict, Field
 
 from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
 from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
-from crewai.utilities.constants import DEFAULT_SCORE_THRESHOLD
 
 os.environ["TOKENIZERS_PARALLELISM"] = "false" # removes logging from fastembed
 