diff --git a/docs/concepts/knowledge.mdx b/docs/concepts/knowledge.mdx
index 254a56bb8..e4e40ba3e 100644
--- a/docs/concepts/knowledge.mdx
+++ b/docs/concepts/knowledge.mdx
@@ -288,6 +288,7 @@ The `embedder` parameter supports various embedding model providers that include
 - `ollama`: Local embeddings with Ollama
 - `vertexai`: Google Cloud VertexAI embeddings
 - `cohere`: Cohere's embedding models
+- `voyageai`: VoyageAI's embedding models
 - `bedrock`: AWS Bedrock embeddings
 - `huggingface`: Hugging Face models
 - `watson`: IBM Watson embeddings
diff --git a/docs/concepts/memory.mdx b/docs/concepts/memory.mdx
index b04b29c64..751b6dd2e 100644
--- a/docs/concepts/memory.mdx
+++ b/docs/concepts/memory.mdx
@@ -293,6 +293,26 @@ my_crew = Crew(
     }
 )
 ```
+### Using VoyageAI embeddings
+
+```python Code
+from crewai import Crew, Agent, Task, Process
+
+my_crew = Crew(
+    agents=[...],
+    tasks=[...],
+    process=Process.sequential,
+    memory=True,
+    verbose=True,
+    embedder={
+        "provider": "voyageai",
+        "config": {
+            "api_key": "YOUR_API_KEY",
+            "model_name": ""
+        }
+    }
+)
+```
 ### Using HuggingFace embeddings
 
 ```python Code
diff --git a/docs/how-to/llm-connections.mdx b/docs/how-to/llm-connections.mdx
index 25509c299..33be323b7 100644
--- a/docs/how-to/llm-connections.mdx
+++ b/docs/how-to/llm-connections.mdx
@@ -23,6 +23,7 @@ LiteLLM supports a wide range of providers, including but not limited to:
 - Azure OpenAI
 - AWS (Bedrock, SageMaker)
 - Cohere
+- VoyageAI
 - Hugging Face
 - Ollama
 - Mistral AI
diff --git a/src/crewai/utilities/embedding_configurator.py b/src/crewai/utilities/embedding_configurator.py
index 44e832ec2..71965bf53 100644
--- a/src/crewai/utilities/embedding_configurator.py
+++ b/src/crewai/utilities/embedding_configurator.py
@@ -14,6 +14,7 @@ class EmbeddingConfigurator:
             "vertexai": self._configure_vertexai,
             "google": self._configure_google,
             "cohere": self._configure_cohere,
+            "voyageai": self._configure_voyageai,
             "bedrock": self._configure_bedrock,
             "huggingface": self._configure_huggingface,
             "watson": self._configure_watson,
@@ -124,6 +125,17 @@ class EmbeddingConfigurator:
             api_key=config.get("api_key"),
         )
 
+    @staticmethod
+    def _configure_voyageai(config, model_name):
+        from chromadb.utils.embedding_functions.voyageai_embedding_function import (
+            VoyageAIEmbeddingFunction,
+        )
+
+        return VoyageAIEmbeddingFunction(
+            model_name=model_name,
+            api_key=config.get("api_key"),
+        )
+
     @staticmethod
     def _configure_bedrock(config, model_name):
         from chromadb.utils.embedding_functions.amazon_bedrock_embedding_function import (
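
Usage sketch (not part of the patch): the new provider plugs into the same `embedder` config shape shown in the memory.mdx example above, so knowledge retrieval can use it as well. The `StringKnowledgeSource` import path, the agent/task fields, the `VOYAGE_API_KEY` environment variable, and the `voyage-2` model name below are illustrative assumptions, not values taken from this diff.

```python
# Illustrative sketch only: using the new "voyageai" embedder provider for crew
# knowledge. The knowledge-source import path, agent/task fields, env var name,
# and model name are assumptions, not part of this patch.
import os

from crewai import Agent, Crew, Process, Task
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

facts = StringKnowledgeSource(
    content="VoyageAI embeddings are available as a knowledge/memory embedder provider."
)

researcher = Agent(
    role="Researcher",
    goal="Answer questions using the crew's knowledge sources.",
    backstory="You look up facts before answering.",
)

question = Task(
    description="Which embedding providers can the crew use?",
    expected_output="A short answer based on the knowledge source.",
    agent=researcher,
)

crew = Crew(
    agents=[researcher],
    tasks=[question],
    process=Process.sequential,
    knowledge_sources=[facts],
    embedder={
        "provider": "voyageai",  # dispatches to the new _configure_voyageai
        "config": {
            "api_key": os.environ["VOYAGE_API_KEY"],  # illustrative env var name
            "model_name": "voyage-2",  # illustrative model name
        },
    },
)

result = crew.kickoff()
```

The config dict here is the same one that reaches `EmbeddingConfigurator`, which maps `"voyageai"` to the new `_configure_voyageai` helper and returns ChromaDB's `VoyageAIEmbeddingFunction`.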