diff --git a/README.md b/README.md
index 21ab34390..f79f2a011 100644
--- a/README.md
+++ b/README.md
@@ -252,6 +252,12 @@ or
 python src/my_project/main.py
 ```
 
+If you hit an error caused by Poetry, run the following command to update your crewai package:
+
+```bash
+crewai update
+```
+
 You should see the output in the console and the `report.md` file should be created in the root of your project with the full final report.
 
 In addition to the sequential process, you can use the hierarchical process, which automatically assigns a manager to the defined crew to properly coordinate the planning and execution of tasks through delegation and validation of results. [See more about the processes here](https://docs.crewai.com/core-concepts/Processes/).
diff --git a/docs/concepts/memory.mdx b/docs/concepts/memory.mdx
index d3f9b955a..b07096442 100644
--- a/docs/concepts/memory.mdx
+++ b/docs/concepts/memory.mdx
@@ -34,7 +34,7 @@ By default, the memory system is disabled, and you can ensure it is active by se
 The memory will use OpenAI embeddings by default, but you can change it by setting `embedder` to a different model.
 It's also possible to initialize the memory instance with your own instance.
 
-The 'embedder' only applies to **Short-Term Memory** which uses Chroma for RAG using the EmbedChain package.
+The 'embedder' only applies to **Short-Term Memory**, which uses Chroma for RAG.
 The **Long-Term Memory** uses SQLite3 to store task results. Currently, there is no way to override these storage implementations.
 The data storage files are saved into a platform-specific location found using the appdirs package, and the name of the project can be overridden using the **CREWAI_STORAGE_DIR** environment variable.
 
@@ -105,12 +105,9 @@ my_crew = Crew(
     process=Process.sequential,
     memory=True,
     verbose=True,
-    embedder={
-        "provider": "openai",
-        "config": {
-            "model": 'text-embedding-3-small'
-        }
-    }
+    embedder=embedding_functions.OpenAIEmbeddingFunction(
+        api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
+    )
 )
 ```
 
@@ -125,14 +122,10 @@ my_crew = Crew(
     process=Process.sequential,
     memory=True,
     verbose=True,
-    embedder={
-        "provider": "google",
-        "config": {
-            "model": 'models/embedding-001',
-            "task_type": "retrieval_document",
-            "title": "Embeddings for Embedchain"
-        }
-    }
+    embedder=embedding_functions.GoogleGenerativeAiEmbeddingFunction(
+        api_key="YOUR_API_KEY",
+        model_name="models/embedding-001"
+    )
 )
 ```
 
@@ -147,30 +140,13 @@ my_crew = Crew(
     process=Process.sequential,
     memory=True,
     verbose=True,
-    embedder={
-        "provider": "azure_openai",
-        "config": {
-            "model": 'text-embedding-ada-002',
-            "deployment_name": "your_embedding_model_deployment_name"
-        }
-    }
-)
-```
-
-### Using GPT4ALL embeddings
-
-```python Code
-from crewai import Crew, Agent, Task, Process
-
-my_crew = Crew(
-    agents=[...],
-    tasks=[...],
-    process=Process.sequential,
-    memory=True,
-    verbose=True,
-    embedder={
-        "provider": "gpt4all"
-    }
+    embedder=embedding_functions.OpenAIEmbeddingFunction(
+        api_key="YOUR_API_KEY",
+        api_base="YOUR_API_BASE_PATH",
+        api_type="azure",
+        api_version="YOUR_API_VERSION",
+        model_name="text-embedding-3-small"
+    )
 )
 ```
 
@@ -185,12 +161,12 @@ my_crew = Crew(
     process=Process.sequential,
     memory=True,
     verbose=True,
-    embedder={
-        "provider": "vertexai",
-        "config": {
-            "model": 'textembedding-gecko'
-        }
-    }
+    embedder=embedding_functions.GoogleVertexEmbeddingFunction(
+        project_id="YOUR_PROJECT_ID",
+        region="YOUR_REGION",
+        api_key="YOUR_API_KEY",
+        model_name="textembedding-gecko"
+    )
 )
 ```
 
@@ -205,13 +181,10 @@ my_crew = Crew(
     process=Process.sequential,
     memory=True,
     verbose=True,
-    embedder={
-        "provider": "cohere",
-        "config": {
-            "model": "embed-english-v3.0",
-            "vector_dimension": 1024
-        }
-    }
+    embedder=embedding_functions.CohereEmbeddingFunction(
+        api_key="YOUR_API_KEY",
+        model_name="embed-english-v3.0"
+    )
 )
 ```
 
diff --git a/docs/tools/visiontool.mdx b/docs/tools/visiontool.mdx
index 83856e34b..f4eab0f1c 100644
--- a/docs/tools/visiontool.mdx
+++ b/docs/tools/visiontool.mdx
@@ -8,13 +8,13 @@ icon: eye
 
 
 ## Description
 
-This tool is used to extract text from images. When passed to the agent it will extract the text from the image and then use it to generate a response, report or any other output.
+This tool is used to extract text from images. When passed to the agent, it will extract the text from the image and then use it to generate a response, report, or any other output. The URL or the path of the image should be passed to the agent.
 
-
 ## Installation
 Install the crewai_tools package
+
 ```shell
 pip install 'crewai[tools]'
 ```
 
@@ -44,7 +44,6 @@ def researcher(self) -> Agent:
 
 The VisionTool requires the following arguments:
 
-| Argument       | Type     | Description                                                                       |
-|:---------------|:---------|:----------------------------------------------------------------------------------|
-| **image_path** | `string` | **Mandatory**. The path to the image file from which text needs to be extracted.  |
-
+| Argument           | Type     | Description                                                                         |
+| :----------------- | :------- | :---------------------------------------------------------------------------------- |
+| **image_path_url** | `string` | **Mandatory**. The path or URL of the image from which text needs to be extracted.  |
diff --git a/pyproject.toml b/pyproject.toml
index 2dbcdedc9..e47748e28 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "crewai"
-version = "0.70.1"
+version = "0.74.0"
 description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
readme = "README.md" requires-python = ">=3.10,<=3.13" @@ -16,19 +16,18 @@ dependencies = [ "opentelemetry-exporter-otlp-proto-http>=1.22.0", "instructor>=1.3.3", "regex>=2024.9.11", - "crewai-tools>=0.12.1", + "crewai-tools>=0.13.1", "click>=8.1.7", "python-dotenv>=1.0.0", "appdirs>=1.4.4", "jsonref>=1.1.0", - "agentops>=0.3.0", - "embedchain>=0.1.114", "json-repair>=0.25.2", "auth0-python>=4.7.1", "litellm>=1.44.22", "pyvis>=0.3.2", "uv>=0.4.18", "tomli-w>=1.1.0", + "chromadb>=0.4.24", ] [project.urls] diff --git a/src/crewai/__init__.py b/src/crewai/__init__.py index d237ad2a4..a72d6e805 100644 --- a/src/crewai/__init__.py +++ b/src/crewai/__init__.py @@ -14,5 +14,5 @@ warnings.filterwarnings( category=UserWarning, module="pydantic.main", ) -__version__ = "0.70.1" +__version__ = "0.74.0" __all__ = ["Agent", "Crew", "Process", "Task", "Pipeline", "Router", "LLM", "Flow"] diff --git a/src/crewai/agents/agent_builder/base_agent_executor_mixin.py b/src/crewai/agents/agent_builder/base_agent_executor_mixin.py index bf2e2841b..c452848e8 100644 --- a/src/crewai/agents/agent_builder/base_agent_executor_mixin.py +++ b/src/crewai/agents/agent_builder/base_agent_executor_mixin.py @@ -17,7 +17,7 @@ if TYPE_CHECKING: class CrewAgentExecutorMixin: crew: Optional["Crew"] - crew_agent: Optional["BaseAgent"] + agent: Optional["BaseAgent"] task: Optional["Task"] iterations: int have_forced_answer: bool @@ -33,9 +33,9 @@ class CrewAgentExecutorMixin: """Create and save a short-term memory item if conditions are met.""" if ( self.crew - and self.crew_agent + and self.agent and self.task - and "Action: Delegate work to coworker" not in output.log + and "Action: Delegate work to coworker" not in output.text ): try: if ( @@ -43,11 +43,11 @@ class CrewAgentExecutorMixin: and self.crew._short_term_memory ): self.crew._short_term_memory.save( - value=output.log, + value=output.text, metadata={ "observation": self.task.description, }, - agent=self.crew_agent.role, + agent=self.agent.role, ) except Exception as e: print(f"Failed to add to short term memory: {e}") @@ -61,18 +61,18 @@ class CrewAgentExecutorMixin: and self.crew._long_term_memory and self.crew._entity_memory and self.task - and self.crew_agent + and self.agent ): try: - ltm_agent = TaskEvaluator(self.crew_agent) - evaluation = ltm_agent.evaluate(self.task, output.log) + ltm_agent = TaskEvaluator(self.agent) + evaluation = ltm_agent.evaluate(self.task, output.text) if isinstance(evaluation, ConverterError): return long_term_memory = LongTermMemoryItem( task=self.task.description, - agent=self.crew_agent.role, + agent=self.agent.role, quality=evaluation.quality, datetime=str(time.time()), expected_output=self.task.expected_output, diff --git a/src/crewai/agents/crew_agent_executor.py b/src/crewai/agents/crew_agent_executor.py index 0c510ef71..b901fe132 100644 --- a/src/crewai/agents/crew_agent_executor.py +++ b/src/crewai/agents/crew_agent_executor.py @@ -19,6 +19,7 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import ( ) from crewai.utilities.logger import Logger from crewai.utilities.training_handler import CrewTrainingHandler +from crewai.agents.agent_builder.base_agent import BaseAgent class CrewAgentExecutor(CrewAgentExecutorMixin): @@ -29,7 +30,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): llm: Any, task: Any, crew: Any, - agent: Any, + agent: BaseAgent, prompt: dict[str, str], max_iter: int, tools: List[Any], @@ -103,7 +104,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): if self.crew and self.crew._train: 
self._handle_crew_training_output(formatted_answer) - + self._create_short_term_memory(formatted_answer) + self._create_long_term_memory(formatted_answer) return {"output": formatted_answer.output} def _invoke_loop(self, formatted_answer=None): @@ -176,6 +178,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): return formatted_answer def _show_start_logs(self): + if self.agent is None: + raise ValueError("Agent cannot be None") if self.agent.verbose or ( hasattr(self, "crew") and getattr(self.crew, "verbose", False) ): @@ -188,6 +192,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): ) def _show_logs(self, formatted_answer: Union[AgentAction, AgentFinish]): + if self.agent is None: + raise ValueError("Agent cannot be None") if self.agent.verbose or ( hasattr(self, "crew") and getattr(self.crew, "verbose", False) ): @@ -306,7 +312,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): self, result: AgentFinish, human_feedback: str | None = None ) -> None: """Function to handle the process of the training data.""" - agent_id = str(self.agent.id) + agent_id = str(self.agent.id) # type: ignore # Load training data training_handler = CrewTrainingHandler(TRAINING_DATA_FILE) @@ -339,7 +345,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): "initial_output": result.output, "human_feedback": human_feedback, "agent": agent_id, - "agent_role": self.agent.role, + "agent_role": self.agent.role, # type: ignore } if self.crew is not None and hasattr(self.crew, "_train_iteration"): train_iteration = self.crew._train_iteration diff --git a/src/crewai/cli/constants.py b/src/crewai/cli/constants.py new file mode 100644 index 000000000..9a0b36c39 --- /dev/null +++ b/src/crewai/cli/constants.py @@ -0,0 +1,19 @@ +ENV_VARS = { + 'openai': ['OPENAI_API_KEY'], + 'anthropic': ['ANTHROPIC_API_KEY'], + 'gemini': ['GEMINI_API_KEY'], + 'groq': ['GROQ_API_KEY'], + 'ollama': ['FAKE_KEY'], +} + +PROVIDERS = ['openai', 'anthropic', 'gemini', 'groq', 'ollama'] + +MODELS = { + 'openai': ['gpt-4', 'gpt-4o', 'gpt-4o-mini', 'o1-mini', 'o1-preview'], + 'anthropic': ['claude-3-5-sonnet-20240620', 'claude-3-sonnet-20240229', 'claude-3-opus-20240229', 'claude-3-haiku-20240307'], + 'gemini': ['gemini-1.5-flash', 'gemini-1.5-pro', 'gemini-gemma-2-9b-it', 'gemini-gemma-2-27b-it'], + 'groq': ['llama-3.1-8b-instant', 'llama-3.1-70b-versatile', 'llama-3.1-405b-reasoning', 'gemma2-9b-it', 'gemma-7b-it'], + 'ollama': ['llama3.1', 'mixtral'], +} + +JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json" \ No newline at end of file diff --git a/src/crewai/cli/create_crew.py b/src/crewai/cli/create_crew.py index 510d4f431..f74331a23 100644 --- a/src/crewai/cli/create_crew.py +++ b/src/crewai/cli/create_crew.py @@ -1,12 +1,11 @@ from pathlib import Path - import click - -from crewai.cli.utils import copy_template +from crewai.cli.utils import copy_template, load_env_vars, write_env_file +from crewai.cli.provider import get_provider_data, select_provider, PROVIDERS +from crewai.cli.constants import ENV_VARS -def create_crew(name, parent_folder=None): - """Create a new crew.""" +def create_folder_structure(name, parent_folder=None): folder_name = name.replace(" ", "_").replace("-", "_").lower() class_name = name.replace("_", " ").replace("-", " ").title().replace(" ", "") @@ -28,19 +27,84 @@ def create_crew(name, parent_folder=None): (folder_path / "src" / folder_name).mkdir(parents=True) (folder_path / "src" / folder_name / "tools").mkdir(parents=True) (folder_path / "src" / folder_name / 
"config").mkdir(parents=True) - with open(folder_path / ".env", "w") as file: - file.write("OPENAI_API_KEY=YOUR_API_KEY") else: click.secho( - f"\tFolder {folder_name} already exists. Please choose a different name.", - fg="red", + f"\tFolder {folder_name} already exists.", + fg="yellow", ) + + return folder_path, folder_name, class_name + + +def copy_template_files(folder_path, name, class_name, parent_folder): + package_dir = Path(__file__).parent + templates_dir = package_dir / "templates" / "crew" + + root_template_files = ( + [".gitignore", "pyproject.toml", "README.md"] if not parent_folder else [] + ) + tools_template_files = ["tools/custom_tool.py", "tools/__init__.py"] + config_template_files = ["config/agents.yaml", "config/tasks.yaml"] + src_template_files = ( + ["__init__.py", "main.py", "crew.py"] if not parent_folder else ["crew.py"] + ) + + for file_name in root_template_files: + src_file = templates_dir / file_name + dst_file = folder_path / file_name + copy_template(src_file, dst_file, name, class_name, folder_path.name) + + src_folder = ( + folder_path / "src" / folder_path.name if not parent_folder else folder_path + ) + + for file_name in src_template_files: + src_file = templates_dir / file_name + dst_file = src_folder / file_name + copy_template(src_file, dst_file, name, class_name, folder_path.name) + + if not parent_folder: + for file_name in tools_template_files + config_template_files: + src_file = templates_dir / file_name + dst_file = src_folder / file_name + copy_template(src_file, dst_file, name, class_name, folder_path.name) + + +def create_crew(name, parent_folder=None): + folder_path, folder_name, class_name = create_folder_structure(name, parent_folder) + env_vars = load_env_vars(folder_path) + + provider_models = get_provider_data() + if not provider_models: return + selected_provider = select_provider(provider_models) + if not selected_provider: + return + provider = selected_provider + + # selected_model = select_model(provider, provider_models) + # if not selected_model: + # return + # model = selected_model + + if provider in PROVIDERS: + api_key_var = ENV_VARS[provider][0] + else: + api_key_var = click.prompt( + f"Enter the environment variable name for your {provider.capitalize()} API key", + type=str, + ) + + env_vars = {api_key_var: "YOUR_API_KEY_HERE"} + write_env_file(folder_path, env_vars) + + # env_vars['MODEL'] = model + # click.secho(f"Selected model: {model}", fg="green") + package_dir = Path(__file__).parent templates_dir = package_dir / "templates" / "crew" - # List of template files to copy root_template_files = ( [".gitignore", "pyproject.toml", "README.md"] if not parent_folder else [] ) diff --git a/src/crewai/cli/provider.py b/src/crewai/cli/provider.py new file mode 100644 index 000000000..f829ca9fd --- /dev/null +++ b/src/crewai/cli/provider.py @@ -0,0 +1,186 @@ +import json +import time +import requests +from collections import defaultdict +import click +from pathlib import Path +from crewai.cli.constants import PROVIDERS, MODELS, JSON_URL + +def select_choice(prompt_message, choices): + """ + Presents a list of choices to the user and prompts them to select one. + + Args: + - prompt_message (str): The message to display to the user before presenting the choices. + - choices (list): A list of options to present to the user. + + Returns: + - str: The selected choice from the list, or None if the operation is aborted or an invalid selection is made. 
+ """ + click.secho(prompt_message, fg="cyan") + for idx, choice in enumerate(choices, start=1): + click.secho(f"{idx}. {choice}", fg="cyan") + try: + selected_index = click.prompt("Enter the number of your choice", type=int) - 1 + except click.exceptions.Abort: + click.secho("Operation aborted by the user.", fg="red") + return None + if not (0 <= selected_index < len(choices)): + click.secho("Invalid selection.", fg="red") + return None + return choices[selected_index] + +def select_provider(provider_models): + """ + Presents a list of providers to the user and prompts them to select one. + + Args: + - provider_models (dict): A dictionary of provider models. + + Returns: + - str: The selected provider, or None if the operation is aborted or an invalid selection is made. + """ + predefined_providers = [p.lower() for p in PROVIDERS] + all_providers = sorted(set(predefined_providers + list(provider_models.keys()))) + + provider = select_choice("Select a provider to set up:", predefined_providers + ['other']) + if not provider: + return None + provider = provider.lower() + + if provider == 'other': + provider = select_choice("Select a provider from the full list:", all_providers) + if not provider: + return None + return provider + +def select_model(provider, provider_models): + """ + Presents a list of models for a given provider to the user and prompts them to select one. + + Args: + - provider (str): The provider for which to select a model. + - provider_models (dict): A dictionary of provider models. + + Returns: + - str: The selected model, or None if the operation is aborted or an invalid selection is made. + """ + predefined_providers = [p.lower() for p in PROVIDERS] + + if provider in predefined_providers: + available_models = MODELS.get(provider, []) + else: + available_models = provider_models.get(provider, []) + + if not available_models: + click.secho(f"No models available for provider '{provider}'.", fg="red") + return None + + selected_model = select_choice(f"Select a model to use for {provider.capitalize()}:", available_models) + return selected_model + +def load_provider_data(cache_file, cache_expiry): + """ + Loads provider data from a cache file if it exists and is not expired. If the cache is expired or corrupted, it fetches the data from the web. + + Args: + - cache_file (Path): The path to the cache file. + - cache_expiry (int): The cache expiry time in seconds. + + Returns: + - dict or None: The loaded provider data or None if the operation fails. + """ + current_time = time.time() + if cache_file.exists() and (current_time - cache_file.stat().st_mtime) < cache_expiry: + data = read_cache_file(cache_file) + if data: + return data + click.secho("Cache is corrupted. Fetching provider data from the web...", fg="yellow") + else: + click.secho("Cache expired or not found. Fetching provider data from the web...", fg="cyan") + return fetch_provider_data(cache_file) + +def read_cache_file(cache_file): + """ + Reads and returns the JSON content from a cache file. Returns None if the file contains invalid JSON. + + Args: + - cache_file (Path): The path to the cache file. + + Returns: + - dict or None: The JSON content of the cache file or None if the JSON is invalid. + """ + try: + with open(cache_file, "r") as f: + return json.load(f) + except json.JSONDecodeError: + return None + +def fetch_provider_data(cache_file): + """ + Fetches provider data from a specified URL and caches it to a file. + + Args: + - cache_file (Path): The path to the cache file. 
+ + Returns: + - dict or None: The fetched provider data or None if the operation fails. + """ + try: + response = requests.get(JSON_URL, stream=True, timeout=10) + response.raise_for_status() + data = download_data(response) + with open(cache_file, "w") as f: + json.dump(data, f) + return data + except requests.RequestException as e: + click.secho(f"Error fetching provider data: {e}", fg="red") + except json.JSONDecodeError: + click.secho("Error parsing provider data. Invalid JSON format.", fg="red") + return None + +def download_data(response): + """ + Downloads data from a given HTTP response and returns the JSON content. + + Args: + - response (requests.Response): The HTTP response object. + + Returns: + - dict: The JSON content of the response. + """ + total_size = int(response.headers.get('content-length', 0)) + block_size = 8192 + data_chunks = [] + with click.progressbar(length=total_size, label='Downloading', show_pos=True) as progress_bar: + for chunk in response.iter_content(block_size): + if chunk: + data_chunks.append(chunk) + progress_bar.update(len(chunk)) + data_content = b''.join(data_chunks) + return json.loads(data_content.decode('utf-8')) + +def get_provider_data(): + """ + Retrieves provider data from a cache file, filters out models based on provider criteria, and returns a dictionary of providers mapped to their models. + + Returns: + - dict or None: A dictionary of providers mapped to their models or None if the operation fails. + """ + cache_dir = Path.home() / '.crewai' + cache_dir.mkdir(exist_ok=True) + cache_file = cache_dir / 'provider_cache.json' + cache_expiry = 24 * 3600 + + data = load_provider_data(cache_file, cache_expiry) + if not data: + return None + + provider_models = defaultdict(list) + for model_name, properties in data.items(): + provider = properties.get("litellm_provider", "").strip().lower() + if 'http' in provider or provider == 'other': + continue + if provider: + provider_models[provider].append(model_name) + return provider_models \ No newline at end of file diff --git a/src/crewai/cli/run_crew.py b/src/crewai/cli/run_crew.py index 0b1ac29bc..829d8ed95 100644 --- a/src/crewai/cli/run_crew.py +++ b/src/crewai/cli/run_crew.py @@ -2,6 +2,9 @@ import subprocess import click import tomllib +from packaging import version + +from crewai.cli.utils import get_crewai_version def run_crew() -> None: @@ -9,6 +12,22 @@ def run_crew() -> None: Run the crew by running a command in the UV environment. """ command = ["uv", "run", "run_crew"] + crewai_version = get_crewai_version() + min_required_version = "0.71.0" + + with open("pyproject.toml", "rb") as f: + data = tomllib.load(f) + + if data.get("tool", {}).get("poetry") and ( + version.parse(crewai_version) < version.parse(min_required_version) + ): + click.secho( + f"You are running an older version of crewAI ({crewai_version}) that uses poetry pyproject.toml. 
" + f"Please run `crewai update` to update your pyproject.toml to use uv.", + fg="red", + ) + print() + try: subprocess.run(command, capture_output=False, text=True, check=True) diff --git a/src/crewai/cli/templates/crew/pyproject.toml b/src/crewai/cli/templates/crew/pyproject.toml index 54bb2ad34..d387443a1 100644 --- a/src/crewai/cli/templates/crew/pyproject.toml +++ b/src/crewai/cli/templates/crew/pyproject.toml @@ -5,7 +5,7 @@ description = "{{name}} using crewAI" authors = [{ name = "Your Name", email = "you@example.com" }] requires-python = ">=3.10,<=3.13" dependencies = [ - "crewai[tools]>=0.67.1,<1.0.0" + "crewai[tools]>=0.74.0,<1.0.0" ] [project.scripts] diff --git a/src/crewai/cli/templates/flow/pyproject.toml b/src/crewai/cli/templates/flow/pyproject.toml index 476d24f44..9db16e2a2 100644 --- a/src/crewai/cli/templates/flow/pyproject.toml +++ b/src/crewai/cli/templates/flow/pyproject.toml @@ -5,7 +5,7 @@ description = "{{name}} using crewAI" authors = [{ name = "Your Name", email = "you@example.com" }] requires-python = ">=3.10,<=3.13" dependencies = [ - "crewai[tools]>=0.67.1,<1.0.0", + "crewai[tools]>=0.74.0,<1.0.0", "asyncio" ] diff --git a/src/crewai/cli/templates/pipeline/pyproject.toml b/src/crewai/cli/templates/pipeline/pyproject.toml index 933f62d89..3eb9b73d1 100644 --- a/src/crewai/cli/templates/pipeline/pyproject.toml +++ b/src/crewai/cli/templates/pipeline/pyproject.toml @@ -6,7 +6,7 @@ authors = ["Your Name "] [tool.poetry.dependencies] python = ">=3.10,<=3.13" -crewai = { extras = ["tools"], version = ">=0.70.1,<1.0.0" } +crewai = { extras = ["tools"], version = ">=0.74.0,<1.0.0" } asyncio = "*" [tool.poetry.scripts] diff --git a/src/crewai/cli/templates/pipeline_router/pyproject.toml b/src/crewai/cli/templates/pipeline_router/pyproject.toml index 3bc64ee00..dabc8d281 100644 --- a/src/crewai/cli/templates/pipeline_router/pyproject.toml +++ b/src/crewai/cli/templates/pipeline_router/pyproject.toml @@ -5,7 +5,7 @@ description = "{{name}} using crewAI" authors = ["Your Name "] requires-python = ">=3.10,<=3.13" dependencies = [ - "crewai[tools]>=0.67.1,<1.0.0" + "crewai[tools]>=0.74.0,<1.0.0" ] [project.scripts] diff --git a/src/crewai/cli/templates/tool/pyproject.toml b/src/crewai/cli/templates/tool/pyproject.toml index a5b2ece4f..0fef250f5 100644 --- a/src/crewai/cli/templates/tool/pyproject.toml +++ b/src/crewai/cli/templates/tool/pyproject.toml @@ -5,6 +5,6 @@ description = "Power up your crews with {{folder_name}}" readme = "README.md" requires-python = ">=3.10,<=3.13" dependencies = [ - "crewai[tools]>=0.70.1" + "crewai[tools]>=0.74.0" ] diff --git a/src/crewai/cli/utils.py b/src/crewai/cli/utils.py index d8c048df8..c32113faa 100644 --- a/src/crewai/cli/utils.py +++ b/src/crewai/cli/utils.py @@ -9,6 +9,7 @@ import click from rich.console import Console from crewai.cli.authentication.utils import TokenManager +from crewai.cli.constants import ENV_VARS if sys.version_info >= (3, 11): import tomllib @@ -200,3 +201,76 @@ def tree_find_and_replace(directory, find, replace): new_dirpath = os.path.join(path, new_dirname) old_dirpath = os.path.join(path, dirname) os.rename(old_dirpath, new_dirpath) + + +def load_env_vars(folder_path): + """ + Loads environment variables from a .env file in the specified folder path. + + Args: + - folder_path (Path): The path to the folder containing the .env file. + + Returns: + - dict: A dictionary of environment variables. 
+ """ + env_file_path = folder_path / ".env" + env_vars = {} + if env_file_path.exists(): + with open(env_file_path, "r") as file: + for line in file: + key, _, value = line.strip().partition("=") + if key and value: + env_vars[key] = value + return env_vars + + +def update_env_vars(env_vars, provider, model): + """ + Updates environment variables with the API key for the selected provider and model. + + Args: + - env_vars (dict): Environment variables dictionary. + - provider (str): Selected provider. + - model (str): Selected model. + + Returns: + - None + """ + api_key_var = ENV_VARS.get( + provider, + [ + click.prompt( + f"Enter the environment variable name for your {provider.capitalize()} API key", + type=str, + ) + ], + )[0] + + if api_key_var not in env_vars: + try: + env_vars[api_key_var] = click.prompt( + f"Enter your {provider.capitalize()} API key", type=str, hide_input=True + ) + except click.exceptions.Abort: + click.secho("Operation aborted by the user.", fg="red") + return None + else: + click.secho(f"API key already exists for {provider.capitalize()}.", fg="yellow") + + env_vars["MODEL"] = model + click.secho(f"Selected model: {model}", fg="green") + return env_vars + + +def write_env_file(folder_path, env_vars): + """ + Writes environment variables to a .env file in the specified folder. + + Args: + - folder_path (Path): The path to the folder where the .env file will be written. + - env_vars (dict): A dictionary of environment variables to write. + """ + env_file_path = folder_path / ".env" + with open(env_file_path, "w") as file: + for key, value in env_vars.items(): + file.write(f"{key}={value}\n") diff --git a/src/crewai/crew.py b/src/crewai/crew.py index 84de8f6c9..5c3979cec 100644 --- a/src/crewai/crew.py +++ b/src/crewai/crew.py @@ -126,8 +126,8 @@ class Crew(BaseModel): default=None, description="An Instance of the EntityMemory to be used by the Crew", ) - embedder: Optional[dict] = Field( - default={"provider": "openai"}, + embedder: Optional[Any] = Field( + default=None, description="Configuration for the embedder to be used for the crew.", ) usage_metrics: Optional[UsageMetrics] = Field( @@ -774,7 +774,9 @@ class Crew(BaseModel): def _log_task_start(self, task: Task, role: str = "None"): if self.output_log_file: - self._file_handler.log(task_name=task.name, task=task.description, agent=role, status="started") + self._file_handler.log( + task_name=task.name, task=task.description, agent=role, status="started" + ) def _update_manager_tools(self, task: Task): if self.manager_agent: @@ -796,7 +798,13 @@ class Crew(BaseModel): def _process_task_result(self, task: Task, output: TaskOutput) -> None: role = task.agent.role if task.agent is not None else "None" if self.output_log_file: - self._file_handler.log(task_name=task.name, task=task.description, agent=role, status="completed", output=output.raw) + self._file_handler.log( + task_name=task.name, + task=task.description, + agent=role, + status="completed", + output=output.raw, + ) def _create_crew_output(self, task_outputs: List[TaskOutput]) -> CrewOutput: if len(task_outputs) != 1: diff --git a/src/crewai/memory/contextual/contextual_memory.py b/src/crewai/memory/contextual/contextual_memory.py index 23435c04c..5d91cf47d 100644 --- a/src/crewai/memory/contextual/contextual_memory.py +++ b/src/crewai/memory/contextual/contextual_memory.py @@ -31,7 +31,9 @@ class ContextualMemory: formatted as bullet points. 
""" stm_results = self.stm.search(query) - formatted_results = "\n".join([f"- {result}" for result in stm_results]) + formatted_results = "\n".join( + [f"- {result['context']}" for result in stm_results] + ) return f"Recent Insights:\n{formatted_results}" if stm_results else "" def _fetch_ltm_context(self, task) -> Optional[str]: diff --git a/src/crewai/memory/long_term/long_term_memory.py b/src/crewai/memory/long_term/long_term_memory.py index ab225e406..b9c36bdc9 100644 --- a/src/crewai/memory/long_term/long_term_memory.py +++ b/src/crewai/memory/long_term/long_term_memory.py @@ -1,4 +1,4 @@ -from typing import Any, Dict +from typing import Any, Dict, List from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem from crewai.memory.memory import Memory @@ -28,7 +28,7 @@ class LongTermMemory(Memory): datetime=item.datetime, ) - def search(self, task: str, latest_n: int = 3) -> Dict[str, Any]: + def search(self, task: str, latest_n: int = 3) -> List[Dict[str, Any]]: # type: ignore # signature of "search" incompatible with supertype "Memory" return self.storage.load(task, latest_n) # type: ignore # BUG?: "Storage" has no attribute "load" def reset(self) -> None: diff --git a/src/crewai/memory/memory.py b/src/crewai/memory/memory.py index 9df09d3c7..d0bcd614f 100644 --- a/src/crewai/memory/memory.py +++ b/src/crewai/memory/memory.py @@ -1,6 +1,6 @@ -from typing import Any, Dict, Optional +from typing import Any, Dict, Optional, List -from crewai.memory.storage.interface import Storage +from crewai.memory.storage.rag_storage import RAGStorage class Memory: @@ -8,7 +8,7 @@ class Memory: Base class for memory, now supporting agent tags and generic metadata. """ - def __init__(self, storage: Storage): + def __init__(self, storage: RAGStorage): self.storage = storage def save( @@ -23,5 +23,5 @@ class Memory: self.storage.save(value, metadata) - def search(self, query: str) -> Dict[str, Any]: + def search(self, query: str) -> List[Dict[str, Any]]: return self.storage.search(query) diff --git a/src/crewai/memory/storage/base_rag_storage.py b/src/crewai/memory/storage/base_rag_storage.py new file mode 100644 index 000000000..10b82ebff --- /dev/null +++ b/src/crewai/memory/storage/base_rag_storage.py @@ -0,0 +1,76 @@ +from abc import ABC, abstractmethod +from typing import Any, Dict, List, Optional + + +class BaseRAGStorage(ABC): + """ + Base class for RAG-based Storage implementations. 
+ """ + + app: Any | None = None + + def __init__( + self, + type: str, + allow_reset: bool = True, + embedder_config: Optional[Any] = None, + crew: Any = None, + ): + self.type = type + self.allow_reset = allow_reset + self.embedder_config = embedder_config + self.crew = crew + self.agents = self._initialize_agents() + + def _initialize_agents(self) -> str: + if self.crew: + return "_".join( + [self._sanitize_role(agent.role) for agent in self.crew.agents] + ) + return "" + + @abstractmethod + def _sanitize_role(self, role: str) -> str: + """Sanitizes agent roles to ensure valid directory names.""" + pass + + @abstractmethod + def save(self, value: Any, metadata: Dict[str, Any]) -> None: + """Save a value with metadata to the storage.""" + pass + + @abstractmethod + def search( + self, + query: str, + limit: int = 3, + filter: Optional[dict] = None, + score_threshold: float = 0.35, + ) -> List[Any]: + """Search for entries in the storage.""" + pass + + @abstractmethod + def reset(self) -> None: + """Reset the storage.""" + pass + + @abstractmethod + def _generate_embedding( + self, text: str, metadata: Optional[Dict[str, Any]] = None + ) -> Any: + """Generate an embedding for the given text and metadata.""" + pass + + @abstractmethod + def _initialize_app(self): + """Initialize the vector db.""" + pass + + def setup_config(self, config: Dict[str, Any]): + """Setup the config of the storage.""" + pass + + def initialize_client(self): + """Initialize the client of the storage. This should setup the app and the db collection""" + pass diff --git a/src/crewai/memory/storage/interface.py b/src/crewai/memory/storage/interface.py index 0ffc1de16..8fbe10b03 100644 --- a/src/crewai/memory/storage/interface.py +++ b/src/crewai/memory/storage/interface.py @@ -1,4 +1,4 @@ -from typing import Any, Dict +from typing import Any, Dict, List class Storage: @@ -7,7 +7,7 @@ class Storage: def save(self, value: Any, metadata: Dict[str, Any]) -> None: pass - def search(self, key: str) -> Dict[str, Any]: # type: ignore + def search(self, key: str) -> List[Dict[str, Any]]: # type: ignore pass def reset(self) -> None: diff --git a/src/crewai/memory/storage/rag_storage.py b/src/crewai/memory/storage/rag_storage.py index ef051bed4..8d45d9f5a 100644 --- a/src/crewai/memory/storage/rag_storage.py +++ b/src/crewai/memory/storage/rag_storage.py @@ -3,10 +3,11 @@ import io import logging import os import shutil +import uuid from typing import Any, Dict, List, Optional - -from crewai.memory.storage.interface import Storage +from crewai.memory.storage.base_rag_storage import BaseRAGStorage from crewai.utilities.paths import db_storage_path +from chromadb.api import ClientAPI @contextlib.contextmanager @@ -24,61 +25,42 @@ def suppress_logging( logger.setLevel(original_level) -class RAGStorage(Storage): +class RAGStorage(BaseRAGStorage): """ Extends Storage to handle embeddings for memory entries, improving search efficiency. 
""" - def __init__(self, type, allow_reset=True, embedder_config=None, crew=None): - super().__init__() - if ( - not os.getenv("OPENAI_API_KEY") - and not os.getenv("OPENAI_BASE_URL") == "https://api.openai.com/v1" - ): - os.environ["OPENAI_API_KEY"] = "fake" + app: ClientAPI | None = None + def __init__(self, type, allow_reset=True, embedder_config=None, crew=None): + super().__init__(type, allow_reset, embedder_config, crew) agents = crew.agents if crew else [] agents = [self._sanitize_role(agent.role) for agent in agents] agents = "_".join(agents) + self.agents = agents - config = { - "app": { - "config": {"name": type, "collect_metrics": False, "log_level": "ERROR"} - }, - "chunker": { - "chunk_size": 5000, - "chunk_overlap": 100, - "length_function": "len", - "min_chunk_size": 150, - }, - "vectordb": { - "provider": "chroma", - "config": { - "collection_name": type, - "dir": f"{db_storage_path()}/{type}/{agents}", - "allow_reset": allow_reset, - }, - }, - } - - if embedder_config: - config["embedder"] = embedder_config self.type = type - self.config = config + self.embedder_config = embedder_config or self._create_embedding_function() self.allow_reset = allow_reset + self._initialize_app() def _initialize_app(self): - from embedchain import App - from embedchain.llm.base import BaseLlm + import chromadb - class FakeLLM(BaseLlm): - pass + chroma_client = chromadb.PersistentClient( + path=f"{db_storage_path()}/{self.type}/{self.agents}" + ) + self.app = chroma_client - self.app = App.from_config(config=self.config) - self.app.llm = FakeLLM() - if self.allow_reset: - self.app.reset() + try: + self.collection = self.app.get_collection( + name=self.type, embedding_function=self.embedder_config + ) + except Exception: + self.collection = self.app.create_collection( + name=self.type, embedding_function=self.embedder_config + ) def _sanitize_role(self, role: str) -> str: """ @@ -87,11 +69,14 @@ class RAGStorage(Storage): return role.replace("\n", "").replace(" ", "_").replace("/", "_") def save(self, value: Any, metadata: Dict[str, Any]) -> None: - if not hasattr(self, "app"): + if not hasattr(self, "app") or not hasattr(self, "collection"): self._initialize_app() - self._generate_embedding(value, metadata) + try: + self._generate_embedding(value, metadata) + except Exception as e: + logging.error(f"Error during {self.type} save: {str(e)}") - def search( # type: ignore # BUG?: Signature of "search" incompatible with supertype "Storage" + def search( self, query: str, limit: int = 3, @@ -100,31 +85,50 @@ class RAGStorage(Storage): ) -> List[Any]: if not hasattr(self, "app"): self._initialize_app() - from embedchain.vectordb.chroma import InvalidDimensionException - with suppress_logging(): - try: - results = ( - self.app.search(query, limit, where=filter) - if filter - else self.app.search(query, limit) - ) - except InvalidDimensionException: - self.app.reset() - return [] - return [r for r in results if r["metadata"]["score"] >= score_threshold] + try: + with suppress_logging(): + response = self.collection.query(query_texts=query, n_results=limit) - def _generate_embedding(self, text: str, metadata: Dict[str, Any]) -> Any: - if not hasattr(self, "app"): + results = [] + for i in range(len(response["ids"][0])): + result = { + "id": response["ids"][0][i], + "metadata": response["metadatas"][0][i], + "context": response["documents"][0][i], + "score": response["distances"][0][i], + } + if result["score"] >= score_threshold: + results.append(result) + + return results + except Exception as e: + 
logging.error(f"Error during {self.type} search: {str(e)}") + return [] + + def _generate_embedding(self, text: str, metadata: Dict[str, Any]) -> None: # type: ignore + if not hasattr(self, "app") or not hasattr(self, "collection"): self._initialize_app() - from embedchain.models.data_type import DataType - self.app.add(text, data_type=DataType.TEXT, metadata=metadata) + self.collection.add( + documents=[text], + metadatas=[metadata or {}], + ids=[str(uuid.uuid4())], + ) def reset(self) -> None: try: shutil.rmtree(f"{db_storage_path()}/{self.type}") + if self.app: + self.app.reset() except Exception as e: raise Exception( f"An error occurred while resetting the {self.type} memory: {e}" ) + + def _create_embedding_function(self): + import chromadb.utils.embedding_functions as embedding_functions + + return embedding_functions.OpenAIEmbeddingFunction( + api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small" + ) diff --git a/src/crewai/project/annotations.py b/src/crewai/project/annotations.py index 5bbf8dd1c..6c0b9942a 100644 --- a/src/crewai/project/annotations.py +++ b/src/crewai/project/annotations.py @@ -76,27 +76,13 @@ def crew(func) -> Callable[..., Crew]: instantiated_agents = [] agent_roles = set() - # Collect methods from crew in order - all_functions = [ - (name, getattr(self, name)) - for name, attr in self.__class__.__dict__.items() - if callable(attr) - ] - tasks = [ - (name, method) - for name, method in all_functions - if hasattr(method, "is_task") - ] - - agents = [ - (name, method) - for name, method in all_functions - if hasattr(method, "is_agent") - ] + # Use the preserved task and agent information + tasks = self._original_tasks.items() + agents = self._original_agents.items() # Instantiate tasks in order for task_name, task_method in tasks: - task_instance = task_method() + task_instance = task_method(self) instantiated_tasks.append(task_instance) agent_instance = getattr(task_instance, "agent", None) if agent_instance and agent_instance.role not in agent_roles: @@ -105,7 +91,7 @@ def crew(func) -> Callable[..., Crew]: # Instantiate agents not included by tasks for agent_name, agent_method in agents: - agent_instance = agent_method() + agent_instance = agent_method(self) if agent_instance.role not in agent_roles: instantiated_agents.append(agent_instance) agent_roles.add(agent_instance.role) diff --git a/src/crewai/project/crew_base.py b/src/crewai/project/crew_base.py index 6ad8a8c3c..a420c4dd2 100644 --- a/src/crewai/project/crew_base.py +++ b/src/crewai/project/crew_base.py @@ -34,6 +34,18 @@ def CrewBase(cls: T) -> T: self.map_all_agent_variables() self.map_all_task_variables() + # Preserve task and agent information + self._original_tasks = { + name: method + for name, method in cls.__dict__.items() + if hasattr(method, "is_task") and method.is_task + } + self._original_agents = { + name: method + for name, method in cls.__dict__.items() + if hasattr(method, "is_agent") and method.is_agent + } + @staticmethod def load_yaml(config_path: Path): try: diff --git a/src/crewai/telemetry/telemetry.py b/src/crewai/telemetry/telemetry.py index f6a018f27..a08ccd96f 100644 --- a/src/crewai/telemetry/telemetry.py +++ b/src/crewai/telemetry/telemetry.py @@ -65,7 +65,7 @@ class Telemetry: self.provider.add_span_processor(processor) self.ready = True - except BaseException as e: + except Exception as e: if isinstance( e, (SystemExit, KeyboardInterrupt, GeneratorExit, asyncio.CancelledError), @@ -83,404 +83,33 @@ class Telemetry: self.ready = False self.trace_set 
= False + def _safe_telemetry_operation(self, operation): + if not self.ready: + return + try: + operation() + except Exception: + pass + def crew_creation(self, crew: Crew, inputs: dict[str, Any] | None): """Records the creation of a crew.""" - if self.ready: - try: - tracer = trace.get_tracer("crewai.telemetry") - span = tracer.start_span("Crew Created") - self._add_attribute( - span, - "crewai_version", - pkg_resources.get_distribution("crewai").version, - ) - self._add_attribute(span, "python_version", platform.python_version()) - self._add_attribute(span, "crew_key", crew.key) - self._add_attribute(span, "crew_id", str(crew.id)) - self._add_attribute(span, "crew_process", crew.process) - self._add_attribute(span, "crew_memory", crew.memory) - self._add_attribute(span, "crew_number_of_tasks", len(crew.tasks)) - self._add_attribute(span, "crew_number_of_agents", len(crew.agents)) - if crew.share_crew: - self._add_attribute( - span, - "crew_agents", - json.dumps( - [ - { - "key": agent.key, - "id": str(agent.id), - "role": agent.role, - "goal": agent.goal, - "backstory": agent.backstory, - "verbose?": agent.verbose, - "max_iter": agent.max_iter, - "max_rpm": agent.max_rpm, - "i18n": agent.i18n.prompt_file, - "function_calling_llm": ( - agent.function_calling_llm.model - if agent.function_calling_llm - else "" - ), - "llm": agent.llm.model, - "delegation_enabled?": agent.allow_delegation, - "allow_code_execution?": agent.allow_code_execution, - "max_retry_limit": agent.max_retry_limit, - "tools_names": [ - tool.name.casefold() - for tool in agent.tools or [] - ], - } - for agent in crew.agents - ] - ), - ) - self._add_attribute( - span, - "crew_tasks", - json.dumps( - [ - { - "key": task.key, - "id": str(task.id), - "description": task.description, - "expected_output": task.expected_output, - "async_execution?": task.async_execution, - "human_input?": task.human_input, - "agent_role": ( - task.agent.role if task.agent else "None" - ), - "agent_key": task.agent.key if task.agent else None, - "context": ( - [task.description for task in task.context] - if task.context - else None - ), - "tools_names": [ - tool.name.casefold() - for tool in task.tools or [] - ], - } - for task in crew.tasks - ] - ), - ) - self._add_attribute(span, "platform", platform.platform()) - self._add_attribute(span, "platform_release", platform.release()) - self._add_attribute(span, "platform_system", platform.system()) - self._add_attribute(span, "platform_version", platform.version()) - self._add_attribute(span, "cpus", os.cpu_count()) - self._add_attribute( - span, "crew_inputs", json.dumps(inputs) if inputs else None - ) - else: - self._add_attribute( - span, - "crew_agents", - json.dumps( - [ - { - "key": agent.key, - "id": str(agent.id), - "role": agent.role, - "verbose?": agent.verbose, - "max_iter": agent.max_iter, - "max_rpm": agent.max_rpm, - "function_calling_llm": ( - agent.function_calling_llm.model - if agent.function_calling_llm - else "" - ), - "llm": agent.llm.model, - "delegation_enabled?": agent.allow_delegation, - "allow_code_execution?": agent.allow_code_execution, - "max_retry_limit": agent.max_retry_limit, - "tools_names": [ - tool.name.casefold() - for tool in agent.tools or [] - ], - } - for agent in crew.agents - ] - ), - ) - self._add_attribute( - span, - "crew_tasks", - json.dumps( - [ - { - "key": task.key, - "id": str(task.id), - "async_execution?": task.async_execution, - "human_input?": task.human_input, - "agent_role": ( - task.agent.role if task.agent else "None" - ), - "agent_key": 
task.agent.key if task.agent else None, - "tools_names": [ - tool.name.casefold() - for tool in task.tools or [] - ], - } - for task in crew.tasks - ] - ), - ) - span.set_status(Status(StatusCode.OK)) - span.end() - except Exception: - pass - def task_started(self, crew: Crew, task: Task) -> Span | None: - """Records task started in a crew.""" - if self.ready: - try: - tracer = trace.get_tracer("crewai.telemetry") - - created_span = tracer.start_span("Task Created") - - self._add_attribute(created_span, "crew_key", crew.key) - self._add_attribute(created_span, "crew_id", str(crew.id)) - self._add_attribute(created_span, "task_key", task.key) - self._add_attribute(created_span, "task_id", str(task.id)) - - if crew.share_crew: - self._add_attribute( - created_span, "formatted_description", task.description - ) - self._add_attribute( - created_span, "formatted_expected_output", task.expected_output - ) - - created_span.set_status(Status(StatusCode.OK)) - created_span.end() - - span = tracer.start_span("Task Execution") - - self._add_attribute(span, "crew_key", crew.key) - self._add_attribute(span, "crew_id", str(crew.id)) - self._add_attribute(span, "task_key", task.key) - self._add_attribute(span, "task_id", str(task.id)) - - if crew.share_crew: - self._add_attribute(span, "formatted_description", task.description) - self._add_attribute( - span, "formatted_expected_output", task.expected_output - ) - - return span - except Exception: - pass - - return None - - def task_ended(self, span: Span, task: Task, crew: Crew): - """Records task execution in a crew.""" - if self.ready: - try: - if crew.share_crew: - self._add_attribute( - span, - "task_output", - task.output.raw if task.output else "", - ) - - span.set_status(Status(StatusCode.OK)) - span.end() - except Exception: - pass - - def tool_repeated_usage(self, llm: Any, tool_name: str, attempts: int): - """Records the repeated usage 'error' of a tool by an agent.""" - if self.ready: - try: - tracer = trace.get_tracer("crewai.telemetry") - span = tracer.start_span("Tool Repeated Usage") - self._add_attribute( - span, - "crewai_version", - pkg_resources.get_distribution("crewai").version, - ) - self._add_attribute(span, "tool_name", tool_name) - self._add_attribute(span, "attempts", attempts) - if llm: - self._add_attribute(span, "llm", llm.model) - span.set_status(Status(StatusCode.OK)) - span.end() - except Exception: - pass - - def tool_usage(self, llm: Any, tool_name: str, attempts: int): - """Records the usage of a tool by an agent.""" - if self.ready: - try: - tracer = trace.get_tracer("crewai.telemetry") - span = tracer.start_span("Tool Usage") - self._add_attribute( - span, - "crewai_version", - pkg_resources.get_distribution("crewai").version, - ) - self._add_attribute(span, "tool_name", tool_name) - self._add_attribute(span, "attempts", attempts) - if llm: - self._add_attribute(span, "llm", llm.model) - span.set_status(Status(StatusCode.OK)) - span.end() - except Exception: - pass - - def tool_usage_error(self, llm: Any): - """Records the usage of a tool by an agent.""" - if self.ready: - try: - tracer = trace.get_tracer("crewai.telemetry") - span = tracer.start_span("Tool Usage Error") - self._add_attribute( - span, - "crewai_version", - pkg_resources.get_distribution("crewai").version, - ) - if llm: - self._add_attribute(span, "llm", llm.model) - span.set_status(Status(StatusCode.OK)) - span.end() - except Exception: - pass - - def individual_test_result_span( - self, crew: Crew, quality: float, exec_time: int, model_name: str - ): 
- if self.ready: - try: - tracer = trace.get_tracer("crewai.telemetry") - span = tracer.start_span("Crew Individual Test Result") - - self._add_attribute( - span, - "crewai_version", - pkg_resources.get_distribution("crewai").version, - ) - self._add_attribute(span, "crew_key", crew.key) - self._add_attribute(span, "crew_id", str(crew.id)) - self._add_attribute(span, "quality", str(quality)) - self._add_attribute(span, "exec_time", str(exec_time)) - self._add_attribute(span, "model_name", model_name) - span.set_status(Status(StatusCode.OK)) - span.end() - except Exception: - pass - - def test_execution_span( - self, - crew: Crew, - iterations: int, - inputs: dict[str, Any] | None, - model_name: str, - ): - if self.ready: - try: - tracer = trace.get_tracer("crewai.telemetry") - span = tracer.start_span("Crew Test Execution") - - self._add_attribute( - span, - "crewai_version", - pkg_resources.get_distribution("crewai").version, - ) - self._add_attribute(span, "crew_key", crew.key) - self._add_attribute(span, "crew_id", str(crew.id)) - self._add_attribute(span, "iterations", str(iterations)) - self._add_attribute(span, "model_name", model_name) - - if crew.share_crew: - self._add_attribute( - span, "inputs", json.dumps(inputs) if inputs else None - ) - - span.set_status(Status(StatusCode.OK)) - span.end() - except Exception: - pass - - def deploy_signup_error_span(self): - if self.ready: - try: - tracer = trace.get_tracer("crewai.telemetry") - span = tracer.start_span("Deploy Signup Error") - span.set_status(Status(StatusCode.OK)) - span.end() - except Exception: - pass - - def start_deployment_span(self, uuid: Optional[str] = None): - if self.ready: - try: - tracer = trace.get_tracer("crewai.telemetry") - span = tracer.start_span("Start Deployment") - if uuid: - self._add_attribute(span, "uuid", uuid) - span.set_status(Status(StatusCode.OK)) - span.end() - except Exception: - pass - - def create_crew_deployment_span(self): - if self.ready: - try: - tracer = trace.get_tracer("crewai.telemetry") - span = tracer.start_span("Create Crew Deployment") - span.set_status(Status(StatusCode.OK)) - span.end() - except Exception: - pass - - def get_crew_logs_span(self, uuid: Optional[str], log_type: str = "deployment"): - if self.ready: - try: - tracer = trace.get_tracer("crewai.telemetry") - span = tracer.start_span("Get Crew Logs") - self._add_attribute(span, "log_type", log_type) - if uuid: - self._add_attribute(span, "uuid", uuid) - span.set_status(Status(StatusCode.OK)) - span.end() - except Exception: - pass - - def remove_crew_span(self, uuid: Optional[str] = None): - if self.ready: - try: - tracer = trace.get_tracer("crewai.telemetry") - span = tracer.start_span("Remove Crew") - if uuid: - self._add_attribute(span, "uuid", uuid) - span.set_status(Status(StatusCode.OK)) - span.end() - except Exception: - pass - - def crew_execution_span(self, crew: Crew, inputs: dict[str, Any] | None): - """Records the complete execution of a crew. - This is only collected if the user has opted-in to share the crew. 
- """ - self.crew_creation(crew, inputs) - - if (self.ready) and (crew.share_crew): - try: - tracer = trace.get_tracer("crewai.telemetry") - span = tracer.start_span("Crew Execution") - self._add_attribute( - span, - "crewai_version", - pkg_resources.get_distribution("crewai").version, - ) - self._add_attribute(span, "crew_key", crew.key) - self._add_attribute(span, "crew_id", str(crew.id)) - self._add_attribute( - span, "crew_inputs", json.dumps(inputs) if inputs else None - ) + def operation(): + tracer = trace.get_tracer("crewai.telemetry") + span = tracer.start_span("Crew Created") + self._add_attribute( + span, + "crewai_version", + pkg_resources.get_distribution("crewai").version, + ) + self._add_attribute(span, "python_version", platform.python_version()) + self._add_attribute(span, "crew_key", crew.key) + self._add_attribute(span, "crew_id", str(crew.id)) + self._add_attribute(span, "crew_process", crew.process) + self._add_attribute(span, "crew_memory", crew.memory) + self._add_attribute(span, "crew_number_of_tasks", len(crew.tasks)) + self._add_attribute(span, "crew_number_of_agents", len(crew.agents)) + if crew.share_crew: self._add_attribute( span, "crew_agents", @@ -496,8 +125,15 @@ class Telemetry: "max_iter": agent.max_iter, "max_rpm": agent.max_rpm, "i18n": agent.i18n.prompt_file, + "function_calling_llm": ( + agent.function_calling_llm.model + if agent.function_calling_llm + else "" + ), "llm": agent.llm.model, "delegation_enabled?": agent.allow_delegation, + "allow_code_execution?": agent.allow_code_execution, + "max_retry_limit": agent.max_retry_limit, "tools_names": [ tool.name.casefold() for tool in agent.tools or [] ], @@ -512,12 +148,15 @@ class Telemetry: json.dumps( [ { + "key": task.key, "id": str(task.id), "description": task.description, "expected_output": task.expected_output, "async_execution?": task.async_execution, "human_input?": task.human_input, - "agent_role": task.agent.role if task.agent else "None", + "agent_role": ( + task.agent.role if task.agent else "None" + ), "agent_key": task.agent.key if task.agent else None, "context": ( [task.description for task in task.context] @@ -532,78 +171,433 @@ class Telemetry: ] ), ) - return span - except Exception: - pass - - def end_crew(self, crew, final_string_output): - if (self.ready) and (crew.share_crew): - try: + self._add_attribute(span, "platform", platform.platform()) + self._add_attribute(span, "platform_release", platform.release()) + self._add_attribute(span, "platform_system", platform.system()) + self._add_attribute(span, "platform_version", platform.version()) + self._add_attribute(span, "cpus", os.cpu_count()) self._add_attribute( - crew._execution_span, - "crewai_version", - pkg_resources.get_distribution("crewai").version, + span, "crew_inputs", json.dumps(inputs) if inputs else None ) + else: self._add_attribute( - crew._execution_span, "crew_output", final_string_output - ) - self._add_attribute( - crew._execution_span, - "crew_tasks_output", + span, + "crew_agents", json.dumps( [ { + "key": agent.key, + "id": str(agent.id), + "role": agent.role, + "verbose?": agent.verbose, + "max_iter": agent.max_iter, + "max_rpm": agent.max_rpm, + "function_calling_llm": ( + agent.function_calling_llm.model + if agent.function_calling_llm + else "" + ), + "llm": agent.llm.model, + "delegation_enabled?": agent.allow_delegation, + "allow_code_execution?": agent.allow_code_execution, + "max_retry_limit": agent.max_retry_limit, + "tools_names": [ + tool.name.casefold() for tool in agent.tools or [] + ], + } + 
for agent in crew.agents + ] + ), + ) + self._add_attribute( + span, + "crew_tasks", + json.dumps( + [ + { + "key": task.key, "id": str(task.id), - "description": task.description, - "output": task.output.raw_output, + "async_execution?": task.async_execution, + "human_input?": task.human_input, + "agent_role": ( + task.agent.role if task.agent else "None" + ), + "agent_key": task.agent.key if task.agent else None, + "tools_names": [ + tool.name.casefold() for tool in task.tools or [] + ], } for task in crew.tasks ] ), ) - crew._execution_span.set_status(Status(StatusCode.OK)) - crew._execution_span.end() - except Exception: - pass + span.set_status(Status(StatusCode.OK)) + span.end() + + self._safe_telemetry_operation(operation) + + def task_started(self, crew: Crew, task: Task) -> Span | None: + """Records task started in a crew.""" + + def operation(): + tracer = trace.get_tracer("crewai.telemetry") + + created_span = tracer.start_span("Task Created") + + self._add_attribute(created_span, "crew_key", crew.key) + self._add_attribute(created_span, "crew_id", str(crew.id)) + self._add_attribute(created_span, "task_key", task.key) + self._add_attribute(created_span, "task_id", str(task.id)) + + if crew.share_crew: + self._add_attribute( + created_span, "formatted_description", task.description + ) + self._add_attribute( + created_span, "formatted_expected_output", task.expected_output + ) + + created_span.set_status(Status(StatusCode.OK)) + created_span.end() + + span = tracer.start_span("Task Execution") + + self._add_attribute(span, "crew_key", crew.key) + self._add_attribute(span, "crew_id", str(crew.id)) + self._add_attribute(span, "task_key", task.key) + self._add_attribute(span, "task_id", str(task.id)) + + if crew.share_crew: + self._add_attribute(span, "formatted_description", task.description) + self._add_attribute( + span, "formatted_expected_output", task.expected_output + ) + + return span + + return self._safe_telemetry_operation(operation) + + def task_ended(self, span: Span, task: Task, crew: Crew): + """Records task execution in a crew.""" + + def operation(): + if crew.share_crew: + self._add_attribute( + span, + "task_output", + task.output.raw if task.output else "", + ) + + span.set_status(Status(StatusCode.OK)) + span.end() + + self._safe_telemetry_operation(operation) + + def tool_repeated_usage(self, llm: Any, tool_name: str, attempts: int): + """Records the repeated usage 'error' of a tool by an agent.""" + + def operation(): + tracer = trace.get_tracer("crewai.telemetry") + span = tracer.start_span("Tool Repeated Usage") + self._add_attribute( + span, + "crewai_version", + pkg_resources.get_distribution("crewai").version, + ) + self._add_attribute(span, "tool_name", tool_name) + self._add_attribute(span, "attempts", attempts) + if llm: + self._add_attribute(span, "llm", llm.model) + span.set_status(Status(StatusCode.OK)) + span.end() + + self._safe_telemetry_operation(operation) + + def tool_usage(self, llm: Any, tool_name: str, attempts: int): + """Records the usage of a tool by an agent.""" + + def operation(): + tracer = trace.get_tracer("crewai.telemetry") + span = tracer.start_span("Tool Usage") + self._add_attribute( + span, + "crewai_version", + pkg_resources.get_distribution("crewai").version, + ) + self._add_attribute(span, "tool_name", tool_name) + self._add_attribute(span, "attempts", attempts) + if llm: + self._add_attribute(span, "llm", llm.model) + span.set_status(Status(StatusCode.OK)) + span.end() + + self._safe_telemetry_operation(operation) + + def 
tool_usage_error(self, llm: Any): + """Records the usage of a tool by an agent.""" + + def operation(): + tracer = trace.get_tracer("crewai.telemetry") + span = tracer.start_span("Tool Usage Error") + self._add_attribute( + span, + "crewai_version", + pkg_resources.get_distribution("crewai").version, + ) + if llm: + self._add_attribute(span, "llm", llm.model) + span.set_status(Status(StatusCode.OK)) + span.end() + + self._safe_telemetry_operation(operation) + + def individual_test_result_span( + self, crew: Crew, quality: float, exec_time: int, model_name: str + ): + def operation(): + tracer = trace.get_tracer("crewai.telemetry") + span = tracer.start_span("Crew Individual Test Result") + + self._add_attribute( + span, + "crewai_version", + pkg_resources.get_distribution("crewai").version, + ) + self._add_attribute(span, "crew_key", crew.key) + self._add_attribute(span, "crew_id", str(crew.id)) + self._add_attribute(span, "quality", str(quality)) + self._add_attribute(span, "exec_time", str(exec_time)) + self._add_attribute(span, "model_name", model_name) + span.set_status(Status(StatusCode.OK)) + span.end() + + self._safe_telemetry_operation(operation) + + def test_execution_span( + self, + crew: Crew, + iterations: int, + inputs: dict[str, Any] | None, + model_name: str, + ): + def operation(): + tracer = trace.get_tracer("crewai.telemetry") + span = tracer.start_span("Crew Test Execution") + + self._add_attribute( + span, + "crewai_version", + pkg_resources.get_distribution("crewai").version, + ) + self._add_attribute(span, "crew_key", crew.key) + self._add_attribute(span, "crew_id", str(crew.id)) + self._add_attribute(span, "iterations", str(iterations)) + self._add_attribute(span, "model_name", model_name) + + if crew.share_crew: + self._add_attribute( + span, "inputs", json.dumps(inputs) if inputs else None + ) + + span.set_status(Status(StatusCode.OK)) + span.end() + + self._safe_telemetry_operation(operation) + + def deploy_signup_error_span(self): + def operation(): + tracer = trace.get_tracer("crewai.telemetry") + span = tracer.start_span("Deploy Signup Error") + span.set_status(Status(StatusCode.OK)) + span.end() + + self._safe_telemetry_operation(operation) + + def start_deployment_span(self, uuid: Optional[str] = None): + def operation(): + tracer = trace.get_tracer("crewai.telemetry") + span = tracer.start_span("Start Deployment") + if uuid: + self._add_attribute(span, "uuid", uuid) + span.set_status(Status(StatusCode.OK)) + span.end() + + self._safe_telemetry_operation(operation) + + def create_crew_deployment_span(self): + def operation(): + tracer = trace.get_tracer("crewai.telemetry") + span = tracer.start_span("Create Crew Deployment") + span.set_status(Status(StatusCode.OK)) + span.end() + + self._safe_telemetry_operation(operation) + + def get_crew_logs_span(self, uuid: Optional[str], log_type: str = "deployment"): + def operation(): + tracer = trace.get_tracer("crewai.telemetry") + span = tracer.start_span("Get Crew Logs") + self._add_attribute(span, "log_type", log_type) + if uuid: + self._add_attribute(span, "uuid", uuid) + span.set_status(Status(StatusCode.OK)) + span.end() + + self._safe_telemetry_operation(operation) + + def remove_crew_span(self, uuid: Optional[str] = None): + def operation(): + tracer = trace.get_tracer("crewai.telemetry") + span = tracer.start_span("Remove Crew") + if uuid: + self._add_attribute(span, "uuid", uuid) + span.set_status(Status(StatusCode.OK)) + span.end() + + self._safe_telemetry_operation(operation) + + def 
crew_execution_span(self, crew: Crew, inputs: dict[str, Any] | None): + """Records the complete execution of a crew. + This is only collected if the user has opted-in to share the crew. + """ + self.crew_creation(crew, inputs) + + def operation(): + tracer = trace.get_tracer("crewai.telemetry") + span = tracer.start_span("Crew Execution") + self._add_attribute( + span, + "crewai_version", + pkg_resources.get_distribution("crewai").version, + ) + self._add_attribute(span, "crew_key", crew.key) + self._add_attribute(span, "crew_id", str(crew.id)) + self._add_attribute( + span, "crew_inputs", json.dumps(inputs) if inputs else None + ) + self._add_attribute( + span, + "crew_agents", + json.dumps( + [ + { + "key": agent.key, + "id": str(agent.id), + "role": agent.role, + "goal": agent.goal, + "backstory": agent.backstory, + "verbose?": agent.verbose, + "max_iter": agent.max_iter, + "max_rpm": agent.max_rpm, + "i18n": agent.i18n.prompt_file, + "llm": agent.llm.model, + "delegation_enabled?": agent.allow_delegation, + "tools_names": [ + tool.name.casefold() for tool in agent.tools or [] + ], + } + for agent in crew.agents + ] + ), + ) + self._add_attribute( + span, + "crew_tasks", + json.dumps( + [ + { + "id": str(task.id), + "description": task.description, + "expected_output": task.expected_output, + "async_execution?": task.async_execution, + "human_input?": task.human_input, + "agent_role": task.agent.role if task.agent else "None", + "agent_key": task.agent.key if task.agent else None, + "context": ( + [task.description for task in task.context] + if task.context + else None + ), + "tools_names": [ + tool.name.casefold() for tool in task.tools or [] + ], + } + for task in crew.tasks + ] + ), + ) + return span + + if crew.share_crew: + return self._safe_telemetry_operation(operation) + return None + + def end_crew(self, crew, final_string_output): + def operation(): + self._add_attribute( + crew._execution_span, + "crewai_version", + pkg_resources.get_distribution("crewai").version, + ) + self._add_attribute( + crew._execution_span, "crew_output", final_string_output + ) + self._add_attribute( + crew._execution_span, + "crew_tasks_output", + json.dumps( + [ + { + "id": str(task.id), + "description": task.description, + "output": task.output.raw_output, + } + for task in crew.tasks + ] + ), + ) + crew._execution_span.set_status(Status(StatusCode.OK)) + crew._execution_span.end() + + if crew.share_crew: + self._safe_telemetry_operation(operation) def _add_attribute(self, span, key, value): """Add an attribute to a span.""" - try: + + def operation(): return span.set_attribute(key, value) - except Exception: - pass + + self._safe_telemetry_operation(operation) def flow_creation_span(self, flow_name: str): - if self.ready: - try: - tracer = trace.get_tracer("crewai.telemetry") - span = tracer.start_span("Flow Creation") - self._add_attribute(span, "flow_name", flow_name) - span.set_status(Status(StatusCode.OK)) - span.end() - except Exception: - pass + def operation(): + tracer = trace.get_tracer("crewai.telemetry") + span = tracer.start_span("Flow Creation") + self._add_attribute(span, "flow_name", flow_name) + span.set_status(Status(StatusCode.OK)) + span.end() + + self._safe_telemetry_operation(operation) def flow_plotting_span(self, flow_name: str, node_names: list[str]): - if self.ready: - try: - tracer = trace.get_tracer("crewai.telemetry") - span = tracer.start_span("Flow Plotting") - self._add_attribute(span, "flow_name", flow_name) - self._add_attribute(span, "node_names", 
json.dumps(node_names)) - span.set_status(Status(StatusCode.OK)) - span.end() - except Exception: - pass + def operation(): + tracer = trace.get_tracer("crewai.telemetry") + span = tracer.start_span("Flow Plotting") + self._add_attribute(span, "flow_name", flow_name) + self._add_attribute(span, "node_names", json.dumps(node_names)) + span.set_status(Status(StatusCode.OK)) + span.end() + + self._safe_telemetry_operation(operation) def flow_execution_span(self, flow_name: str, node_names: list[str]): - if self.ready: - try: - tracer = trace.get_tracer("crewai.telemetry") - span = tracer.start_span("Flow Execution") - self._add_attribute(span, "flow_name", flow_name) - self._add_attribute(span, "node_names", json.dumps(node_names)) - span.set_status(Status(StatusCode.OK)) - span.end() - except Exception: - pass + def operation(): + tracer = trace.get_tracer("crewai.telemetry") + span = tracer.start_span("Flow Execution") + self._add_attribute(span, "flow_name", flow_name) + self._add_attribute(span, "node_names", json.dumps(node_names)) + span.set_status(Status(StatusCode.OK)) + span.end() + + self._safe_telemetry_operation(operation) diff --git a/tests/memory/cassettes/test_save_and_search.yaml b/tests/memory/cassettes/test_save_and_search.yaml deleted file mode 100644 index 473fe50d3..000000000 --- a/tests/memory/cassettes/test_save_and_search.yaml +++ /dev/null @@ -1,189 +0,0 @@ -interactions: -- request: - body: '{"input": ["test value test value test value test value test value test - value test value test value test value test value test value test value test - value test value test value test value test value test value"], "model": "text-embedding-ada-002", - "encoding_format": "base64"}' - headers: - accept: - - application/json - accept-encoding: - - gzip, deflate - connection: - - keep-alive - content-length: - - '292' - content-type: - - application/json - host: - - api.openai.com - user-agent: - - OpenAI/Python 1.47.0 - x-stainless-arch: - - arm64 - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - MacOS - x-stainless-package-version: - - 1.47.0 - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.11.7 - method: POST - uri: https://api.openai.com/v1/embeddings - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": - \"embedding\",\n \"index\": 0,\n \"embedding\": 
\"0hlQvJraaDvd7qU8UKxtOq2rkbzgV788o9ccu1bQnryGE7O87AS3vJraaLvUOX07rcrdO+ortLtCJ9S4C6FCO+O/9zwdkFa84ug2PHb8wLwAPbe7pK5dvKdANr28UNi7KB6CvB5oeLun+Ys8jVVPvR25lTx7Z5w7RwKZPP/zyrus8ru7GygevIdbPjxJSwU7qmFEvCxeXLuHMv88yCaPvODmdDtfXAg8PFaCPPsZZ7w6nEu89iC3u5IH1bgk01O9xU2Mu07UyzwOM5s8XBI7PFAdOL1wAjC8zB+/PCVEHjx0s1Q8UmYkPIXKxjwNo4S8qznmvELfSLzcXg+90tHEO+XBOTv1SBW8m9tJvJkCRzynsQA83X3bOyVEnjzWO788U4ZRvL4oejx79tE8kOcnO4IaA7zyJ4c8eh6wunnWpLsia5s86rppu6H+mbwcAEC71KpHPKiJojy6v+A6k1DBO2wozLvbXS68KDztO4CJCzwPCz07W8lOPERwwDu8CM08sl0XPBJVCj0eaPi8uuifvGbmLzwS5L+7OlTAO7mglLwhssW8u0/3OvsZ57kshxs6sRWMOxnfMbyxXLa767vKPAtaGLzIJg+8nSS2PBknPTy5oJQ8nZWAPDg0k7tu4oK7MDjAuobLJ7vbFgQ9t55SvKX3STzT0qW631beu6/LvryU4Ne8G3CpuxyP9TyiZXE5ab+yPET/9TvZFMK8NBIkPPFuMbxDb188FOaBvKiJIrwcAEA8D1JnPLBbVbqZuxy8pWiUPBW9QjyxXLY7cyM+vF3p+zvd7iW8pofgu9esCbwVdpg8XcsQOwlYVrx7Zxw8hTuROhyP9blWiJO8q2IlvAOlb7sBPhg8EMQSvBfdb7yqqU88AMzsPIIagzxBlz26UI4COl9cCLlLs728gKjXPK3znLzGlZc8UI6CvDbrJj3ZFEI8gBjBO6Q+9LuRBvQ7CqBhvMn+MDzrdCA9Bn/TPNhbbLzZFMI7TkWWPBqYBz2uEmk8u0/3vO7c2DznmVs8fPcyvC4YEzuftS2/MIDLO8N0CTx2bYs7cSH8PH3O8zscj/U8iDNgPFTO3Lw2oxs8+Gkju9mFjDyQ56e8lMELvCmuGDvPsLa81BsSPAx55Lwfkpi7fs/UPNH6g7xFANc7GyievK2rETxgpBO8FS6Nu8aVlzvEI+w7CsqBug4K3DyGomi8rxPKuneMVzv9YtM8/hyKPagYWLxVXvO8DgrcuiVEHjzrLBU9F5ZFvL4oerzR0GM88JaPO9hbbLzN2JQ8PHRtPHj+Ary2Dry8HkmsvGyZFjy1Ddu7m5O+usQj7LvmUdA5NXl7vDrFijy8waK8p7EAPPYgtzynsYC8pvgqvMCbBro1WxC8ZVYZvUxskzs37Ae7TYvfPLsxjDzCSum7eY4ZvIBgzLtISiS8P3eQvL4oejzBK508HbmVPGDsnrzjv/c7PC3DPCT8ErzRYHo6Uz5GvODm9LsDh4Q8WoFDup2z67vAmwa76FOSOxqYBzyMfg49DHnkPE6MwLvXrAm8CIC0PJQJFzzAmwa8xmzYO4KpODz90527kXe+ugpZt7qGEzM8gmKOOma9cLvZPYE8VxgqO+/duTyban88tO6OvMwfv7yLVO68j5+cvCs/kLx6Zru5C1oYva3znDvnUrE8Dws9ugjIv7u/cWY775WuPMABfTx714U8SyQIPA0yujqds+s5uHZ0vKPXnDs06WS8RSkWPA7CULz90508+UHFvGsJgDtjxaG8bwHPO2fnELpAllw86ZsdvczXs7sGqJI7okeGO78pW7pxAxE8r4QUvSFB+7z6+po7TdNqvJe52jsia5u851KxO8pGvLwYJty7fbCIvBxxCjsEz4+88G1QvCwWUTzybxK9TLQevKFGpbzETKu8Yn0WvMUFATy6d9U7fT++vLt4NjwQ4n285Zj6vHj+grwMoiO7t57SOvn5ubvX9BS5JBr+PB0Bobz0HnW8+olQvLYOPLzN2JQ8qtIOO5TBCzaXAeY8V2C1PPdoQrtXp188aZZzPEuKfjxLazK7YHtUvAGugTsLMHi8eY4ZvGQOjrzYW2w8nZUAO5PfdjxCUJM72YUMPfQAijx/0DU7ckscPCcdobzCSum7M6HZvIhcnzmniMG8pj9VO5LAKjxVhzI8hTsRvdxeD73RiTk8h1u+u/fZDDzN2BS9OlRAOywWUTzBAt47WqoCu/Qe9bsNekU85glFPI4tcbvxJqY8lnHPOlmpIT2P5kY8TdNqvLifs7t71wU9WfGsPDI6Ary+mcQ8SvrnPG244jxEcEC8niUXPXP6/jx++BO83sbHOzEQ4jwxEGK8HHEKPD++OjvMH788mHIwPGPFobxpeIg8JWNqvC83XzyB8GI7YDNJPG3hoTxuSHm8linEvBe/hDyrYqU8HdjhPArKAT22nXE89mfhOhB8BzqeJRc8KDztOjUTBTw9no26f6d2uzHxlbxza0m8n/24O15bpzw1E4U8qoqDPIOqGTyI69S7LmCePCFqujuPnxy8nWxBu5+1rbzsBDc8KB6CPDqcSzzaFSM7FOaBvEnaujjkCGS8rMl8vCVEHjzPsLa7PJ0sPIDRFrwXvwQ8Dwu9PLee0jztTEI8SCFlPDpUwDwRVCk88JYPPPzSvLwkGv67qtKOu3hGjjrHbbm81/QUPEiSLzzQsZc8qvD5Oz292bwv8LQ74793OVKFcDxLazI89ACKPOiavDu9UTk8cmpoPMv/Eb3nUjE6Fr6jvA9SZzxRrU49vuHPPNkUwjldgwU8e/bRuqexgLsisya875WuvClmDbwUdTe8rzwJutTTBjxt4SE6y9ZSvFmA4rqq0g47ZMYCvNhb7LyHo0k8SSJGPAD1Kzw3M7K7iqULPSBpWTv50Po8tO4OvO2UzTt/iCo9CIC0O12Dhbz60du80oqaPBxxijwwgEu8/NK8OtoVIz0eAoK84i/hPNY7vzuYcjC8yG4aPFo5ODymsB88O+TWvKCNTzutOsc79B51O3j+gjweSSw8vQkuvM8hATzHbTm78N6avGCkk7zKjsc7z2mMPJtqf7w37Ae8NROFvLZ/Br2qioO8NjLRvHQknzsxyNa8EStqvCo+L7zN2JS7I4rnvLnnvrwIgLS80LEXvGqX1Lx7H5G7jDaDO97GRzxQZcO76rppuilmDTw1efu7A6Xvu/6rv7sekbe82IQrvIIaA7zmeo+79mfhOwlY1rppT0k802FbvMseXrxvuiQ8Kz8QPHhGDrtagcO8EyxLPAsSDbwgaVk8sVy2vGqXVDuuEmk8D1LnOmwozLs+TXA8s+0tOxpvSDzqc7882IQrPNztxDxkxgI7ODQTt97vBjzUOX28CRBLPFCOgruZuxy7XFnlPDEQ4juYurs8ZVYZO1qqgrzBAt47rhLpvK+i/zyUmEw8lyqlOmxRizzz/yi8sc2AvPXXSjyCgHk8M8qYvElLBTxJ2jq7I4rnuxV2GL3gVz+7ema7O7Bb1TxO1Eu6xQUBPGzgwLxqwBM8QpgePPuKMbyftS08t8cRvXZETLrqumk83
e4lvDhSfjzCu7M7x205vBfd77zn4ea7ema7uyitt7z/ZJW8IdsEPKSu3TwLWpg7yo5HPAaokryxzQA8mHKwvO2UTTzxRfI7at9fvAx5ZLyxXDa8SvrnOn/QtTxixEA8VogTu6Y/VbyHW748gGDMPEskiLyBYa07xdzBvIrE17sgIq87Pk1wPB25lbwXTjo8PZ4NvVs6mbxsKEw8dCQfux4CgjwH8B085AhkPIx+Dr3I/U+8M8oYPFKuLzuWuPm7vAhNvJtMFL1Hkc68tKYDPQg5irs1efs85XkuOQwx2bssz6a8GJcmvELfyLyqigM8RUjiOVVe87zmUdC8yCYPvGVWmbvPsLa8GSe9O2Ut2jzB45G80PmiOxxxiryZuxw8IrMmPGxRi7tKIyc9cAIwPJN5AD37QiY94qArvLI0WL3v3Tm8KxVwu/DeGjzGJE09DKKjPEz7SLzlMoS8dSWAPNk9gTyVma076iu0vB1ISzxuKo488SYmvJ+MbrxifRa8oEVEvYk0QTus8js8hDowvCTT07wfIU67D1LnvEFPsjz8Q4e8GE+buoPyJDuRMJS8gzlPPJJ4nzuXKqW8z2kMvGtQKjxu4gI98LXbu9QbErygjU88YjWLO7adcbyf/bg7CcmgOkqy3Dyf/Ti9+bEuvJhyMDtPHFc82s0XvOq6aTwIyL+8SySIvHb8wLyb28m71KrHPE/VrDye/Fe6f9C1u3jVQzuXAWa8sBSrtQSmULxO/Yo87AQ3vDx0bbxMtJ48n7Utu2XlzjoFfvI7rcpdO+5NIzwAzOw8LIcbvVwSuzyLNaI8nJQfvEBOUTyiZfE7X6MyvNeDyjvL1tI7t+bdvEnaOruCGoO8LtCHu4EZojxShfC8pWgUPL7Cgzt8Pt07rxPKPDsNlju/cea8nt2LPGE0Krt+F+C84zDCulmAYrxrb3a7YOwePD52rzubBIk8nt2LvFg3drxtcFe8a2/2ucf8bjvoCwe8yf6wvO11gTv9ql68W4KkPFT3G7zAcke8s+0tu6BugzrNZ0o6ImubPPFusbxD4Km6aC8cvZcqpbuiR4a83n48vAjIPzuypaI8of6ZPAOHBLsCFdm8yCYPvPsZ57zleS69nWzBOrvpADzc7cQ6HbmVPKgYWLxoLxw9edakPAx55Dv39/e84zDCuvhAZDw5DDW87b2MPJi6u7k37Ae9gBjBvPfZDD2PVxE8prCfPLowqzykZ7O86nM/OvWQoLyQ56c7keiIuyitt7s/d5C6CIC0u9Y7P7xsUQu81oLpu3+ndry1xrC86eMoO0Zygrzsk2y7FOYBvY1VT7p/QYA7D1LnvISCuzsqPi+6gmIOvKrSDj1uSPk7e67GPBsonrzomjw8zZCJvPmxLjv8Gsi7OsUKvM1nSrwDXkW8a2/2OwfHXrwv8DS8goB5vNxej7w0Wq+86+SJPCk9zjxM+0i8ZuavOjjDSLx9P767yP3Pt0VI4rykPvQ7V6dfu98OUzwa4JI8QL+bvIaiaLy4dvS6rGMGPX3O8zvwbdC6SCFlPtMasbuvE8q7jH4OPZG/STv1H9Y4w7wUPL4oejvNkIk8DOquPAfHXjx51qS7of6ZO30/vrq5L0q74lggPIAYwbxyS5y8odVavDzlt7w9no066Jq8u13peztt4SG816yJO5zcKrwktAe8nWzBu3cc7jz/rCA8F93vvBxxCryq0o675TKEPIfMCL3uBRi8yUXbPCNDPTs95pg8X8J+vE3T6jqtyt27CxKNvEkixrwWBq88jlYwO8zXMzrk6Re7g6qZOyesVjsKoOG8A6Vvu27igjzwlo88qtKOPIWDHDsXlkU9JRvfvLPtrbtpeIg816wJvGIMzDqKfMy8O1WhPLU2Gr1y2zK6YDPJvGrAk7uCYo48D1JnOy1fPTuOLXG8YQvrOqdANryfjG687925vMv/ET0zWc48IWo6OUNv3zyjj5G70Yk5PMECXrvkMSO9R7qNvD8Gxrw9ng08setrvJe5Wrz7QiY6omXxvCn1QrpplvO7eEaOvDUThbl1tLU8D1LnPKnRrTyoQRe8EHwHuvBtUDxwkeU7TPvIOvDeGj32ILe7SvpnvMq3BrlkVbi7X6OyOYw2g7zzjl47+tHbvO1Mwjp3HG68jDYDO/Uf1rtDb987oLYOvTRarzpdWsY7CqDhPKexALsat1M7hcrGO7DMn7uZuxw7dm2LvG0pLTrfx6g8QSbzvH+n9rtd6Xu8/vLpPM2QibyLfS28//PKuxFUKbz393c7vAjNvPyLkjpwkeW7fT8+vBrgEjykrt07LtAHvEUpFrxN0+o7FC0svL7Cg7xY8Es8WqoCvdmFjLw1efu7RklDvPwaSD3BAl45aXgIvU79Cr1lniQ9ZMaCO4jr1LwVLg06N+wHPZoDqLwMMVm8y9ZSvMn+ML7TGrE755nbPNbzs7unQLa7APWrvICJCz2zNbm7Aa6BvJ+Mbjswx/U7WBmLvLTFz7tl5c66m5O+On9BgDt7H5G85nqPPAGugTseaHg89mdhPB5o+LuclJ882FtsOtCxF7yUmEy8Rth4vMzXMzz6so+7mLq7O5EGdLyXAeY7CFf1O/QACjxpeIg8TUQ1PHJLHDwt7vK7a5g1Ozfshzzu3Fg8L6gpPessFbx4/gK85wqmvMi1xDzFTYw8dm2LvKexAD2+woO8SvpnPGAzSbwmjYo831bevIs1Ijx51qQ8hzJ/vEghZbvh59U8dPtfPMvWUrs1EwW8GCbcO74KDzxWF0k7QSbzu+ortLyr8do8Z+eQvF3p+7t51iS6vsKDu6zJfDyXAea89B51POKgK7wj+zG81ju/PPfZDLwx8RW7hhMzO5tMlDziL+E7XcuQPPJvErx/QYA8OcQpuzRarzxxuwU7wruzO6kZOT1oL5y8sqUivNhbbLxdg4U8pGczPOq6abwVLo08YKSTOxFUKbzmwpq8m2p/uzbrprwzoVk7POW3PJJ4HzwUBG081WMdPR/ZwjzAm4Y8Kj4vvZtMlDvquuk8GJemPLxQ2DpZqSE8n4xuvIQ6ML1QrO265sKaPL9x5jykZ7O8w0vKvGbmrzyCYo47jg8GvGlPyb0ZJ728wHLHPPQACj2vy767omXxPKqpz7s85Tc9NaI6PNJCDz2clB+8FQVOPJF3vrwG7zy7ab8yPJWZLbz1H1a8N3s9OueZW7yXAeY8pj/Vu8clrryCGoM6lMELvWsJAL3fVt47gNEWvPlBRTxqCJ+7iV0Avfyp/buUCRe8QifUPEThCr3meo87yf6wu9ymGr3DA7+8iqULPVuCpLwqPq87BV8mPWAzSbzn4ea7sevrux25Fb3Q+aI7sqWiu6Nm0ruNnVq8kk/gvK+i/7tShXC8MjoCvKf5Czwp9cK7w3SJPK/Lvjwu0Ic8iXtrPPlBxbiVma28xQUBvGB7VDwrhjq8lSjjufqyD73Mj6i83DXQPMN0iTsxEOI7+rIPO9TThryniEE8N3u9vJAvszxBl728UI6CvFAdODwZtvK7TxxXvPFuMbzG3SK8pD70vP9kFTznUrE8lVEi
O8YkTbxvAU86G//evMAqPLwcAMA8FC2sPP4cCr0t7nK8jH6Ou4Iagzw6VEC8qtKOPJPf9jsTdNa8nSS2vGXlTr1nnwU9lyqlu7h2dLvbNO84ur9gugVfJjwgseS6ybYlvDHI1jt7rsa6mLo7PLKlorz0AIo7cyO+vJyUn7vGbFg8gNGWPJ0kNjsFfvK7Hmh4PC0XMjy/cea7GyievOu7yrzZzLY8ChGsuwmBlTvc7US87JNsvHJLHDy9Ubm8tlZHvIZa3TzwJcW7TEPUOyJrG7t41UM9M4KNPO1Mwru76QC9J9WVvJgrBjxYYRa96SrTvLlYibsTLMu8drUWPDvkVjwd2GG8zyGBu9TTBrt/iCq8DjObu6lg4znvbG+8m9vJPBR1Nzw6nMs7O1WhuxFUKT2ZcxG7fT++PI0OpbvKjsc8CFd1PFdgNbxf67267gWYPK06x7yFO5G8MoEsvBsonju76YA8706EPOhxfTzocf258N4aOxEMHrwQxJI8tX6lOx0BITwisya9Z+eQPJEwFDztTEI8aHenvHNrSbwYBxC7jZ3auwc4Kb2+woM7gWEtvCgegjyEyeU7QifUuQ0J+7uoQZc8HHGKO8QjbLsdkFY818tVuwY3yDtWF8k7XlsnuzczMjxyS5y8tH1EvLnnvjtN0+q6w3SJPJLAqrxTZ4W6pGezO5N5AL2tgtI7HLi0vJHoiDt4Rg69VD+nPHdFLbuoQRe8s8TuPPZn4bw/LwW8VYcyPDhSfrwH8B29VUCIPOcKpju+Upo71BsSuxgHkLx1bCo7EVQpvPhAZLwEz4874lggPIx+DjvlMoQ9V2C1PHIi3bwlY+o7xyWuPMUFAbszgo075DGjPP5jNDwMeWS8Uq6vPMQjbLkR4167podgvK88Cb2aSzM8Lu9TuMByRzzSipq8kXe+PLHrazwx8ZW8tn8GPWgvHDwQ4v27Z77ROYVZ/DwXlsW6rzyJvPFFcrwwqQq7uFcoPLXGML3TqWa8JUSeui6nyLt0+987XFllupcB5jzWgmm6vsKDPKiJIj0isya9JIvIvK2C0rt+z9Q6omXxvLwITbuT33Y8\"\n - \ }\n ],\n \"model\": \"text-embedding-ada-002\",\n \"usage\": {\n \"prompt_tokens\": - 38,\n \"total_tokens\": 38\n }\n}\n" - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 8c7d0d952df8a533-MIA - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Mon, 23 Sep 2024 19:48:35 GMT - Server: - - cloudflare - Set-Cookie: - - __cf_bm=fCNrGtPzpUFtfoUMG2M7ss3WN9l7UWSHySlztYzxTug-1727120915-1.0.1.1-qw_aPQTh6.36xWyc5KuutqbYcTnlLJNPLVPbhmpVJi3BlkOJSv2ZFFB8ZaTHzSU5OUeNvdkT8Zf52isf39ig_g; - path=/; expires=Mon, 23-Sep-24 20:18:35 GMT; domain=.api.openai.com; HttpOnly; - Secure; SameSite=None - - _cfuvid=iDcOeEoxyAXVk7Sd7cdqMnYCstioELtzVAcYdG7ahuA-1727120915182-0.0.1.1-604800000; - path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - Transfer-Encoding: - - chunked - X-Content-Type-Options: - - nosniff - access-control-allow-origin: - - '*' - access-control-expose-headers: - - X-Request-ID - openai-model: - - text-embedding-ada-002 - openai-organization: - - crewai-iuxna1 - openai-processing-ms: - - '19' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15552000; includeSubDomains; preload - x-ratelimit-limit-requests: - - '10000' - x-ratelimit-limit-tokens: - - '10000000' - x-ratelimit-remaining-requests: - - '9999' - x-ratelimit-remaining-tokens: - - '9999947' - x-ratelimit-reset-requests: - - 6ms - x-ratelimit-reset-tokens: - - 0s - x-request-id: - - req_8ec4409ad3f853ceb951750c74bc1516 - http_version: HTTP/1.1 - status_code: 200 -- request: - body: '{"input": ["test value"], "model": "text-embedding-ada-002", "encoding_format": - "base64"}' - headers: - accept: - - application/json - accept-encoding: - - gzip, deflate - connection: - - keep-alive - content-length: - - '89' - content-type: - - application/json - cookie: - - __cf_bm=fCNrGtPzpUFtfoUMG2M7ss3WN9l7UWSHySlztYzxTug-1727120915-1.0.1.1-qw_aPQTh6.36xWyc5KuutqbYcTnlLJNPLVPbhmpVJi3BlkOJSv2ZFFB8ZaTHzSU5OUeNvdkT8Zf52isf39ig_g; - _cfuvid=iDcOeEoxyAXVk7Sd7cdqMnYCstioELtzVAcYdG7ahuA-1727120915182-0.0.1.1-604800000 - host: - - api.openai.com - user-agent: - - OpenAI/Python 1.47.0 - x-stainless-arch: - - arm64 - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - MacOS - x-stainless-package-version: - - 1.47.0 - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.11.7 - method: POST - uri: https://api.openai.com/v1/embeddings - response: 
- content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": - \"embedding\",\n \"index\": 0,\n \"embedding\": \"mPaDvPqIYbyw5KW7Yibxu+Bgu7wLu7s8EcDOvJTFp7xyhJS8Nq/uvPTpm7uVmV48+o6Au8qsnbxfNTi7Zb0aPK6NnzyMaQ68Rg2SPOjZVrxRfQU4LD/7ueef0ryFp6c7cMfAvLh6Q7ybh448dPgcvZ6yyztffgM7GlwLPUbwD7ztqgS9bxCMvIzmPjqNA0E7WHNRvMhVl7whuCQ9sbjcu8wgJjtbTSc8Y313PCm0j7zmyxu80FECvAxycDwmOui8bRm0vPSDTjyrGZc8t6YMO9W2w7zeUoC8KBS+Ows47DscakY8R175uyh6izzLgNS6DY/yvNkEIrwAlBO9uk56PBHd0LtWghi80W6EOw14DzxC3DU8fHG4PKsZFzzlSMw8SbV/u3AtjjwaeQ091yrMu8MHubxLctO6ltPiup37ljxiJvE8xV6/vA2VkbykVzA8WfYgPJfwZDy0FYK7v7naPNKoCLuesku7W2opPLTMtjtoMSM8BtMqPJxbxbvPLuE70txtOyrRET0ZIoe7/GK3vPKSFTumK2e8uwtOPKv8lLwn2rm8ECD9O/FvdLxm8f+75oLQOyh6izxKux694N3rPFKaB7x6nYG8aMvVO2DViTpF0w08XzW4u15hgbxNTCk8YkNzPF3BrzsEXyI9xKeKvCCbojxWghg85yKivBkiB73Ge0G8ZdR9uwKFTD3j1MM8KcvyPO7HBjx/aJC8Cv5nPDjvkbzPsbA8AeuZvHIeR7yJ72Y7qsKQPAWZJjzhAA28dwZYOzMB4jwChcw8PJ0ePOl5qLs1deq8aMvVuz9Lq7ymSOk8zhHfPJNC2DpyHkc7D88VPP65PbtyoRa8mWTtu6XxYrtFtos7/3aRO2bxf7xnrtM7ixIIPbpOejwc0JM7m7vzu83XWrxoTqW8pivnPNq71rxlOks80qgIvL1i1DxF04089AD/O+bLm7qhqSM6YOzsvKw2GT1NTCk94GC7PN8mt7vfjIS7tiO9O177MzyB1nk7u4h+vBk/CT39Hws8inI2PF1b4rpr3y+/6rOsvBfi4zuzkjK8Y2YUPX1F7zzIVRc9dJLPu7uI/ryYE4Y8CoE3u+0ntTx+Loy8SP5KvJa24Ds3T0C8N7WNPMmPm7xFtos7FajfPCj3OztFUD48pfeBvHwL6ztvjTy8H97Ou4m7Abzm6J28fkuOu+DGiDsP7Je8YkNzOZ37FrxEM7w8gpNNPZ2VybuifVq88pIVOSrodLx1MiE9DuZ4vBHATrxF0w08YiZxPJmtOLznBaA6cqEWPFi8nDvPsbC83m8CuLj9krscs5G86rOsO0wvp7n7RbU7URc4vKX3AT2ogu284zqRPIG/Fj2ss8m7LhlRvHoasrpu7Wq8aejXvA14jzuQF5s7TebbPPooMzr0Bp68g7BPO2x/gbt54K28btaHuw0Swjyw5CU9Daz0PC15/7zLA6S7GAWFPPKpeLszhDG8moFvvIIQ/joOTMY8NCQDvCd0bLzSJTk8fREKO/PMmTstYhw9W02nPDrDSDyeTH67QTxkO6v8FLrmyxu8VUiUOxlWbDwtYhy8RW1AvHCqPjuldDI815AZujkpFrv22lQ81vBHPAob6jshNdU8p8u4vDJnr7yc3hS8F+iCvO5hubzdsq46yHIZveDGiDyMTIw8yHIZu/urgryPd0k8n89NOwqBtzw1+Lk7GIK1umXUfTuMaQ68xCS7vIWnJ7mfb5+8E5okPJUcLruTQlg87aoEvY/6GDySbqG8CoG3O2/ziby493M8da/RvO7767v1oFA7Zb0aOyh6i7wLnrk82Z7UvIZe3LzEoWs7dNuaOzkpljzvGO681op6vPFYEb1+Lgy8mmqMvPAejbwOTEY7RvCPvArnBDwAsZW88pKVvPUjIDwyZy+7N9KPuym0DzsQCRq8Zbd7vCagtTtT8Q28cWcSvX0o7btm2py8R175ucc4FTxF6vA6M4QxPPTpm7zTX7276fZYO5dztLwdCpi6w22GO/5Zj7rCZ+c85JGXPHwLazymkTQ7Cz6LPF8Ytrlwqr480/lvvEwSJTx6/a+8qsIQO2x/Abx9EYo8g7BPPLe97zvFxIw87aqEPMhVFz1BPGQ8WfagPAgqMbtStwm67aoEvU4g4Lrzxnq8oIyhO3BKkDxHp8Q7ulQZvdfE/rzGe8G6XIcruRfogjyWtmC7idgDu6Gpo7tBPGS7FajfO4xMDLx9EYo86jDdOuxwgDubnnE7RW3AOzrgyjzEoWs803y/u/tFNbzZ5588wS3jPCUA5LrkDsg8hSTYPEq7njwDJZ688Yz2PFQOkDytcB27PXFVPLrRyTz1I6C8UrcJPC1/HjyQsc08NV6HPBswQryTQlg8MmevOy6coDsaXIs7Z67TO7kalTwsKJi8pDquvMSnijzVVpU8VUgUPfOvFz0q7pM8RbaLOyj3u7vuxwY8gVlJvEwSpTltuQW7eze0u0jhyLyAohS8gzOfu2kF2jxJgRo7J0AHPeqzrDoSYCA8YqnAPF8YNjzBFgC8t9rxvEJCA72s0Ms85mXOO+bLGzwGUFu801+9vJLr0bpQ3TO8gvkaO1PUi7vQ67S7FisvPI0gQ7wa9j08ocalPKk/QTwc0JM795coPNetG7yFiiU8LD/7u8SnirxKVdG8gvkau/AeDbzCMwK9DrITvCLyqDycwZI83yY3PKAJUrwO5ni8MPMmugitgDwOTEY88FJyvLJYLjtvjTw85yKiPIZB2ryDzVG8KZcNvNXTRTvtqoQ8bbkFPS3fzDt3bCW6NL61uw7meDuQlMu8pDquvMMHuTxoTqU7LMLKOi1/HryLRu07QkKDu28QjDsdChg8RgdzOt+MhLzQzrI78FLyOzy6IDwqBfc7ZJr5PBxqxjxwLQ48Dsl2vN5SgDz57q48EcBOPGvfr7tbTae8dTKhPIspazt1r9G7RBa6On4uDD2y8mC8idiDPAitgDu2I727qsIQPJHOTzyEUKE7t13Bu6v8FLxKux68UX0FPJlNijwVqF88idiDvGFvvDvau9a7tDIEvSdAh7w7/Uy7btaHPDaYC71JG028blO4vKiC7bweJ5q82iGkvGIm8TvtRLe81nOXvOztsLwhuKS75oLQvDHH3bwYn7e8Q3wHvGjL1bxdPuC8NnsJu0K/szxFUL45VA6QudGi6Ttqpau7WNmePHT4nLdJgZq8YLiHu9QcEbz+PA28Nq9uPInYgzrS3O07Ufq1OhgFBby1bAg8R6fEOu2k5btBPOS87HAAPTBw17xHp0Q8Daz0u7sLzjsOTEY8w22Gu/AejbyppQ486rMsufZdJLtKu5487wELvP92ETwX4uO6kc5POp6yyzwdh8i8DXgPPPA7D7z/dpE8bxAMPBift7tY2R49VUgUvH7lwLutU5s8u46dvNx4qjz7yAQ7mBOGO2BSu
jw1+Dm6ppE0O2C4h7uagW883m8CvAR8JDwKG2q8t6YMu8Jn57yZMAi9gzOfO181ODydEvo7uk76uy1/nrxxAcU8VUiUvCiRbrxQ3TM8umv8vKYxBrxMrNc87seGvHbM0zybnnE7mCppvGo/3ryy8mC8e7qDvKw2Gb2VHC68d4mnOmeR0TwZVmw8btYHPYZBWrxJgRo8ehqyvO8Y7jjNPag7JYMzvJfw5Ltvjby7sbhcOAnKgjy0r7Q7VKjCO7dAPztkAMc74zqRPN8mt7q3XcE74MaIvDBw17rBsDI8aYgpO+M6kbztqgQ8GAUFvWvfr7vVUPY8Y0kSvIHWeTu4/ZI7g7DPPIr1Bb3bWyi79/dWvGz8sbsIKrG7NDtmvEQWurwrInm85HSVPARfIrwPA/s8AyWeu6FDVjxyhJS8FNSoO8GwMrx9EQq8jGPvu5zelLwzBwE7SbX/umKpQLvdz7C8wjOCPLag7Tzsh2O8bbkFu5E0Hbx9KO077Y2CPMNthjogmyI93C9fPGW9Gj2bIcE8OmMaO0GiMb0gm6K8QAJgvOR0FTvZ5x893lIAPZiQtrwzBwG8NV4HPVF9BTyW02I7Qty1u+5hubsNeI88KgV3vD8uqbvRhec7t0A/vbgU9jsEfKS7fkuOvDhswry493O8J9q5vBJgoDzSJbm8NV6HPN8mtztiQ/O8CWS1PJTiqbuB1nm8RdONO1c5zTtVKxI9uZdFvPi0qrsi1aY6QSUBvJfZgbwySq08blM4vO3B5zxJmP28NpgLPDpGmDsjLK07VoKYuydAhzt/nHW8jYaQvPW90rx02xo8uRqVPGBSujmldLK6Ufo1u5j2g7tb59m7CoG3vEh7+7y6Tvo76jDdOrN1MLy0rzQ7k0JYOuztsDyfUh278W90vEuP1TzRiwY9xKcKvdenfDxYvBw7LpwgPJFRH7wBaEo8nkx+vDpGmLwV8ao8JOPhvKAmVLz6iOG8ECB9O4HWeTxchyu8K6VIvHdspbtJG008QSWBPOl5KDsJygK7sAGoPGbxf7yeL3y7N9KPu0P5N7mYEwY84zoRvGXU/TtnrlM88+P8vPPj/Lsqa0S8uPfzO/8QxLrit8G6uk56vBzQE7z4MVu8Dsl2PEjhSDxNTKm8Iw+ruTX4uby49/O8Gva9O2banLz/dhG84N1rvA5MxjzHOJW8dJLPvK1TGzwndOw8QkKDPAgNLzpJgRq9MarbO59SHb24/RK9f4USu59SnTxRfQU8l3M0O5V83Ly0Sec88izIOdcqTDzVOZO821uou0v1IjtSsWo4CMTjPFKaBzxvCu28RiR1u4qPuDwZPwk8+8LlO9JCOzx7NzS8HsFMumNmlLzBLWM7TC8nPDJKLbwRwE683JUsvOWuGbrWDco7I4zbvIZeXLxLj9W8jsAUOrJYrry7jh07b/MJvSTjYbyE6tO7sJvavLG4XLvqlio7F2WzO6rfEj2fUh08hAdWPHl64Ltqpas8vzyqvBf/5bucwZK7rLNJvEOT6ryeTH68qIgMPLLyYLzau1a7w22GvO3B57twqj67qsIQPAUz2Tz9H4u729hYvG7WB7znIiK7RJkJPNQz9Dnx8kM8fPSHOymu8Dt9EYo8LFx9vF8YNrx9EYq7Di9EPJ4YmbsM2L27CMRjPrpUGbpJgZq8BZmmPCnL8rtxZ5K77scGPHUyoTtYc9E7tBWCPJu78zynZWs78g/GO04g4DvOd6w7l9mBPLABKLw7gBy8NCQDvCRJL7yk1OA7hacnvJ0SejvfqQa8ZB3JPLDkpbu6N5c7dTKhvJ4v/DzSqIg8YM/qvGGMPrsfRJw8NEGFPKZI6bxYvBy8jGkOPMb+ELxm8f88ZvH/OzHH3TvVtkO8Z67TuisLFrwOshM8d4knPE894jl2T6O8YFK6OuKavzzLA6S84QCNu3ngrTxlvZo8qsKQPNYNyrrehmU9aiLcvGo/3js2Fby6v9ZcvAFoyjvS3O28u4j+PN+MBL3nn1I7l3O0vHvR5ruAopQ78pIVvN5SADy2oG27cWcSO0uP1Tqjmty85subvKhrCj2s0Ms8p06IPF9+Az0HbV27tiO9OhyzEboOshO9MweBvJE0Hb2mSGm78B4NufKSFTsH8Kw7ws00vHcGWLy4/ZK8gDzHvFPr7jm8KNA8SGSYO7wo0Dxe+7M5YLiHu9PiDLwprnA8GnmNO54v/DyZyro65HSVO35icbxVxUQ6PDfRO7VsiLxjxsK6O4AcvcJnZ7v50ay8wjOCO6gFPTuE6lO78ql4vMOE6Ts0oTO8RgdzPEGisbt7uoO73BJdPIAfxTq3w467kc7PvIIQ/jkBaMo7/lmPvF+bBbxY2R68Qr+zPObLG7yMY286Tz1ivNDrtLwfRBw7+8gEvXR1Tbo0oTM8sTusvDjvkbt54K037vvruzVehzvfqQY9Y2D1uo7AlLzCM4I6KU7CvO5hubzbW6i8zLrYu8rJHz18cbi77uQIvQ7J9rz6iOE7OxrPuyxc/bwVqN86bZwDPWbxf7yEbaO8on1avHHkQr5kHck8LFx9PEjhSLyXVrI7lZneu/OvFz3z43y8Marbu4IWnTtJmH0895covEACYLzLgNS6KPe7O2NmlDydEno7pq42O4CilDwR3dA8ev2vPBCjTLupIr87GJ83u97ssru8q5+7XvuzvN8mNzzwHo07S49VurkalbxL2CA81BbyORWo3zvfCbU7W+fZO2NmlDycPkO80W6EOy4ZUTyjHaw8GII1PZzY9btpaye86LxUvPFv9DxkHUk8rLPJO+owXTyr/JS83mljPJ14x7tkHUk75StKvEhkmDzOEV87JgaDvEwSJbx/f3M8ED3/O61wHbyvYVa8j3fJO8Z7QTtfm4W8O4AcvL1i1Lwblg88NCQDvRxqxjtgUrq7/GK3vI/6mDzvfru8j92WPBJgILwLu7u8yqydPLjgELya5zy7DNi9um7t6jwLuzu7HE1EPG25hbro2VY8xziVu/A7jzy0FYK7umv8uiYjBT1oTqW8Z5FRvNBRAjulDmU85a6ZPAshibsmOmg8iBuwO/gxW7xBha+81W34u2z8sbxCvzM8wlCEPNx4qjwYHGg8zzQAPQ2s9DzFxAw8YibxvKzQy7syZ6889AB/PE3m2zunZes8Y0mSvH2OurzKRlC8FYtdOkElAT3ehmW8RDO8vBJ9ojwk42G7Iywtu97ssr2gJtS8R0F3PArnBD1wqj47NDvmPJ9SnTxR+jU9gpPNu+7kCD3yqXi8ZaAYvOciorxp6Fe8j/qYuuQOSLoGUFu8t9pxvFefmrx1FR89y2NSu0mY/bzEpwo87aqEvPIsyLu95SO6e9FmvBM01zw8nR47H/tQvIWnJzzw1cG8+e6uPAbTqrwv1qQ7YiZxvMB2Lr0JyoK8dwbYPJw+w7svuaI83bKuPGz8sbtDfAe8YQnvujrDyLwRQ5671TmTPNcqTLxch6u8cMfAvPcU2TleeGS7t9pxPIHz+7tT6+67FQ6tPLj38zv+PA27ehoyu8S+7bucwRK8hYolvDJnLzxl
vRo8SYEaPACxFb0JR7O8DrKTPDZ7CbvWDcq6Gby5O6iIDLzO9Fw8uPfzvNVteDzDbQa9ghD+vKiIDLvOlK67SHt7vKfourtuUzi8pFewvJ4YmTzOdyy7btYHvFEXuLqeNRs83ya3vOEADTodCpg8jGPvPC1iHL1vJ++8gXZLPEN2aDxxZxK8ZvceO8Et47qbIcG84/HFvDDzJr0c0JM8iLXiu1VIFLxtNja7IvKoPLCbWjw6Y5q8qAU9O9vY2LkriMa8g7DPO7gU9rwMWw07P8jbvIgbMLxRF7g85A5IPJa24DyujZ87bJZkPCgUPrwmBoM5y2NSvNmB0rzzSUo8UN0zvJ4YmTwzHmS8o7devGbxfzxGDZK8t6aMvBA9/zy/udq7WLwcvPbaVLxlt/s8Y+PEPPvf57yzdTC8vWLUvO2qhDspy/K8MBCpu4KTzTsVi928Po7XuiOpXTxV4sY6L9YkO3ldXrvtRDe8Wq3Vu7N1MLtnFKG80W4EPLXpODtjSRI80Qi3OhTUKD0K5wS7fmLxPFUrkjuTJVY7S9ggPBxqxrsALsY7m55xPG5wurwoegu83UxhvKJ9WjxaMKU8UEOBPPvIhDuImOC7VUiUO9LFiroVqN88mWRtOxTUqDsOL0S9hsQpPBsTQDyS69E7SOFIvIRtI7yZMAg78B4NOxfoAr06w0g888b6u537FjwIDS+8kLHNOy1/njq0MgQ8KmtEPEXq8Dtsf4E8nRJ6PM6ULjx9qzy8ZTrLu+5hubmWObC8jqMSvVi8HLzqMF08QkKDPAs47LtSt4m7wjOCPHngrbyoiIw7JgaDvEW2izuVfNy8j/oYPDjvkTu44BC7mmoMPSlOwrutU5s80+IMuws+CzzjOhG9b428PDEtqzsE+VQ7aqUruvPMmbx4w6u8Q18FvJCxTbzUHJG7Y2B1PJgN57vu5Ig9wwe5PBL6Urvdz7A7gzOfO7sLTjzzSco7FFHZPD3Xoroblg+94EO5POEX8Do7/Uw8tWyIu2lrJ71Ftgs77wGLO0menDy6Tnq84MaIPPNJSjzCUAS8ghYdPTsaTzyTJVa7Y333OZWZXjyTQti7Ztocu0VQvrxvjbw7160bPE1MqbwGtqi8+LQqPOef0ryEUCG7ubTHvIoMaTzit0E70GjlPJw+Qzwblg+9ubRHvJOoJTxfm4W7z7GwvIi1YjtiQ3M7\"\n - \ }\n ],\n \"model\": \"text-embedding-ada-002\",\n \"usage\": {\n \"prompt_tokens\": - 2,\n \"total_tokens\": 2\n }\n}\n" - headers: - CF-Cache-Status: - - DYNAMIC - CF-RAY: - - 8c7d0d9a1df7a533-MIA - Connection: - - keep-alive - Content-Encoding: - - gzip - Content-Type: - - application/json - Date: - - Mon, 23 Sep 2024 19:48:35 GMT - Server: - - cloudflare - Transfer-Encoding: - - chunked - X-Content-Type-Options: - - nosniff - access-control-allow-origin: - - '*' - access-control-expose-headers: - - X-Request-ID - openai-model: - - text-embedding-ada-002 - openai-organization: - - crewai-iuxna1 - openai-processing-ms: - - '19' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=15552000; includeSubDomains; preload - x-ratelimit-limit-requests: - - '10000' - x-ratelimit-limit-tokens: - - '10000000' - x-ratelimit-remaining-requests: - - '9999' - x-ratelimit-remaining-tokens: - - '9999998' - x-ratelimit-reset-requests: - - 6ms - x-ratelimit-reset-tokens: - - 0s - x-request-id: - - req_5916e70901482ad417bdb6d701dee598 - http_version: HTTP/1.1 - status_code: 200 -version: 1 diff --git a/tests/memory/short_term_memory_test.py b/tests/memory/short_term_memory_test.py index 8ae4e714c..c09b22691 100644 --- a/tests/memory/short_term_memory_test.py +++ b/tests/memory/short_term_memory_test.py @@ -1,5 +1,5 @@ import pytest - +from unittest.mock import patch from crewai.agent import Agent from crewai.crew import Crew from crewai.memory.short_term.short_term_memory import ShortTermMemory @@ -26,7 +26,6 @@ def short_term_memory(): return ShortTermMemory(crew=Crew(agents=[agent], tasks=[task])) -@pytest.mark.vcr(filter_headers=["authorization"]) def test_save_and_search(short_term_memory): memory = ShortTermMemoryItem( data="""test value test value test value test value test value test value @@ -35,12 +34,28 @@ def test_save_and_search(short_term_memory): agent="test_agent", metadata={"task": "test_task"}, ) - short_term_memory.save( - value=memory.data, - metadata=memory.metadata, - agent=memory.agent, - ) - find = short_term_memory.search("test value", score_threshold=0.01)[0] - assert find["context"] == memory.data, "Data value mismatch." - assert find["metadata"]["agent"] == "test_agent", "Agent value mismatch." 
+ with patch.object(ShortTermMemory, "save") as mock_save: + short_term_memory.save( + value=memory.data, + metadata=memory.metadata, + agent=memory.agent, + ) + + mock_save.assert_called_once_with( + value=memory.data, + metadata=memory.metadata, + agent=memory.agent, + ) + + expected_result = [ + { + "context": memory.data, + "metadata": {"agent": "test_agent"}, + "score": 0.95, + } + ] + with patch.object(ShortTermMemory, "search", return_value=expected_result): + find = short_term_memory.search("test value", score_threshold=0.01)[0] + assert find["context"] == memory.data, "Data value mismatch." + assert find["metadata"]["agent"] == "test_agent", "Agent value mismatch." diff --git a/uv.lock b/uv.lock index 6abd2b4a8..b0416fa72 100644 --- a/uv.lock +++ b/uv.lock @@ -627,15 +627,14 @@ wheels = [ [[package]] name = "crewai" -version = "0.67.1" +version = "0.74.0" source = { editable = "." } dependencies = [ - { name = "agentops" }, { name = "appdirs" }, { name = "auth0-python" }, + { name = "chromadb" }, { name = "click" }, { name = "crewai-tools" }, - { name = "embedchain" }, { name = "instructor" }, { name = "json-repair" }, { name = "jsonref" }, @@ -683,14 +682,13 @@ dev = [ [package.metadata] requires-dist = [ - { name = "agentops", specifier = ">=0.3.0" }, { name = "agentops", marker = "extra == 'agentops'", specifier = ">=0.3.0" }, { name = "appdirs", specifier = ">=1.4.4" }, { name = "auth0-python", specifier = ">=4.7.1" }, + { name = "chromadb", specifier = ">=0.4.24" }, { name = "click", specifier = ">=8.1.7" }, - { name = "crewai-tools", specifier = ">=0.12.1" }, + { name = "crewai-tools", specifier = ">=0.13.1" }, { name = "crewai-tools", marker = "extra == 'tools'", specifier = ">=0.12.1" }, - { name = "embedchain", specifier = ">=0.1.114" }, { name = "instructor", specifier = ">=1.3.3" }, { name = "json-repair", specifier = ">=0.25.2" }, { name = "jsonref", specifier = ">=1.1.0" }, @@ -730,7 +728,7 @@ dev = [ [[package]] name = "crewai-tools" -version = "0.12.1" +version = "0.13.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "beautifulsoup4" }, @@ -748,9 +746,9 @@ dependencies = [ { name = "requests" }, { name = "selenium" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/11/60/1860127d927939f9143cab9af059cfbe6f160839b6ba1d652a9ed4e04fa6/crewai_tools-0.12.1.tar.gz", hash = "sha256:22fa3ea57936913faed77a2a64c131371f78b2ced207e63dcc71220eac445698", size = 420190 } +sdist = { url = "https://files.pythonhosted.org/packages/d5/81/b8a0bb984aea2af49b0072e074c87c75a6c4581902b81f3a3d46f95f01c7/crewai_tools-0.13.1.tar.gz", hash = "sha256:363c7ec717f4c6f9b61cec9314a5ec2fbd026d75e8e6278f49f715ed5915cd4d", size = 816254 } wheels = [ - { url = "https://files.pythonhosted.org/packages/23/e6/cc9acbc6ee828898956b18036643fc2150b6c1b976ab34f29b9cadc085b5/crewai_tools-0.12.1-py3-none-any.whl", hash = "sha256:e87d393dd1900834a224686644e025eb44e74171f317c4ff2df778aff6ade4b8", size = 463435 }, + { url = "https://files.pythonhosted.org/packages/09/8a/04c885da3e01d1f11478dd866d3506906bfb60d7587627dd4b132ff10f64/crewai_tools-0.13.1-py3-none-any.whl", hash = "sha256:62067e2502bf66c0ae2e3a833c60b900bd1f793a9a80895a1f10a9cfa1b5dc3c", size = 463444 }, ] [[package]] @@ -921,7 +919,7 @@ wheels = [ [[package]] name = "embedchain" -version = "0.1.122" +version = "0.1.123" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "alembic" }, @@ -934,6 +932,7 @@ dependencies = [ { name = "langchain-cohere" }, { name = "langchain-community" }, { 
name = "langchain-openai" }, + { name = "langsmith" }, { name = "mem0ai" }, { name = "openai" }, { name = "posthog" }, @@ -945,9 +944,9 @@ dependencies = [ { name = "sqlalchemy" }, { name = "tiktoken" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/25/9b/fa14dc95f8736c672bcebd677f48990670f1a9fac8ea1631222b8b820d69/embedchain-0.1.122.tar.gz", hash = "sha256:ea0a4d00a4a1909e0d662dc499fa6a0da119783ec4773df1271da74da3e8296b", size = 124799 } +sdist = { url = "https://files.pythonhosted.org/packages/5d/6a/955b5a72fa6727db203c4d46ae0e30ac47f4f50389f663cd5ea157b0d819/embedchain-0.1.123.tar.gz", hash = "sha256:aecaf81c21de05b5cdb649b6cde95ef68ffa759c69c54f6ff2eaa667f2ad0580", size = 124797 } wheels = [ - { url = "https://files.pythonhosted.org/packages/95/3f/42c97c1d3c9483076843987982a018115b6a28be02091fb475e6dbc743f2/embedchain-0.1.122-py3-none-any.whl", hash = "sha256:c137be81d0949b5ee16c689837d659837980cfabbb38643c2720cd1a794d8d27", size = 210911 }, + { url = "https://files.pythonhosted.org/packages/a7/51/0c78d26da4afbe68370306669556b274f1021cac02f3155d8da2be407763/embedchain-0.1.123-py3-none-any.whl", hash = "sha256:1210e993b6364d7c702b6bd44b053fc244dd77f2a65ea4b90b62709114ea6c25", size = 210909 }, ] [[package]] @@ -1551,7 +1550,7 @@ wheels = [ [[package]] name = "httpx" -version = "0.27.2" +version = "0.27.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -1560,9 +1559,9 @@ dependencies = [ { name = "idna" }, { name = "sniffio" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/78/82/08f8c936781f67d9e6b9eeb8a0c8b4e406136ea4c3d1f89a5db71d42e0e6/httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2", size = 144189 } +sdist = { url = "https://files.pythonhosted.org/packages/5c/2d/3da5bdf4408b8b2800061c339f240c1802f2e82d55e50bd39c5a881f47f0/httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5", size = 126413 } wheels = [ - { url = "https://files.pythonhosted.org/packages/56/95/9377bcb415797e44274b51d46e3249eba641711cf3348050f76ee7b15ffc/httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0", size = 76395 }, + { url = "https://files.pythonhosted.org/packages/41/7b/ddacf6dcebb42466abd03f368782142baa82e08fc0c1f8eaa05b4bae87d5/httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5", size = 75590 }, ] [package.optional-dependencies] @@ -1908,7 +1907,7 @@ wheels = [ [[package]] name = "langchain" -version = "0.2.16" +version = "0.3.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -1923,30 +1922,31 @@ dependencies = [ { name = "sqlalchemy" }, { name = "tenacity" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/fd/53/8ebf21de8d17e7e0f0998f28d689f60d7ed420acb7ab2fba59ca04e80e54/langchain-0.2.16.tar.gz", hash = "sha256:ffb426a76a703b73ac69abad77cd16eaf03dda76b42cff55572f592d74944166", size = 414668 } +sdist = { url = "https://files.pythonhosted.org/packages/70/b2/258c6a33b5e5f817a57ecd22b1e74756f7246ac66f39d0cf6d2ef515fcb7/langchain-0.3.3.tar.gz", hash = "sha256:6435882996a029a60c61c356bbe51bab4a8f43a54210f5f03e3c4474d19d1842", size = 416891 } wheels = [ - { url = "https://files.pythonhosted.org/packages/0d/29/635343c0d155997569b544d26da5a2a9ebade2423baffc9cd6066b01a386/langchain-0.2.16-py3-none-any.whl", hash = 
"sha256:8f59ee8b45f268df4b924ea3b9c63e49286efa756d16b3f6a9de5c6e502c36e1", size = 1001195 }, + { url = "https://files.pythonhosted.org/packages/92/82/c17abaa44074ec716409305da4783f633b0eb9b09bb28ed5005220269bdb/langchain-0.3.3-py3-none-any.whl", hash = "sha256:05ac98c674853c2386d043172820e37ceac9b913aaaf1e51217f0fc424112c72", size = 1005176 }, ] [[package]] name = "langchain-cohere" -version = "0.1.9" +version = "0.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cohere" }, { name = "langchain-core" }, { name = "langchain-experimental" }, { name = "pandas" }, + { name = "pydantic" }, { name = "tabulate" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a4/a9/30462b68f8c15da886078fe5c96fab3085241168ea03d968eee1182e00a9/langchain_cohere-0.1.9.tar.gz", hash = "sha256:549620d23bc3d77f62d1045787095fe2c1cfa233dba69455139f9a2f65f952fa", size = 29987 } +sdist = { url = "https://files.pythonhosted.org/packages/ae/ea/53fd2515e353cac4ddd6d7a41dbb0651dfc9ffb0924acb7a1aa7a722f29b/langchain_cohere-0.3.1.tar.gz", hash = "sha256:990bd4db68e229371c90eee98a1a78b4f4d33a32c22c8da6c2cd30b5044de9eb", size = 36739 } wheels = [ - { url = "https://files.pythonhosted.org/packages/52/b1/ee8d44898cfe43703f05a0ffd95294d3ebe4c61879f19c6357c860131312/langchain_cohere-0.1.9-py3-none-any.whl", hash = "sha256:96d6a15125797319474ac84b54024e5024f3f5fc45032ebf228d95d6998c9b13", size = 35218 }, + { url = "https://files.pythonhosted.org/packages/64/5e/bbfb1b33703a973e7eef6582b523ae932e7e64c9b84ac7eecaa8af71475e/langchain_cohere-0.3.1-py3-none-any.whl", hash = "sha256:adf37542feb293562791b8dd1691580b0dcb2117fb987f2684f694912465f554", size = 43992 }, ] [[package]] name = "langchain-community" -version = "0.2.17" +version = "0.3.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -1955,19 +1955,20 @@ dependencies = [ { name = "langchain-core" }, { name = "langsmith" }, { name = "numpy" }, + { name = "pydantic-settings" }, { name = "pyyaml" }, { name = "requests" }, { name = "sqlalchemy" }, { name = "tenacity" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1f/54/be928e3962d24b40c31899f5c5ed99b0c7ef7c3bb7601eb2fe7a6ce75dc4/langchain_community-0.2.17.tar.gz", hash = "sha256:b0745c1fcf1bd532ed4388f90b47139d6a6c6ba48a87aa68aa32d4d6bb97259d", size = 1589425 } +sdist = { url = "https://files.pythonhosted.org/packages/86/6e/119bbbd4d55ab14dc6fc4a82a2466b88f7ddb989bdbdfcf96327c5daba4e/langchain_community-0.3.2.tar.gz", hash = "sha256:469bf5357a08c915cebc4c506dca4617eec737d82a9b6e340df5f3b814dc89bc", size = 1608524 } wheels = [ - { url = "https://files.pythonhosted.org/packages/ac/33/c6ee472412f751062311075bb391a7870ab57cdb8da5d47f359895b2d3c2/langchain_community-0.2.17-py3-none-any.whl", hash = "sha256:d07c31b641e425fb8c3e7148ad6a62e1b54a9adac6e1173021a7dd3148266063", size = 2339964 }, + { url = "https://files.pythonhosted.org/packages/cc/57/a8b4826eaa29d3663c957251ab32275a0c178bdb0e262a1204ed820f430c/langchain_community-0.3.2-py3-none-any.whl", hash = "sha256:fffcd484c7674e81ceaa72a809962338bfb17ec8f9e0377ce4e9d884e6fe8ca5", size = 2367818 }, ] [[package]] name = "langchain-core" -version = "0.2.41" +version = "0.3.12" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jsonpatch" }, @@ -1978,48 +1979,48 @@ dependencies = [ { name = "tenacity" }, { name = "typing-extensions" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/2a/92/2ad97f0c23b5ee5043df1a93d97edd4404136003e7d22b641de081738408/langchain_core-0.2.41.tar.gz", hash = "sha256:bc12032c5a298d85be754ccb129bc13ea21ccb1d6e22f8d7ba18b8da64315bb5", size = 316952 } +sdist = { url = "https://files.pythonhosted.org/packages/7b/15/76ec101e550e7e16de85e64fcb4ff2d281cb70cfe65c95ee6e56182a5f51/langchain_core-0.3.12.tar.gz", hash = "sha256:98a3c078e375786aa84939bfd1111263af2f3bc402bbe2cac9fa18a387459cf2", size = 327019 } wheels = [ - { url = "https://files.pythonhosted.org/packages/bc/02/2b2cf9550cee1a7ffa42fe60c55e2d0e7d397535609b42562611fb40e78d/langchain_core-0.2.41-py3-none-any.whl", hash = "sha256:3278fda5ba9a05defae8bb19f1226032add6aab21917db7b3bc74e750e263e84", size = 397013 }, + { url = "https://files.pythonhosted.org/packages/ce/4a/a6499d93805c3e6316e641b6934e23c98c011d00b9a2138835d567e976e5/langchain_core-0.3.12-py3-none-any.whl", hash = "sha256:46050d34f5fa36dc57dca971c6a26f505643dd05ee0492c7ac286d0a78a82037", size = 407737 }, ] [[package]] name = "langchain-experimental" -version = "0.0.65" +version = "0.3.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "langchain-community" }, { name = "langchain-core" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e1/e0/d92210398a006f6e43ddd25166537f79cb3e9ccc32e316e70d349353842b/langchain_experimental-0.0.65.tar.gz", hash = "sha256:83706df07d8a7e6ec1bda74174add7e4431b5f4a8818e19b65986b94c9c99b25", size = 138516 } +sdist = { url = "https://files.pythonhosted.org/packages/bf/41/84d3eac564261aaab45bc02bdc43b5e49242439c6f2844a24b81404a17cd/langchain_experimental-0.3.2.tar.gz", hash = "sha256:d41cc28c46f58616d18a1230595929f80a58d1982c4053dc3afe7f1c03f22426", size = 139583 } wheels = [ - { url = "https://files.pythonhosted.org/packages/97/ca/93913b7530b36869946ca8f93b161bea294ea46a367e748943a78bc3553c/langchain_experimental-0.0.65-py3-none-any.whl", hash = "sha256:2a0f268cfb8c79d43cedf9c4840f70bd8b25934e595311e6690804d0355dd7ee", size = 207160 }, + { url = "https://files.pythonhosted.org/packages/63/f6/d80592aa8d335af734054f5cfe130ecd38fdfb9c4f90ba0007f0419f2fce/langchain_experimental-0.3.2-py3-none-any.whl", hash = "sha256:b6a26f2a05e056a27ad30535ed306a6b9d8cc2e3c0326d15030d11b6e7505dbb", size = 208126 }, ] [[package]] name = "langchain-openai" -version = "0.1.25" +version = "0.2.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "langchain-core" }, { name = "openai" }, { name = "tiktoken" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/2f/cb/98fe365f2e5eee39d0130279959a84182ab414879b666ffc2b9d69b95633/langchain_openai-0.1.25.tar.gz", hash = "sha256:eb116f744f820247a72f54313fb7c01524fba0927120d4e899e5e4ab41ad3928", size = 45224 } +sdist = { url = "https://files.pythonhosted.org/packages/55/4c/0a88c51192b0aeef5212019060da7112191750ab7a185195d8b45835578c/langchain_openai-0.2.2.tar.gz", hash = "sha256:9ae8e2ec7d1ca84fd3bfa82186724528d68e1510a1dc9cdf617a7c669b7a7768", size = 42364 } wheels = [ - { url = "https://files.pythonhosted.org/packages/7f/2e/a4430cad7a98e29e9612648f8b12d7449ab635a742c19bf1d62f8713ecaa/langchain_openai-0.1.25-py3-none-any.whl", hash = "sha256:f0b34a233d0d9cb8fce6006c903e57085c493c4f0e32862b99063b96eaedb109", size = 51550 }, + { url = "https://files.pythonhosted.org/packages/b0/4e/c62ce98a5412f031f7f03dda5c35b6ed474e0083986261073ca9da5554d5/langchain_openai-0.2.2-py3-none-any.whl", hash = "sha256:3a203228cb38e4711ebd8c0a3bd51854e447f1d017e8475b6467b07ce7dd3e88", size = 49687 
}, ] [[package]] name = "langchain-text-splitters" -version = "0.2.4" +version = "0.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "langchain-core" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/83/b3/b1ccde47c86c5fe2585dc012555cff7949c556bd6993dd9c09e49a356190/langchain_text_splitters-0.2.4.tar.gz", hash = "sha256:f7daa7a3b0aa8309ce248e2e2b6fc8115be01118d336c7f7f7dfacda0e89bf29", size = 20236 } +sdist = { url = "https://files.pythonhosted.org/packages/57/35/08ac1ca01c58da825f070bd1fdc9192a9ff52c0a048f74c93b05df70c127/langchain_text_splitters-0.3.0.tar.gz", hash = "sha256:f9fe0b4d244db1d6de211e7343d4abc4aa90295aa22e1f0c89e51f33c55cd7ce", size = 20234 } wheels = [ - { url = "https://files.pythonhosted.org/packages/8f/f3/d01591229e9d0eec1e8106ed6f9b670f299beb1c94fed4aa335afa78acb0/langchain_text_splitters-0.2.4-py3-none-any.whl", hash = "sha256:2702dee5b7cbdd595ccbe43b8d38d01a34aa8583f4d6a5a68ad2305ae3e7b645", size = 25552 }, + { url = "https://files.pythonhosted.org/packages/da/6a/d1303b722a3fa7a0a8c2f8f5307e42f0bdbded46d99cca436f3db0df5294/langchain_text_splitters-0.3.0-py3-none-any.whl", hash = "sha256:e84243e45eaff16e5b776cd9c81b6d07c55c010ebcb1965deb3d1792b7358e83", size = 25543 }, ] [[package]] @@ -2166,7 +2167,7 @@ wheels = [ [[package]] name = "mem0ai" -version = "0.1.17" +version = "0.1.19" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "langchain-community" }, @@ -2179,9 +2180,9 @@ dependencies = [ { name = "rank-bm25" }, { name = "sqlalchemy" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/9b/23/fc537f7125c88efeb81190b661b4e17786d039d4e00da2975ea253b45c8f/mem0ai-0.1.17.tar.gz", hash = "sha256:3b24c5904c96717c2285847f7ad98be0167421fd67b23c19771e81bef00ec2f1", size = 51167 } +sdist = { url = "https://files.pythonhosted.org/packages/6e/12/23f8f250a2ce798a51841417acbbfc9c12c294d3ae427e81a0a0dbab54f6/mem0ai-0.1.19.tar.gz", hash = "sha256:faf7c198a85df2f502ac41fe2bc1593ca0383f993b431a4e4a36e0aed3fa533c", size = 51167 } wheels = [ - { url = "https://files.pythonhosted.org/packages/89/49/4fa5e5f759004e90fa9b4adbc9f224f09f09e182bf3d4dfebed69b10fe8a/mem0ai-0.1.17-py3-none-any.whl", hash = "sha256:6505bc45880c26b25edf0a17242d71939ebaab27be0ae09b77f25fd400f61b76", size = 73252 }, + { url = "https://files.pythonhosted.org/packages/7e/43/04d22bc9cac6fa19b10a405c59c21e94b8ae2a180b40307ec4a577f6ee39/mem0ai-0.1.19-py3-none-any.whl", hash = "sha256:dfff9cfe191072abd34ed8bb4fcbee2819603eed430d89611ef3181b1a46fff9", size = 73240 }, ] [[package]] @@ -2496,6 +2497,8 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b2/07/8cbb75d6cfbe8712d8f7f6a5615f083c6e710ab916b748fbb20373ddb142/multiprocess-0.70.17-py311-none-any.whl", hash = "sha256:2884701445d0177aec5bd5f6ee0df296773e4fb65b11903b94c613fb46cfb7d1", size = 144346 }, { url = "https://files.pythonhosted.org/packages/a4/69/d3f343a61a2f86ef10ed7865a26beda7c71554136ce187b0384b1c2c9ca3/multiprocess-0.70.17-py312-none-any.whl", hash = "sha256:2818af14c52446b9617d1b0755fa70ca2f77c28b25ed97bdaa2c69a22c47b46c", size = 147990 }, { url = "https://files.pythonhosted.org/packages/c8/b7/2e9a4fcd871b81e1f2a812cd5c6fb52ad1e8da7bf0d7646c55eaae220484/multiprocess-0.70.17-py313-none-any.whl", hash = "sha256:20c28ca19079a6c879258103a6d60b94d4ffe2d9da07dda93fb1c8bc6243f522", size = 149843 }, + { url = "https://files.pythonhosted.org/packages/ae/d7/fd7a092fc0ab1845a1a97ca88e61b9b7cc2e9d6fcf0ed24e9480590c2336/multiprocess-0.70.17-py38-none-any.whl", hash = 
"sha256:1d52f068357acd1e5bbc670b273ef8f81d57863235d9fbf9314751886e141968", size = 132635 }, + { url = "https://files.pythonhosted.org/packages/f9/41/0618ac724b8a56254962c143759e04fa01c73b37aa69dd433f16643bd38b/multiprocess-0.70.17-py39-none-any.whl", hash = "sha256:c3feb874ba574fbccfb335980020c1ac631fbf2a3f7bee4e2042ede62558a021", size = 133359 }, ] [[package]] @@ -3179,8 +3182,6 @@ version = "5.9.8" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/90/c7/6dc0a455d111f68ee43f27793971cf03fe29b6ef972042549db29eec39a2/psutil-5.9.8.tar.gz", hash = "sha256:6be126e3225486dff286a8fb9a06246a5253f4c7c53b475ea5f5ac934e64194c", size = 503247 } wheels = [ - { url = "https://files.pythonhosted.org/packages/fe/5f/c26deb822fd3daf8fde4bdb658bf87d9ab1ffd3fca483816e89a9a9a9084/psutil-5.9.8-cp27-none-win32.whl", hash = "sha256:36f435891adb138ed3c9e58c6af3e2e6ca9ac2f365efe1f9cfef2794e6c93b4e", size = 248660 }, - { url = "https://files.pythonhosted.org/packages/32/1d/cf66073d74d6146187e2d0081a7616df4437214afa294ee4f16f80a2f96a/psutil-5.9.8-cp27-none-win_amd64.whl", hash = "sha256:bd1184ceb3f87651a67b2708d4c3338e9b10c5df903f2e3776b62303b26cb631", size = 251966 }, { url = "https://files.pythonhosted.org/packages/e7/e3/07ae864a636d70a8a6f58da27cb1179192f1140d5d1da10886ade9405797/psutil-5.9.8-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:aee678c8720623dc456fa20659af736241f575d79429a0e5e9cf88ae0605cc81", size = 248702 }, { url = "https://files.pythonhosted.org/packages/b3/bd/28c5f553667116b2598b9cc55908ec435cb7f77a34f2bff3e3ca765b0f78/psutil-5.9.8-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cb6403ce6d8e047495a701dc7c5bd788add903f8986d523e3e20b98b733e421", size = 285242 }, { url = "https://files.pythonhosted.org/packages/c5/4f/0e22aaa246f96d6ac87fe5ebb9c5a693fbe8877f537a1022527c47ca43c5/psutil-5.9.8-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d06016f7f8625a1825ba3732081d77c94589dca78b7a3fc072194851e88461a4", size = 288191 }, @@ -3387,6 +3388,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a9/f9/b6bcaf874f410564a78908739c80861a171788ef4d4f76f5009656672dfe/pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753", size = 1920344 }, ] +[[package]] +name = "pydantic-settings" +version = "2.6.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "python-dotenv" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6c/66/5f1a9da10675bfb3b9da52f5b689c77e0a5612263fcce510cfac3e99a168/pydantic_settings-2.6.0.tar.gz", hash = "sha256:44a1804abffac9e6a30372bb45f6cafab945ef5af25e66b1c634c01dd39e0188", size = 75232 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/34/19/26bb6bdb9fdad5f0dfce538780814084fb667b4bc37fcb28459c14b8d3b5/pydantic_settings-2.6.0-py3-none-any.whl", hash = "sha256:4a819166f119b74d7f8c765196b165f95cc7487ce58ea27dec8a5a26be0970e0", size = 28578 }, +] + [[package]] name = "pygments" version = "2.18.0" @@ -3436,14 +3450,14 @@ wheels = [ [[package]] name = "pypdf" -version = "4.3.1" +version = "5.0.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "typing-extensions", marker = "python_full_version < '3.11'" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/f0/65/2ed7c9e1d31d860f096061b3dd2d665f501e09faaa0409a3f0d719d2a16d/pypdf-4.3.1.tar.gz", hash = "sha256:b2f37fe9a3030aa97ca86067a56ba3f9d3565f9a791b305c7355d8392c30d91b", size = 293266 } +sdist = { url = "https://files.pythonhosted.org/packages/9d/28/6bc2ca8a521512f2904e6aa3028af43a864fe2b66c77ea01bbbc97f52b98/pypdf-5.0.1.tar.gz", hash = "sha256:a361c3c372b4a659f9c8dd438d5ce29a753c79c620dc6e1fd66977651f5547ea", size = 4999113 } wheels = [ - { url = "https://files.pythonhosted.org/packages/3c/60/eccdd92dd4af3e4bea6d6a342f7588c618a15b9bec4b968af581e498bcc4/pypdf-4.3.1-py3-none-any.whl", hash = "sha256:64b31da97eda0771ef22edb1bfecd5deee4b72c3d1736b7df2689805076d6418", size = 295825 }, + { url = "https://files.pythonhosted.org/packages/48/8f/9bbf22ba6a00001a45dbc54337e5bbbd43e7d8f34c8158c92cddc45736af/pypdf-5.0.1-py3-none-any.whl", hash = "sha256:ff8a32da6c7a63fea9c32fa4dd837cdd0db7966adf6c14f043e3f12592e992db", size = 294470 }, ] [[package]]