From 54e4554f49b94937ab59039ab7b3e6bc02e407d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Sun, 14 Jan 2024 11:51:58 -0300 Subject: [PATCH 001/391] first commit --- README.md | 0 src/crewai_tools/__init__.py | 0 tests/__init__.py | 0 3 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 README.md create mode 100644 src/crewai_tools/__init__.py create mode 100644 tests/__init__.py diff --git a/README.md b/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 000000000..e69de29bb From c1182eb322568164d3e8592ebf1a33cfb2fd3b30 Mon Sep 17 00:00:00 2001 From: Gui Vieira Date: Tue, 13 Feb 2024 20:10:56 -0300 Subject: [PATCH 002/391] First take on a rag tool --- README.md | 101 ++++++++++++++++++ src/crewai_tools/__init__.py | 1 + .../adapters/embedchain_adapter.py | 14 +++ src/crewai_tools/adapters/lancedb_adapter.py | 49 +++++++++ src/crewai_tools/base_tool.py | 82 ++++++++++++++ src/crewai_tools/rag_tool.py | 74 +++++++++++++ tests/adapters/embedchain_adapter_test.py | 67 ++++++++++++ tests/adapters/lancedb_adapter_test.py | 22 ++++ tests/conftest.py | 21 ++++ tests/data/chromadb/chroma.sqlite3 | Bin 0 -> 159744 bytes .../data_level0.bin | Bin 0 -> 6284000 bytes .../header.bin | Bin 0 -> 100 bytes .../length.bin | Bin 0 -> 4000 bytes .../link_lists.bin | 0 tests/data/embedding.txt | 1 + .../requirements.lance/_latest.manifest | Bin 0 -> 237 bytes ...0-d2c46569-d173-4b3f-b589-f8f00eddc371.txn | 1 + ...1-5ae04c7e-dae3-47e8-92e9-6b84b7a4d035.txn | Bin 0 -> 97 bytes .../requirements.lance/_versions/1.manifest | Bin 0 -> 183 bytes .../requirements.lance/_versions/2.manifest | Bin 0 -> 237 bytes ...2164da72-df18-4c76-9f6f-d51cc6139c92.lance | Bin 0 -> 19605 bytes tests/rag_tool_test.py | 21 ++++ 22 files changed, 454 insertions(+) create mode 100644 src/crewai_tools/adapters/embedchain_adapter.py create mode 100644 src/crewai_tools/adapters/lancedb_adapter.py create mode 100644 src/crewai_tools/base_tool.py create mode 100644 src/crewai_tools/rag_tool.py create mode 100644 tests/adapters/embedchain_adapter_test.py create mode 100644 tests/adapters/lancedb_adapter_test.py create mode 100644 tests/conftest.py create mode 100644 tests/data/chromadb/chroma.sqlite3 create mode 100644 tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/data_level0.bin create mode 100644 tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/header.bin create mode 100644 tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/length.bin create mode 100644 tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/link_lists.bin create mode 100644 tests/data/embedding.txt create mode 100644 tests/data/lancedb/requirements.lance/_latest.manifest create mode 100644 tests/data/lancedb/requirements.lance/_transactions/0-d2c46569-d173-4b3f-b589-f8f00eddc371.txn create mode 100644 tests/data/lancedb/requirements.lance/_transactions/1-5ae04c7e-dae3-47e8-92e9-6b84b7a4d035.txn create mode 100644 tests/data/lancedb/requirements.lance/_versions/1.manifest create mode 100644 tests/data/lancedb/requirements.lance/_versions/2.manifest create mode 100644 tests/data/lancedb/requirements.lance/data/2164da72-df18-4c76-9f6f-d51cc6139c92.lance create mode 100644 tests/rag_tool_test.py diff --git a/README.md b/README.md index e69de29bb..9ca0d36f2 100644 --- a/README.md +++ b/README.md @@ -0,0 +1,101 @@ 
+## Getting started
+
+When setting up agents you can provide tools for them to use. Here you will find ready-to-use tools as well as simple helpers for creating your own.
+
+To create a new tool, pick one of the strategies below.
+
+### Subclassing `BaseTool`
+
+```python
+class MyTool(BaseTool):
+    name: str = "Knowledge base"
+    description: str = "A knowledge base with all the requirements for the project."
+
+    def _run(self, question: str) -> str:
+        return "\n".join(
+            tbl.search(embed_func([question])[0]).limit(3).to_pandas()["text"].tolist()
+        )
+```
+
+As you can see, all you need to do is create a new class that inherits from `BaseTool`, define the `name` and `description` fields, and implement the `_run` method.
+
+### Create a tool from a function or lambda
+
+```python
+my_tool = Tool(
+    name="Knowledge base",
+    description="A knowledge base with all the requirements for the project.",
+    func=lambda question: tbl.search(embed_func([question])[0])
+    .limit(3)
+    .to_pandas()["text"]
+    .tolist(),
+)
+```
+
+Here it's a bit simpler, as you don't have to subclass anything. Simply create a `Tool` object with the three required fields and you are good to go.
+
+### Use the `tool` decorator
+
+```python
+@tool("Knowledge base")
+def my_tool(question: str) -> str:
+    """A knowledge base with all the requirements for the project."""
+    return "\n".join(
+        tbl.search(embed_func([question])[0]).limit(3).to_pandas()["text"].tolist()
+    )
+```
+
+The decorator makes it easy to wrap simple functions as tools. If you don't provide a name, the function's name is used. A docstring is required, though, since it becomes the tool's description.
+
+If you are using a linter you may see issues when passing your decorated tool to a `tools` parameter that expects a list of `BaseTool`. The decorated function is a `BaseTool` at runtime, but type inference doesn't know that. In that case, use the `as_tool` helper, as shown below.
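+For example, a minimal sketch (`my_tool` is the decorated function from the previous section):
+
+```python
+tools: list[BaseTool] = [as_tool(my_tool)]
+```
+
+`as_tool` simply asserts that the object really is a `BaseTool` and casts it, so the type checker is satisfied without any runtime conversion.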
+
+## Contribution
+
+This repo is open-source and we welcome contributions. If you're looking to contribute, please:
+
+- Fork the repository.
+- Create a new branch for your feature.
+- Add your feature or improvement.
+- Send a pull request.
+
+We appreciate your input!
+
+### Installing Dependencies
+
+```bash
+poetry install
+```
+
+### Virtual Env
+
+```bash
+poetry shell
+```
+
+### Pre-commit hooks
+
+```bash
+pre-commit install
+```
+
+### Running Tests
+
+```bash
+poetry run pytest
+```
+
+### Running static type checks
+
+```bash
+poetry run pyright
+```
+
+### Packaging
+
+```bash
+poetry build
+```
+
+### Installing Locally
+
+```bash
+pip install dist/*.tar.gz
+```
diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py
index e69de29bb..1aea157ad 100644
--- a/src/crewai_tools/__init__.py
+++ b/src/crewai_tools/__init__.py
@@ -0,0 +1 @@
+from .base_tool import BaseTool, Tool, as_tool, tool
diff --git a/src/crewai_tools/adapters/embedchain_adapter.py b/src/crewai_tools/adapters/embedchain_adapter.py
new file mode 100644
index 000000000..8ef1d2a11
--- /dev/null
+++ b/src/crewai_tools/adapters/embedchain_adapter.py
@@ -0,0 +1,14 @@
+from embedchain import App
+
+from crewai_tools.rag_tool import Adapter
+
+
+class EmbedchainAdapter(Adapter):
+    embedchain_app: App
+    dry_run: bool = False
+
+    def query(self, question: str) -> str:
+        result = self.embedchain_app.query(question, dry_run=self.dry_run)
+        # `result is list` would always be False; check the type instead.
+        if isinstance(result, list):
+            return "\n".join(result)
+        return str(result)
diff --git a/src/crewai_tools/adapters/lancedb_adapter.py b/src/crewai_tools/adapters/lancedb_adapter.py
new file mode 100644
index 000000000..630ce972e
--- /dev/null
+++ b/src/crewai_tools/adapters/lancedb_adapter.py
@@ -0,0 +1,49 @@
+from pathlib import Path
+from typing import Any, Callable
+
+from lancedb import DBConnection as LanceDBConnection
+from lancedb import connect as lancedb_connect
+from lancedb.table import Table as LanceDBTable
+from openai import Client as OpenAIClient
+from pydantic import Field, PrivateAttr
+
+from crewai_tools.rag_tool import Adapter
+
+
+def _default_embedding_function():
+    client = OpenAIClient()
+
+    def _embedding_function(input):
+        rs = client.embeddings.create(input=input, model="text-embedding-ada-002")
+        return [record.embedding for record in rs.data]
+
+    return _embedding_function
+
+
+class LanceDBAdapter(Adapter):
+    uri: str | Path
+    table_name: str
+    embedding_function: Callable = Field(default_factory=_default_embedding_function)
+    top_k: int = 3
+    vector_column_name: str = "vector"
+    text_column_name: str = "text"
+
+    _db: LanceDBConnection = PrivateAttr()
+    _table: LanceDBTable = PrivateAttr()
+
+    def model_post_init(self, __context: Any) -> None:
+        self._db = lancedb_connect(self.uri)
+        self._table = self._db.open_table(self.table_name)
+
+        return super().model_post_init(__context)
+
+    def query(self, question: str) -> str:
+        query = self.embedding_function([question])[0]
+        results = (
+            self._table.search(query, vector_column_name=self.vector_column_name)
+            .limit(self.top_k)
+            .select([self.text_column_name])
+            .to_list()
+        )
+        values = [result[self.text_column_name] for result in results]
+        return "\n".join(values)
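+
+# Example usage (a sketch; the uri/table values mirror the test fixture below,
+# and the table must already exist, e.g. created with lancedb's create_table):
+#
+#     adapter = LanceDBAdapter(uri="tests/data/lancedb", table_name="requirements")
+#     print(adapter.query("What are the requirements for the task?"))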
diff --git a/src/crewai_tools/base_tool.py b/src/crewai_tools/base_tool.py
new file mode 100644
index 000000000..eadef2368
--- /dev/null
+++ b/src/crewai_tools/base_tool.py
@@ -0,0 +1,82 @@
+from abc import ABC, abstractmethod
+from typing import Any, Callable, cast
+
+from langchain.agents import tools as langchain_tools
+from pydantic import BaseModel
+
+
+class BaseTool(BaseModel, ABC):
+    name: str
+    """The unique name of the tool that clearly communicates its purpose."""
+    description: str
+    """Used to tell the model how/when/why to use the tool."""
+
+    def run(
+        self,
+        *args: Any,
+        **kwargs: Any,
+    ) -> Any:
+        return self._run(*args, **kwargs)
+
+    @abstractmethod
+    def _run(
+        self,
+        *args: Any,
+        **kwargs: Any,
+    ) -> Any:
+        """Here goes the actual implementation of the tool."""
+
+    def to_langchain(self) -> langchain_tools.Tool:
+        return langchain_tools.Tool(
+            name=self.name,
+            description=self.description,
+            func=self._run,
+        )
+
+
+class Tool(BaseTool):
+    func: Callable
+    """The function that will be executed when the tool is called."""
+
+    def _run(self, *args: Any, **kwargs: Any) -> Any:
+        return self.func(*args, **kwargs)
+
+
+def to_langchain(
+    tools: list[BaseTool | langchain_tools.BaseTool],
+) -> list[langchain_tools.BaseTool]:
+    return [t.to_langchain() if isinstance(t, BaseTool) else t for t in tools]
+
+
+def tool(*args):
+    """
+    Decorator to create a tool from a function.
+    """
+
+    def _make_with_name(tool_name: str) -> Callable:
+        def _make_tool(f: Callable) -> BaseTool:
+            if f.__doc__ is None:
+                raise ValueError("Function must have a docstring")
+
+            return Tool(
+                name=tool_name,
+                description=f.__doc__,
+                func=f,
+            )
+
+        return _make_tool
+
+    if len(args) == 1 and callable(args[0]):
+        return _make_with_name(args[0].__name__)(args[0])
+    if len(args) == 1 and isinstance(args[0], str):
+        return _make_with_name(args[0])
+    raise ValueError("Invalid arguments")
+
+
+def as_tool(f: Any) -> BaseTool:
+    """
+    Useful for when you create a tool using the @tool decorator and want to use it as a BaseTool.
+    It is a BaseTool, but type inference doesn't know that.
+    """
+    assert isinstance(f, BaseTool)
+    return cast(BaseTool, f)
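+
+# Example usage (a sketch, not part of this module):
+#
+#     @tool
+#     def greet(name: str) -> str:
+#         """Greet someone by name."""
+#         return f"Hello, {name}!"
+#
+#     # `greet` is now a Tool; convert a mixed list for use with langchain agents:
+#     lc_tools = to_langchain([greet])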
diff --git a/src/crewai_tools/rag_tool.py b/src/crewai_tools/rag_tool.py
new file mode 100644
index 000000000..222cf529d
--- /dev/null
+++ b/src/crewai_tools/rag_tool.py
@@ -0,0 +1,74 @@
+from abc import ABC, abstractmethod
+from typing import Any
+
+from pydantic import BaseModel, ConfigDict
+
+from crewai_tools.base_tool import BaseTool
+
+
+class Adapter(BaseModel, ABC):
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    @abstractmethod
+    def query(self, question: str) -> str:
+        """Query the knowledge base with a question and return the answer."""
+
+
+class RagTool(BaseTool):
+    name: str = "Knowledge base"
+    description: str = "A knowledge base that can be used to answer questions."
+    adapter: Adapter
+
+    def _run(
+        self,
+        *args: Any,
+        **kwargs: Any,
+    ) -> Any:
+        return self.adapter.query(args[0])
+
+    # The constructors below are classmethods; as instance methods they could
+    # not be called without already having an adapter-backed instance.
+    @classmethod
+    def from_file(cls, file_path: str):
+        from embedchain import App
+        from embedchain.models.data_type import DataType
+
+        from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter
+
+        app = App()
+        app.add(file_path, data_type=DataType.TEXT_FILE)
+
+        adapter = EmbedchainAdapter(embedchain_app=app)
+        return cls(adapter=adapter)
+
+    @classmethod
+    def from_directory(cls, directory_path: str):
+        from embedchain import App
+        from embedchain.loaders.directory_loader import DirectoryLoader
+
+        from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter
+
+        loader = DirectoryLoader(config=dict(recursive=True))
+
+        app = App()
+        app.add(directory_path, loader=loader)
+
+        adapter = EmbedchainAdapter(embedchain_app=app)
+        return cls(adapter=adapter)
+
+    @classmethod
+    def from_web_page(cls, url: str):
+        from embedchain import App
+        from embedchain.models.data_type import DataType
+
+        from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter
+
+        app = App()
+        app.add(url, data_type=DataType.WEB_PAGE)
+
+        adapter = EmbedchainAdapter(embedchain_app=app)
+        return cls(adapter=adapter)
+
+    @classmethod
+    def from_embedchain(cls, config_path: str):
+        from embedchain import App
+
+        from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter
+
+        app = App.from_config(config_path=config_path)
+        adapter = EmbedchainAdapter(embedchain_app=app)
+        return cls(adapter=adapter)
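+
+# Example usage (a sketch; the file path is hypothetical):
+#
+#     tool = RagTool.from_file("docs/requirements.txt")
+#     answer = tool.run("What are the requirements for the task?")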
diff --git a/tests/adapters/embedchain_adapter_test.py b/tests/adapters/embedchain_adapter_test.py
new file mode 100644
index 000000000..06a3ac9c8
--- /dev/null
+++ b/tests/adapters/embedchain_adapter_test.py
@@ -0,0 +1,67 @@
+from typing import Callable
+
+from chromadb import Documents, EmbeddingFunction, Embeddings
+from embedchain import App
+from embedchain.config import AppConfig, ChromaDbConfig
+from embedchain.embedder.base import BaseEmbedder
+from embedchain.vectordb.chroma import ChromaDB
+
+from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter
+
+
+class MockEmbeddingFunction(EmbeddingFunction):
+    fn: Callable
+
+    def __init__(self, embedding_fn: Callable):
+        self.fn = embedding_fn
+
+    def __call__(self, input: Documents) -> Embeddings:
+        return self.fn(input)
+
+
+def test_embedchain_adapter(helpers):
+    embedding_function = MockEmbeddingFunction(
+        embedding_fn=helpers.get_embedding_function()
+    )
+    embedder = BaseEmbedder()
+    embedder.set_embedding_fn(embedding_function)  # type: ignore
+
+    db = ChromaDB(
+        config=ChromaDbConfig(
+            dir="tests/data/chromadb",
+            collection_name="requirements",
+        )
+    )
+
+    app = App(
+        config=AppConfig(
+            id="test",
+        ),
+        db=db,
+        embedding_model=embedder,
+    )
+
+    adapter = EmbedchainAdapter(
+        dry_run=True,
+        embedchain_app=app,
+    )
+
+    assert (
+        adapter.query("What are the requirements for the task?")
+        == """
+  Use the following pieces of context to answer the query at the end.
+  If you don't know the answer, just say that you don't know, don't try to make up an answer.
+
+  Technical requirements
+
+The system should be able to process 1000 transactions per second. The code must be written in Ruby. | Problem
+
+Currently, we are not able to find out palindromes in a given string. We need a solution to this problem. | Solution
+
+We need a function that takes a string as input and returns true if the string is a palindrome, otherwise false.
+
+  Query: What are the requirements for the task?
+
+  Helpful Answer:
+"""
+    )
diff --git a/tests/adapters/lancedb_adapter_test.py b/tests/adapters/lancedb_adapter_test.py
new file mode 100644
index 000000000..bc4d6ba4f
--- /dev/null
+++ b/tests/adapters/lancedb_adapter_test.py
@@ -0,0 +1,22 @@
+from crewai_tools.adapters.lancedb_adapter import LanceDBAdapter
+
+
+def test_lancedb_adapter(helpers):
+    adapter = LanceDBAdapter(
+        uri="tests/data/lancedb",
+        table_name="requirements",
+        embedding_function=helpers.get_embedding_function(),
+        top_k=2,
+        vector_column_name="vector",
+        text_column_name="text",
+    )
+
+    assert (
+        adapter.query("What are the requirements for the task?")
+        == """Technical requirements
+
+The system should be able to process 1000 transactions per second. The code must be written in Ruby.
+Problem
+
+Currently, we are not able to find out palindromes in a given string. We need a solution to this problem."""
+    )
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 000000000..cacbc0fe2
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,21 @@
+from typing import Callable
+
+import pytest
+
+
+class Helpers:
+    @staticmethod
+    def get_embedding_function() -> Callable:
+        def _func(input):
+            assert input == ["What are the requirements for the task?"]
+            with open("tests/data/embedding.txt", "r") as file:
+                content = file.read()
+            numbers = content.split(",")
+            return [[float(number) for number in numbers]]
+
+        return _func
+
+
+@pytest.fixture
+def helpers():
+    return Helpers
diff --git a/tests/data/chromadb/chroma.sqlite3 b/tests/data/chromadb/chroma.sqlite3
new file mode 100644
index 0000000000000000000000000000000000000000..9c9cf0e56ca4d2d2470cc098267349634a2c1cc8
GIT binary patch
literal 159744
[base85 binary payload omitted]
diff --git a/tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/data_level0.bin b/tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/data_level0.bin
new file mode 100644
index 0000000000000000000000000000000000000000..ea3192e8ec5112eb8b4f2a7bde5d13bbb95e6ac0
GIT binary patch
literal 6284000
[base85 binary payload omitted; the remaining file diffs of this patch are truncated]
z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r 
z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj 
[remaining base85 payload omitted]

literal 0
HcmV?d00001

diff --git a/tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/header.bin b/tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/header.bin
new file mode 100644
index 0000000000000000000000000000000000000000..3e0932a7d0033aedf7dad4109641188fbb1f6091
GIT binary patch
literal 100
tcmZQ%K!6v_2sVh-BLU&Jz-XxSe<%=u@)e*ojQ_7mJJntEx_t^%8~|Sk4$lAp

literal 0
HcmV?d00001

diff --git a/tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/length.bin b/tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/length.bin
new file mode 100644
index 0000000000000000000000000000000000000000..1dc89f8e47e591af4c36a35046077f0ba1d1ef9d
GIT binary patch
literal 4000
ocmeIuF#!Mo0K%a4Pi+hzh(KY$fB^#r3>YwAz<>b*1`NCh1`nVB0RR91

literal 0
HcmV?d00001

diff --git a/tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/link_lists.bin b/tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/link_lists.bin
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/data/embedding.txt b/tests/data/embedding.txt
new file mode 100644
index 000000000..c3cc374c2
--- /dev/null
+++ b/tests/data/embedding.txt
@@ -0,0 +1 @@
+0.025692760944366455, -0.004980836994946003, 0.008317121304571629, -0.013715836219489574, [remaining embedding values omitted]
diff --git a/tests/data/lancedb/requirements.lance/_latest.manifest b/tests/data/lancedb/requirements.lance/_latest.manifest
new file mode 100644
index 0000000000000000000000000000000000000000..38d9af183730dc0462592fa654d3509d55b5b5b1
GIT binary patch
literal 237
[base85 payload omitted]

literal 0
HcmV?d00001

diff --git a/tests/data/lancedb/requirements.lance/_versions/1.manifest b/tests/data/lancedb/requirements.lance/_versions/1.manifest
new file mode 100644
index 0000000000000000000000000000000000000000..f83222174dd39818d6ee8bcf0aba4cfc33fa64fd
GIT binary patch
literal 183
[base85 payload omitted]

literal 0
HcmV?d00001
zzMX}(88NWEfjMiXkqH88#B~V^8J!8Rjhiq=dpNpGnFU9B?Z&JTUDVy#+OolGd-dYW zec%$c9jFJf;!}>`B>2(09qj+zL@f*r!k`zWu)bF*Yt(BR;)j7QuzE z&(@W*jh3o;^>+5Sorx@Zpls3%AY8_^D;MK{#}2gTO1QA11jRn7SAlQ|pZ8qE;noC2 zoMF2!X0S;@(HE@KjJ;r_c^1dyudLzUnXC3FLT89Il3oBhHx_;D59`|Gat*e*I}BE? z9sZ1qlgf~|hjc?rpxe^OwTE~My)yW|^C^t@qQg%&-3x?C3TY5_L(Ku-q5TJ(UE9-vux&7bW!izzdk0%0N_vUt5h z+y+5yjdA3Gkz(GJ221WrCGT8R;%Ygv_8bh{Gn3!2u!0keK#Qx{K4 zJNejb2N1n0_BqTn62{r}07Kn9%96pm;Lhihq;*~}!e;hD;}JY<&0*o%=IZXfK`d^Q zr25aUgmbxvN=`SmBit|myNFMm_My55Z-LSsk2swh&p5IiZhNl7Hqa3LIyJ%zdp(s$ zXIp{)8aLC zbP7AHJ;SSfocOMQeCoqPbe8kQc_Z%WCe8#%Lm^=~`!uwJq&dDH7yL?dSWMagk9Uni zRsR4Ip9meqN$1wgQy2G3nD})$f3jx}_Z%#PLvj@4tL}1i%n_Jd?8AqT>8AS5`O1k~ z;M#+>gjK}w+buw99{+cf3#_1dUHhj$C_t)@pe5O^!}Bv>YV1H)&vIe&FaA`V5;9XP|m1-y0| zu-+YFNaHOdnEv}Z ze4;z&F;6mRU%361*vQnqfzt!_Uz`~e{6zW=Zk1ewU)rltXe#m^C^WX;l@_EIuCw=Hv&Gy?26ll+>tusA|K-z~ zsO)H&8)@%lLL0z>kXJxHN9lEb4YpNm81)OP&wm4HcSh&Sd(U*o=)PJ=niw}14@T** z8K%By1Jg$SWG^?jra80~IzjFCysrBFnJdy<2ppB0R}2)G#a7P-EODp@L$M|jBDv8K-7{8~#BVn!o3n9oW7;bZ^Nm{j=%bi>Nr1g4pV zu0_(BoH!I4OfH4GwW{&<9A_->%qB0kvj!u4iz+aAe-eJYlmP+l#=(GSm6I+)yD_7> z4l~shS{KyQ_khlr4=>zbqa}sMp!#5gC0=N=)sB%SB3}DVcvc_Q1l|{%i|g{Gu_<5WXN`WUoK zJ3x_f0g~^NpXyu4Hp}Xers{*^t9B?cC)O&Bf9@3CM5fs#UNL~bcM2iLdORFyRhu9C zlg9`Lkj@SSmdr3(kK@Z@!EVe|e4SB2T(k?~EzY9wAJoe-^`yM6Uxv`glDW}wB)upl0}I;tp(R*;3S0I%sl}e+n7|r@nkv$n(G^^GuvRdNrNp5B9bEA&Z+h zozrvqqzA8v>#9Tzi6b5X`A_bUzm1U(kO;du`FSq(NW37+4lS20k4}s3hBf)snFJ5QhR?$Q1h{iTJ)$<1H@IghR zWA#97eF<$R)rTROE}U+op>A7+YR2z6Z(y~O)5&{f)adSMUz_2PUTxGOrx9RsWiXKD z$H18#fV{KNLhSFIE|~W83=-Dkmw^`<@v`uLyw!cb8r&l9g60B0mORx8q}s<>II@tqK_@+D?Q8b zbeH}>7@?A<;V&nRgi)717C-GXP8OL*a^4Tp8C!5&>*vy|yS`XiOB;z>QOr2$bjl!l zO2nt?<(QOW{rdY#)=gK0eqn?oKp4Wv^SKHxAze;+%`6Dqw^Z30(NXTX#sd60rQm?F zpN!@XNH0LM^KDehM%XO9Cj9lneXynJ5geU#1D3q)%)2%j!N?r2!|AI6&FqC_!56O%zx_8Vss@_Jcfco8TNE-+(k9lB0bABhQ4L+tioaroZPt z8$MFKb41>N#Q8`W2J8CuAW+VLC+MXf6{s!irn@Y+;xNQZ56eBanOQp-kv0WM-}a{R`KH&c@`6xh2E}l$9;3 z$$Kcj!QC%=khh%=#QlsshWxzcJtcv?;-uf1&}{qJD7oJ9wBodr**thx7U4k`kq>~| z?~uG?vDc5xO^KcQG(Nrwo&{rxli)?Z)vI!>I6J9E!)$}>ck z#Rh6!z$bg!!n}xnCAy=ska7=L!5c84Jqf=#`JxiR0hEb>(YCR~zd>$!6E9JQVh-eS z@p!YLjC?6~?G}bU8$u_eC08XB+G0vYgXQ5RV_`q&0xJ3q^Kv(lmtCt-8bpM9c8`^<7?=VvXdR zsV|b4STErrWha^ZUDHNTcW*;!*tbCn%v{x$^kxzF448mgKkBK3Gr~te?6x$TpD3if3iCJFP`;*18ge2CjV&}zdf8CQ z5MLnq8QN=Y_4vXb@HXuYkOu(0%bj6k*K~aCRTqdKik8J^vI%`n_L9#Aeob-#orhjQVM<#2&%q}x`))hX-2TMZ`c{R^)bw!=-g%W>|_ zrUDO8yw>6E1BrAj(rwV`qT?nqng@C3iGQqRam`nEpW?n+>2P!0J?!|}gvD-aiu630 za7pMY=-=uL4%S!>_fl5!_F?V$_fzRW_|B5^=7Z_S*PJvVX_TK_l)bmg_br`bds3vPJgDi;~s z!myb%iJ+Erco3yGMc_y(Rg?x#~IcoSoWaE}A>j7zOu}=1(<2BJ+GG%Pij-g{2ohzer zLb^7%JDd}|FS0V0H*pvSwOs?9$~W@5we?`@s6Qy`Og;}L#9YJ1W+y@5+N3DM8l0qz z5@`N~w&uaH@7Po`Gl_C)AdQV86Bsw65nj>W33JRlk%r5}aejXl;c>|eQH|$vq3f<3 z8!i(MOJYt5L&?`hqw|SwxV}|$l`;cPnK}^8Gs+3@!Jy`HUXThKX67o3*Jwe3*-ktZ z^ijF`xuH7DxjqIKd&8>3^?4+Afw7Yh!-GMaA^Vs!&MyANA6myMk0LHZYNN62bf_V$ z_IuCY%u7Z`-BsxC5(>j6PJ#25`!U<3KYG`72jf3K@Nne( z zZZDv{H^;^!gIVX>N!T$rNHJ~~$qrjs%3{9<(}&}lMx`)#QX*^cw>#GTbDa6DGL}s> zH(~0l7<6;^CJmeT2;3azVxr$DY}EEMJJ4Gf51RaC-@XS!EA4gM@NX;lXww;YA2WmH z7{Km^kHN1V_3@ZR3F}!4Ffk>SWxUperN%~hsP`5Op6US6RSo0?Nk4d~VKsahG8juv zu0l7x9sFo@DGy6J&fWFCKu)>^^oy?p%h#skG(%l^=9EAzHT#MtdmrQb3%Wpi!5dr5 z<*v6ULs^J1BzGOdFJ{hVI+q)%TaSE2z1_}Wuakr6jly7EcT?!$Gzy&ZI%9Rq%WPPJ zHN5#chq=ck0gxYTKBfcSvFfGJxwCNJc9`RSjBB<@1Z%BBa4swXUTo_R)pMtT?uczX z&R+w!rfugf$Mh?vvx7jNAy~T6NWSse9?C6(vBh--oE+oWp0X>fdize?aCAAR9>FQD z+H{6#9A~dX>J8;`MLJ8H*-&+i3InPYI^XIqZC|g;T%2^|*=`=(E~=F8Nsr`f{4e3% zikC3IK_m5fOD(zgy(nnip#lO88K3N8eRyBmXSdB|QA}_69qJQ3`BdK_U=q0+^!Ka- 
z>O1ZpS6_ZNq$A9dnm~w>g^_Xs^6EIL>ApP}SCtL)m{RAjK~1eD)H#Ji4<> zueQd>X&_;@CRvS$l_-pqN{$h3`)C#(!_)`o;>UddiOnYp5E-ck;S+ zF}y6p7dO6Q^7mHx;8DQb)|Ng|92{E1XoEAX>w!e}ao#Ctd`%xa>9%E0whqRdXBy+I zr^}G$2X1dEEu-7ijMJ(hROL$>SS%bVxKGG49E za@Su2`ox)wt-cNMr47-lLkc&%>11IpV7Z3g}A6cKfGF}dkg_7PJ zu2?=t!X4H&T1Pc*r->^|?a|hLHHyB?4x9i1iO;Ez-YNCkcEe4tE#S;)kZyfRLa#18 zl*teK!I{vuDBs!1e(a3Gm*?YwW~YSe#LkBvmr`dO5NpD&w;wyB;k z>Bj~x&cFABmwZ&61~quM->eR{x&EI2y;zA^WtObU8aQDCEMonz_LbFGeEd1y?Eedv zJ5V?Iv;9|t->NlCa%v7YHecnHr$Z#|T~`?46^Q%jI*`(MvvPH_1hg+! z96S=xLK|a~KcJldjM;Q>!1sd#q(!U8W98`&pf`L1E@G!Koa?CeF{G&rW@GM~7hpEp zNlson1NQ_TC7f#l30+&rgq`r@Ts=r@q9GGM)STh{h+>BC%lHxV8?cyPbu+!{rJgyB zu&(qk>$SW;3Y_%nauiOSj)m1JjxxBfY)F=y-6aA3pO75a( z0K44l$;2u0rypi8CutHk(bUFwStod#+-694fzr4SjQX9M?_JDjuJEb#RyguONB(kw z!^U;HVXVP3Mwo?p?~K4>(r&`4(~zHDBzO^j*u3OV28QA4hIcVzh94L$>7*KtECJY` z4cnUpz}xM0WY7CEk>*_Kcil$lfBFb{*qI6VFiPMo3%x5B`w3w)dQ*He7NZZVV{H z{m1u90}6E644+QQxGuKn=jsMu&wsALwxCs~k!lWm%0|NCHOqLo(Rv83zY*q4TPeE~`$8f7f%|8Zar3KdK>9^sFI?;I&GwtE;8PNtsKlYrKDxQUKA;}w z;(Q~fK1G}BSFy*P9#D3w6%wvt)!9c%M&nVSHKLY!{KIA*@2B7bF2U($&Vu8aRd#oQ zW9T?;D+tW#W!690~lp`znn!|QLd?|Pw>i@Y9NBf!(wnd`uA44%K>fMSNko9V| z&^_w+R*v}Fqc@8Am}PjarpJE#TnDE^S3%^oyUOmMT3{L3hj3X#S=AL8;TVXS7qxhu zWruI(HBf2qaJJdSl2$p0b~a z99DZMiZkA0Vvluqzrxio<4g9wZ6t6HNLR3dog0!?_{6D>IL7$|xW)|Qbe_aJt)X|V zF*J9d*vV7fai*IYueEricn($$s0*G6p44NVcy_Pa@}PKId3MqsB+Y7!GMjY4onSROqb@KS!gsB=68tLpCOW|o{U8t;Zd{k0NSX|3kAl--Pr9aBTAhXNcdcPdz+OiD zC$K{7iJlk9C%Dw2GknR6mo^f;CZTe)~Fv#vbRn?mCUyiz6jE zcS)Ibh>7015VDwRdY6&L7Ijew1M#HANN~~Z#W!?&3xew=7Nwx}@E&~oQBPcuG#?+G z&E_t#X;g!FOn+sCq}73HC|isnUv#qyKYVwq(XDkBRWq6q)%5CJ)<)Zjx6B@ogl8yt z_`IbPjOnsM;5m$npCi@3x)M^3)R9-My?|P~&19Mrd^R`(j(=#5FS6>_cmmSw%pqC= zokbi8V|ei6@%VG-6Q18|JBT$;tlNf>x4}Ap6ZoXUAt3w(ou#_VJ&{{!ULhQq0)#)H z&hk*KBfZ(lj~Wmct;@ULv$4_jR?zShXX&6^{61>p7fZXr*dhdcy}7Y>^26 zIq4Cl(|9d4tLXvkzT6&u|8!Cat5~2R7hI%1ZE>4$FNx~IBpef>VV)w(6C@1$$C3ylNfI>hs?@4k7WcaZ~<-eQlpHI%P~w_s;1oA8CO zO4J6Lj`Bddp5pA-5=l#t{uZP`6?-ju~^xjCEGp(*VD1IjhoqqXafFiJ4 z;QEiBO;G38bnHA1;Ljx!fnogh+22533iU^7*VLP|slaT^Or63-O~^Zo;f2hzbqh{qnbQyP@S&T7e?`_g?OMo3L@2Urb z?}p;l0lScJRPEt(6_fl6@xTTX5dLh=(_-Sx#n``7M|r^VzdWFRBqqsUL=Vb@M-a27 z7;}#|0XiEtk9Ck|_#8ySDE4uFJ()Ni#Xh3cG->RiO1Eaq>XJ^JUQF00^co*9F&jtS z_dxwWZBbx;ojp#ngTpR3-uED*{v_>YEdRN*zDEC3KeF`h1$=e4WtsrQ(;?|MZ6 zH@I9uzmI3a$E?^S<`BtS)68j!8nMOG%B0%^vyglsC%h2aM;2QBT&5SsO#6nd$0jJ` z$I)=48xZEhh0W1G{KjdY)uGNseHhb$nRUeQnjUkTqzNs|7)B z&U|HQE;!yiS4^|RNPn{xRegbU4U&%IbX{qWNiOkl9re!US-jCGXY8flOg(%m0Xz#$ zL?(d5L13`hg%hXXpP@@QWf5p}r4Q0`fxH+V-M9}u6K?PWrYA_ZMWWESq$4=Z5z;(E z*pur(*bS>+)>eQ1@Idk)rEPLEsRphkLLXA^vL!|@*xv=+{@J{nA}wWK~xWUX&>kx4r-*V}eA zST1~8hdlD!-7X`|8zY?ME7lf3pEx^}atOlIBt__F^7XK~p%)XHl`aKE zC{OR08qfIDx&?cGz8l{AJQaTIP-(W-gRRy@(snmE`FK7j={{u;hv38a60s)~-aV^q z6L!gWRLOfHc`tdwij{D%qcNZM){`9fP1^=YxJwM-fn+IaS2R|mAll$3!$*$5tdw+|D$ zE#n`u9ta$g1us#r$)rn_dtdCRZ|`#L-yIlXqfEWxMrX?X9=C#*a~+{LBL&`{KdjbW zvR=|$w1wrobtt}6?urY+P=5S%Ct-ddkiJKORm5rB$3?}A##hlcJ3<;$P+g-XU!JdB zlXC@3O~KWEvr6a;YI2-Kn8C#QUkom!d??R#iIKnPQ;E2i4Z7#XNEh;%ZX3|UAQi~N zkyo50@(__3(cVY%0R=HY8j!DFmq@+-SRz~l%0>7CtNS&X4S8VcR>cMKqXp3YZY_!S zp%9J$c@U($N2Q!lE=qq>qOq%llRgCMdq#N3bKd3=hE;Qs?NM%pQ@pQ{2HOS#Df$yAj613B%e4O$T@^`#^Qyk?q132kSzOnlSR%F(rCdcdOP{#FJFLERA zgO4Icpup3!n_3dCXv2rB$?$#tF)s9O&jW4P)lY-vr)N*0@Z39_o|7iKY$NTSt5|A$ zWM9vR1LvA4zq8Wu9#>GM>-93eAI9LFjLtRy|KMLBf*EP2w?TpoW}XgjPuLT3Hrdh_jX zOl0evt=Q|%1iWUoic@xo8pCa|{G+)n{3hvD=393n{9SOEy*lSk9zg>0_9nP3u#W1z z;+;g^hVZZiUw*w)LLLuowLCcS38Pt&UVPgI)WdShi+eRQLv;u8bx60N{9^{+opwuj zF;2RZyzvISw`CYy3F{6b3mI`}BajBbPSIBZ^p3F1quY>tFK%BSBXkpSo~uOqA86J& 
zd3@DLrynPc%BN0ok=jRZ=CnSxx?85m_MqE@&T{S>e^@j<4#=aFke(H}fNpdgvU^8!;{6yD9_Qze zi!K9}FA;e#qwGlJLhOp=bn??b*o`e2T)gJ#*}kOH=x4@y-Qmmp-ISfxld0!w9vgi3 zBYQbFAL9I%;#=zwrZr+YlFw8KgM=PoBGcJf=BB(Ga)5R3m;t8Q35w7Mq(|jv-S|D=E0kutq2LaN0Nb4d09wG5Q7L4=@?tPnrYr7TG+@)Z+k%>(D4_oB` z+xEJM>s(HOz8x>s=y}pXZ0w=oltE1<9TF?@9xUrq$rmPXr95|q+lWIJD(Nt!Y)#}c ziom(HS~EFeu1p>pPBb&aF%R}bMvqh!SvzsBO8rK-I0;D0vz+ve*v?{|28mY zZ)F#(7vP4gY~x^#$8+2e{RkJ=ujR_yo2Ym0u7gp@wIS~1R9IkWqnfKtq5S+$Mz`6L zdm(J9f0|;^t1m9|B<)fj&*raI@loq^cH&Qa^{LYdZc|waf7b3pi(dwK!Js)FcTEtt zx%<~Tsy1c0mT?!c!K-B@<2SB%<8QXZrNj)rr%?fKGt5HX|L_2G7?TQ3++4A!^J1iR z!T_TmSa*6B_iCPsm1~?NU0*NhqmIh&KFY=&XBzRG+itk^L${J|b0-^fy~pk>q{j}JTkj~OIHlpj z)Df_$=VpFj*C^gPr6-K@+5vgjW8p`mDQ+VhX0chNZQ$2XYxcZ@vE1&832xT!iKhlG zLC*|lJYD`2d;IK*(koNc9G!u|CC2b)W0;$_{dLyA&J%WEQaXRFH3zoDwt>OrR-8L~ ziccECV*S^-m11dk-9LHNXK&BjKnE{Vac21<-Z) z`Xz&4*F`%B_-YMb8!o{cPk*tNl)3zc6+c`SQlA1r*nPon0q$89QJyuklH1Oo4QZMMxD3u zs)8Okb$0^QV?LbfS;(s9Pi6KK+M#>#dKh$W1nub;w!fK+&4+{n^$%Vhxs@9$Z?V^6 zhST^GY%0JT^~;3RuO&*0OYgX+sRe7|RxG7QM1lQ;eW1791EZhJ zWSVy?Ah>%O_jOQ|$TiyPrL%@~9ew$ArxmdA@HRZAv{cVu`9$mO51Ge{!M<5B?6aJV z^DaL|`SS_M$udKnV-XZqvQ`aF!jxpX>y!@%!<+p0?U?#{h0Pdnk@g(^7Amw}zflM}_bOUX)zNoykQkcDf(* zA8;MQ=x5F+|=wDTROv|)~0HovR?4^@I|Eeh1~TGS@DeB82l{;x-atNzpq`w z^)9O=!`T-gp;ar)lxwR3Poi&IGdfE?$!C+)b6Jq+efB*3JQ~cr#*^pH;-cq=k1kid zpP7kkDC?J;MT3@Ra?FEbFtTq8=4u-BDLamnAL@wS$7i#OVRiKpw#|JR6LS}3Sza^8 zPr_#M;saB0^t#P(GUg5>zV}izN1J2itw1yi+{kPOw#5F&cCl&vFCW`_0n};Z$ZJ2b z#Z7g?;rFUkHvEDq|7y@h`m^y95T-NQ6HMKInGrXp^v@bTkB*<{Zz9bU5LPn(FNbN? z8VHPly=%e!gQc|QlDOgvQHYSn%4-9bZj8$=l)`D*|uuaB1^(!JtnY#&Vc=` zw1k7tPP0S)z2)E%`W^K2dh$=xZ%~l64PMMP7d3)5<9wM>poK)(3VAz2kZJ^>hwGr` z-2u3|x;t8&iGZ2(H*4xw57<2<8T$3^0e$t(Q!Uc5j?+9eY`Yc?ZdysaR3?e}GMJJ` z_4>?9s#byFzHHFC5()X4ZQ<6q4ea%TQ*2lLO1Yg4Ag=-r6>rEgYPttZ^Qo{vZQzEu?q^O=JCopTJp8AE^xTOj+ynFk70c{c6_3;1*0cpXn?1+{0SLACwlRUWTBu@mPMMBNAW3y~%48`Ezd+cvG(+ z9Q=v~sY$Qx;P<&W-mYO?+3J`Pt?3J2_4y-yL=BZV8y8$~M8Ze*VNpHQx}u4P**JIw zSE$!5+-QE(+cz{=X_w=SI13AHHxm!Equ-f41IIv?8hknf)~os>eP#7JoP@;}8>(%_ zJw@B=er^%vMXbno4dJdm81LUf^S>R3Bpu_#Dd1`Ci$nAqVcx_paO+_?E?k)av^V^? 
z;C+TBqTEIHn|qV>(wzylcXWcLUm8eL{hN}uOJOueuD5*0OGX$xZWJZ1lS3s=z~wUZ)uGkd$MVBQ$IY?F&hViQ39$ z{eCF)J;eDq)1D3FL2Dz}JmWEhXZiT*_X9Y^^sIo^%d0cZQ=5jIrdI4{cNmmCt>tC)BXu9+QI#x%PC z`yY11mLmg{tbXfRcH=aB`{x)CS8)(9*8b1Cm`fmA1?I z99YY#Wc8kC)Z8>iqTZ_E@~;Mk+<9jX2#$I9I0INS`B2_93WlC9gx`&OWBK1C zEUCVZL+jb0>$^d!?maPAhAQcBInm(~BTWNzX5_=B0O<~O=7+kZqr#ASobd5|7Hu8%zq(%*_NTS+*X z$m{NGqRziDPcdI?qfpN<(n*Z;mgMUoA$c;mw|gl0KvVn>drjap@b>HRQFRc%ZKV=E zpWyF%p2JT^*Kv*X9_akmABd;OM=WHb_A92EsPugDk}~Zr9-rm4!&jfu+(_3;89mUo zT7OoJRvL8fGVV34;MDKMLSvKXLF#EFKLFo{9OjojN5X0Sv0d9X?FK=tJ(6#b-z>e$ z9#ysC_czBvBYCFK4{VrmcbM4W6DKcBdT9d=h_B!RgUHXK?!68wX?j)Y`Rfm+F~R}3 ze949p9u}#^UTE39kt}$f_C^|J3lnGXyvumduevFPotwd;ybyY`#!ByA_c(EndSl`_ zh;5!tTs4LC!4P(Dazmjw^ zLRS#)NynG=frVF2!s9oLlkNu6fGB+MsvH-H_dA1D(V1{{qgM@oM0tTqy1)I%S;?R@SD9WHk+5jc(E+KoBQIrQni4`xc`KxZuY7KCRNx^MhO6X6S3tv%CV zvR?t8v~Qg7F<863576s4X>?p3@CQ8xHN(`!{%)hkw1*yehP+A#iL@cwCS~L71=zF&@^zaH4KA^tK2S!}RXg=8Cg1uby z;O=I>k@$&`j^)O z%r$(H%|49Vwwn#Ww-$@C>|e77fy;W2kD-#gA8ni5;ne3m(zFwN zI8iS8fm`>fCkLu0fVfAjTNV2xKaOcpe__(gbDTK7Mz3s1nhoFAEx;vL+v1&PlR&%0 z6}tVCi8Z7YpHwdNHzvM42&C13_=*w6k{;KQ3D=oS0xzuw9N`|9 z2O%CjC3sS${!{#Oqe!1U1Ca-ip5iN}uIEB`MtTP`kCd^XVKp1cf3TdE5sWey;=k9p zE_@3YJyLn=4IjH@px{;Fpbn%-?y&fW<|y(f>RnE{m-^`ioH*_2)`N4G*%3n0zvaPi+UsJ35@i9$3+f6K7(m*umPF@dEAbN zIH1QWHhY0qiDuI_bbc1x_1#&xV6Yk9|1g5DQL!+fhoMZ~9Qw@HS3L%03e5?Dzl!N^ z-xW^2L;jI?0GesGK+ed%VTyc=3 zPS+S^T0oeOCMSN=&(S;_xq}Muy_*iwwLoaar!V3Je?Z^T1|a9eLqYaT z9{uDMlD=lUhcrNutF>#m5?*(zqo#g!gGW_6q}h-JrtVM3V`!=#D^>&Xw?rO*5zi~_ z#`ja#e#k=7?{e~&Yn*%{9$VlDCRo6P_W5yk2Pb~R&|3XOUjyMS&HgjI>h%(@q@7}W zFC`LRenpy5$m(YT<*|psCSe_Ej9Q#D1n(2~ob~QC5f9l77rhU}#fRsXszs&xl>vFL4oX=z5iRo-QecyNmJC z)$2%Dq;~&18v=WUgT}BJ$^m|Xz!>UZnX(OB71Rqd`wS<~>`FSXHriAU$B<*Cg5R*d zUo4}w$)wp7`W|*)?arxA7^wEfiBDYsopMQ|Eob*ySmDH-y)bX*2hx?oYN%mlDlYk%I^g@uk;V#mUlKp_vg{(2dSnCY>fVrO!MN`KHkIn-a$3 zm%$TI-G;<9TzEX13)IV4&P7H!Bg~dRczuH}Ht}GT9l6nL zij0bI_6$&F24g1l1kxe&yHo>tM*O!L4|VrdIM6(@$8R{JxnXo2Xftdh`W2n!q&0xJ zMCzsHLI;y=jAj7N)JD{@=qh*}$?vKn=ORqOml69V^6)iW{fza-{$=kVi<@J?i& zqQ3>sh%>~#r*7d)r=>vW!pOtQB0r@pNaQIJaTnzI_GoYqBIXd(UVeY4?6z^S`gV|9zi-x1;gj`~G+Pzt{bDJBYq9XlASv<~ujc>hFL5H0|{M`7kI) zO;;@gXJfqxU!Snx5EIS+eTvyM|GB~x}t;HIx=_S+reFLUW`akde=Zz+X&6`#8j@FCl-hbcye;({;-%&g^qrQP|fOn9Oud$w^y_>yL$0c-B`m0kljkPqi zP2$#S+RZj-Vyx--zm*lm5BCW(XliUA7Dgot4+^7t>iP$Tb?NM?X<%wxJJ3I9QiOLv sxUY%U|L?HGL*8@$eMtL%A2Qa_(A3g4xtOA str: + return self.answer + + +def test_rag_tool(): + adapter = MockAdapter(answer="42") + rag_tool = RagTool(adapter=adapter) + + assert rag_tool.name == "Knowledge base" + assert ( + rag_tool.description == "A knowledge base that can be used to answer questions." 

From db5d371769059403b3fd696566bcb9c30978c179 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Moura?=
Date: Thu, 15 Feb 2024 14:02:42 -0300
Subject: [PATCH 003/391] Quick tools reorganization

---
 README.md                                    | 109 +++++++++++-------
 src/crewai_tools/__init__.py                 |   2 +-
 .../adapters/embedchain_adapter.py           |   2 +-
 src/crewai_tools/adapters/lancedb_adapter.py |   2 +-
 src/crewai_tools/{ => tools}/base_tool.py    |   0
 src/crewai_tools/tools/rag/__init__.py       |   0
 src/crewai_tools/{ => tools/rag}/rag_tool.py |   2 +-
 tests/{ => tools/rag}/rag_tool_test.py       |   2 +-
 8 files changed, 75 insertions(+), 44 deletions(-)
 rename src/crewai_tools/{ => tools}/base_tool.py (100%)
 create mode 100644 src/crewai_tools/tools/rag/__init__.py
 rename src/crewai_tools/{ => tools/rag}/rag_tool.py (97%)
 rename tests/{ => tools/rag}/rag_tool_test.py (88%)

diff --git a/README.md b/README.md
index 9ca0d36f2..818b75bbf 100644
--- a/README.md
+++ b/README.md
@@ -1,101 +1,132 @@
-## Getting started
+<div align="center">
-When setting up agents you can provide tools for them to use. Here you will find ready-to-use tools as well as simple helpers for you to create your own tools. +![Logo of crewAI, two people rowing on a boat](./assets/crewai_logo.png) -In order to create a new tool, you have to pick one of the available strategies. +
+ +# **crewAI Tools** +This document provides a comprehensive guide for setting up sophisticated tools for [crewAI](https://github.com/joaomdmoura/crewai) agents, facilitating the creation of bespoke tooling to empower your AI solutions. + +In the realm of CrewAI agents, tools are pivotal for enhancing functionality. This guide outlines the steps to equip your agents with an arsenal of ready-to-use tools and the methodology to craft your own. + +
+<h3>
+
+[Homepage](https://www.crewai.io/) | [Documentation](https://docs.crewai.com/) | [Chat with Docs](https://chatg.pt/DWjSBZn) | [Examples](https://github.com/joaomdmoura/crewai-examples) | [Discord](https://discord.com/invite/X4JWnZnxPb)
+
+</h3>
+
+</div>
+
+## Table of contents
+
+- [Creating Your Tools](#creating-your-tools)
+  - [Subclassing `BaseTool`](#subclassing-basetool)
+  - [Functional Tool Creation](#functional-tool-creation)
+  - [Utilizing the `tool` Decorator](#utilizing-the-tool-decorator)
+- [Contribution Guidelines](#contribution-guidelines)
+- [Development Setup](#development-setup)
+
+## Creating Your Tools
+
+Tools are always expected to return strings, as they are meant to be used by the agents to generate responses.
+
+There are three ways to create tools for crewAI agents:
+- [Subclassing `BaseTool`](#subclassing-basetool)
+- [Creating a tool from a function or lambda](#functional-tool-creation)
+- [Using the `tool` decorator](#utilizing-the-tool-decorator)

### Subclassing `BaseTool`

```python
-class MyTool(BaseTool):
-    name: str = "Knowledge base"
-    description: str = "A knowledge base with all the requirements for the project."
+class MyCustomTool(BaseTool):
+    name: str = "Name of my tool"
+    description: str = "Clear description for what this tool is useful for, your agent will need this information to use it."

-    def _run(self, question) -> str:
-        return (
-            tbl.search(embed_func([question])[0]).limit(3).to_pandas()["text"].tolist()
-        )
+    def _run(self, argument) -> str:
+        # Implementation goes here
+        pass
```

-As you can see, all you need to do is to create a new class that inherits from `BaseTool`, define `name` and `description` fields, as well as implement the `_run` method.
+Define a new class inheriting from `BaseTool`, specifying `name`, `description`, and the `_run` method for operational logic.

-### Create tool from a function or lambda
+### Functional Tool Creation

```python
my_tool = Tool(
-    name="Knowledge base",
-    description="A knowledge base with all the requirements for the project.",
-    func=lambda question: tbl.search(embed_func([question])[0])
-    .limit(3)
-    .to_pandas()["text"]
-    .tolist(),
+    name="Name of my tool",
+    description="Clear description for what this tool is useful for, your agent will need this information to use it.",
+    func=lambda argument: ...,  # Your function logic here
)
```

-Here's it's a bit simpler, as you don't have to subclass. Simply create a `Tool` object with the three required fields and you are good to go.
+For a simpler approach, create a `Tool` object directly with the required attributes and the function logic.

-### Use the `tool` decorator.
+### Utilizing the `tool` Decorator

```python
-@tool("Knowledge base")
+@tool("Name of my tool")
def my_tool(question: str) -> str:
-    """A knowledge base with all the requirements for the project."""
-    return tbl.search(embed_func([question])[0]).limit(3).to_pandas()["text"].tolist()
+    """Clear description for what this tool is useful for, your agent will need this information to use it."""
+    # Function logic here
```

-By using the decorator you can easily wrap simple functions as tools. If you don't provide a name, the function name is going to be used. However, the docstring is required.
+The `tool` decorator simplifies the process, transforming functions into tools with minimal overhead.

-If you are using a linter you may see issues when passing your decorated tool in `tools` parameters that expect a list of `BaseTool`. If that's the case, you can use the `as_tool` helper.
+## Contribution Guidelines

+We eagerly welcome contributions to enrich this toolset. To contribute:

-## Contribution
+1. **Fork the Repository:** Begin with forking the repository to your GitHub account.
+2. 
**Feature Branch:** Create a new branch in your fork for the feature or improvement. +3. **Implement Your Feature:** Add your contribution to the new branch. +4. **Pull Request:** Submit a pull request from your feature branch to the main repository. -This repo is open-source and we welcome contributions. If you're looking to contribute, please: +Your contributions are greatly appreciated and will help enhance this project. -- Fork the repository. -- Create a new branch for your feature. -- Add your feature or improvement. -- Send a pull request. -- We appreciate your input! +## **Development Setup** -### Installing Dependencies +**Installing Dependencies:** ```bash poetry install ``` -### Virtual Env +**Activating Virtual Environment:** ```bash poetry shell ``` -### Pre-commit hooks +**Setting Up Pre-commit Hooks:** ```bash pre-commit install ``` -### Running Tests +**Running Tests:** ```bash poetry run pytest ``` -### Running static type checks +**Static Type Checking:** ```bash poetry run pyright ``` -### Packaging +**Packaging:** ```bash poetry build ``` -### Installing Locally +**Local Installation:** ```bash pip install dist/*.tar.gz ``` + +Thank you for your interest in enhancing the capabilities of AI agents through advanced tooling. Your contributions make a significant impact. \ No newline at end of file diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 1aea157ad..09d10d88f 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -1 +1 @@ -from .base_tool import BaseTool, Tool, as_tool, tool +from .tools.base_tool import BaseTool, Tool, as_tool, tool diff --git a/src/crewai_tools/adapters/embedchain_adapter.py b/src/crewai_tools/adapters/embedchain_adapter.py index 8ef1d2a11..cdb7f1d5a 100644 --- a/src/crewai_tools/adapters/embedchain_adapter.py +++ b/src/crewai_tools/adapters/embedchain_adapter.py @@ -1,6 +1,6 @@ from embedchain import App -from crewai_tools.rag_tool import Adapter +from crewai_tools.tools.rag.rag_tool import Adapter class EmbedchainAdapter(Adapter): diff --git a/src/crewai_tools/adapters/lancedb_adapter.py b/src/crewai_tools/adapters/lancedb_adapter.py index 630ce972e..c612d475c 100644 --- a/src/crewai_tools/adapters/lancedb_adapter.py +++ b/src/crewai_tools/adapters/lancedb_adapter.py @@ -7,7 +7,7 @@ from lancedb.table import Table as LanceDBTable from openai import Client as OpenAIClient from pydantic import Field, PrivateAttr -from crewai_tools.rag_tool import Adapter +from crewai_tools.tools.rag.rag_tool import Adapter def _default_embedding_function(): diff --git a/src/crewai_tools/base_tool.py b/src/crewai_tools/tools/base_tool.py similarity index 100% rename from src/crewai_tools/base_tool.py rename to src/crewai_tools/tools/base_tool.py diff --git a/src/crewai_tools/tools/rag/__init__.py b/src/crewai_tools/tools/rag/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/crewai_tools/rag_tool.py b/src/crewai_tools/tools/rag/rag_tool.py similarity index 97% rename from src/crewai_tools/rag_tool.py rename to src/crewai_tools/tools/rag/rag_tool.py index 222cf529d..5ef616795 100644 --- a/src/crewai_tools/rag_tool.py +++ b/src/crewai_tools/tools/rag/rag_tool.py @@ -3,7 +3,7 @@ from typing import Any from pydantic import BaseModel, ConfigDict -from crewai_tools.base_tool import BaseTool +from crewai_tools.tools.base_tool import BaseTool class Adapter(BaseModel, ABC): diff --git a/tests/rag_tool_test.py b/tests/tools/rag/rag_tool_test.py similarity index 88% rename from 
tests/rag_tool_test.py
rename to tests/tools/rag/rag_tool_test.py
index 8d441d341..a059c60a2 100644
--- a/tests/rag_tool_test.py
+++ b/tests/tools/rag/rag_tool_test.py
@@ -1,4 +1,4 @@
-from crewai_tools.rag_tool import Adapter, RagTool
+from crewai_tools.tools.rag.rag_tool import Adapter, RagTool


class MockAdapter(Adapter):
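The reorganized README above leaves its three creation routes as stubs. As a hedged sketch filling all three in with the same toy behavior, assuming only the re-exports visible in `src/crewai_tools/__init__.py` (`BaseTool`, `Tool`, and the `tool` decorator); the word-count logic itself is illustrative:

```python
# Editor's sketch, not part of the patch: one runnable version of each of the
# three routes from the reorganized README. BaseTool, Tool, and tool are the
# re-exports from src/crewai_tools/__init__.py; the word-count behavior is an
# invented stand-in for real tool logic.
from crewai_tools import BaseTool, Tool, tool


# Route 1: subclass BaseTool and implement _run.
class WordCountTool(BaseTool):
    name: str = "Word counter"
    description: str = "Counts the words in a piece of text."

    def _run(self, argument: str) -> str:
        return str(len(argument.split()))


# Route 2: wrap a function or lambda in a Tool object.
word_counter = Tool(
    name="Word counter",
    description="Counts the words in a piece of text.",
    func=lambda argument: str(len(argument.split())),
)


# Route 3: the @tool decorator; the docstring becomes the description.
@tool("Word counter")
def count_words(argument: str) -> str:
    """Counts the words in a piece of text."""
    return str(len(argument.split()))
```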

From aa7e336989b94abc60c38dead0671c145b8fdf86 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Moura?=
Date: Thu, 15 Feb 2024 14:03:58 -0300
Subject: [PATCH 004/391] adding assets

---
 assets/crew_only_logo.png | Bin 0 -> 96169 bytes
 assets/crewai_logo.png    | Bin 0 -> 99658 bytes
 2 files changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 assets/crew_only_logo.png
 create mode 100644 assets/crewai_logo.png

diff --git a/assets/crew_only_logo.png b/assets/crew_only_logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..f769da538a5f2f1fa615293e175a758c8b3ba978
GIT binary patch
literal 96169
[base85-encoded binary data for assets/crew_only_logo.png omitted]
ztH0#o2!}w$tjlVWk%8dSYV4ns3;ipgDGdJp{wAQeip?WQz&dE1+9GKYh}In8QF?^l z!u90jKwa;F;cIz2vkk@k%lnCE-WtA41~8BOG{?0;tfE~HbN=nC)cGnBqYd75C<{V+ zmyhxE7(NG%9Z;OzQPbyYZNNX$B+6cS;R0iW>4-wPrjcbjR{H?5ZA@=j~m zRDmtca9GuY`s&wWjH37R?eu46J#&AuzsYDagirlm`GGl}Qpqhla-PM3_rd!U%yPR7 zTlv9sf263TL$bSt&xgdJ91uSs1}*8tNd|?&YGK0b`iHw_eGi0YVMC}JYS@nE_W;uX zeS+R0_7|<>9Pi`5tR38XJXSDNu*mV`asuo$qnzIzC^k*a*Jc#6iV+~Ym4h=3gxeo~ zJ1Bkm)P9)$e_4Q%Mg=K}S-dBFVzNE7Zoj07R;Eq}f5KIoA3eB;Z@R8IlyZ)QZ(%+} z5>fm~w$g`cf{<^SNLid?AhJZ)SRl^aHKEirCt)q4i;rlwGR8nuV(#LMlKec=#a5|qqVpeKuCE4Og7n4MV3c}2lg#d~WHRo|PjmHZpx5gpg&UiP zt&C~4;eNb%jc)tzzzV5~uXQ?9S10OwgVV!AMNZEnhMCX<%9nr19hJ%NxAJzIFo1v1 z|M%b4F(a10N38$JCD@B99VY(2bKAKp*l$CNByvsK;yzIz2_uPzo}RQ)99a6ogMGf2 zm9A;jJil^Y?p-@CCf?>wmy-h>ArqHCSzzp&Sen^_Duij2;aYceoH zwQOZ(DsU7GeSH11muL3F&J7@2Uc6&+JO(vSwwHtfTU>e_wNuOr=X5vwPO!X+l;rlI z&?vay`kH3vQ3323_xiallfjQ7ume;ABt8Nj94@JO%YNv%F7`I{-rbjL-x>r2L>GS2 zM2u`qg=I$-T{)lFYe>6gzd6m&eI7p+4!#55spp(2`^RL)vdyZ2@5oYvQ4+(I@y528 zt%za)OTIYl7?3<=^ZOQ1TW)q5SH|J67GtAos0#8k0}B#Df}PM@CC@JqXrV8?Ggwjx zL~yyx@VOeXbWsY&XDq@L`2kjHsbBMXd$l_IHyogi<=&!n_%Wz!%Cz=mx*(1=U`!Ww z@oAkxn&aQU+&+n60F6I?$|6k2wWDEUSnzlTHo?bg!Cn}D_$z&27lB|wV+I==wrc4M zDhF*YAI5r*5AAk9#9Tp1gtp2JuxIJR{$-Lne4A3de+BOg{O2#mE@)qU@Bd$oG^@1+>aDnof2Y<51%F>6@NIq%I-Hw6104|q|hKO>q z7vkf&q$^N12bExD5s~imz$UdAH?+^d>a<)xS}Seg6gFvo2XTNkf;DaDPKu@iEp8fh zNy5K5#4wNAbg^|z-<1wRCxgk(~Hp9}Y`zoyM; zP5*3=22IX;sv-~4E;Ik|DrO~%BH8zPLoXoWt<7@0vlwV8LbZhWKKde9?S7vlw)ZHP zuhdOnJ>LdqG0dAU&J(}4wzdP0I!>ubqx-HC3(w64T7%iX^;6S4UYK?1o2Z|ImS^jv z!l-wcxm$%=;LOy6!N0R50-GR`zNt9QcJi%m?mi21LV-?>o%W$;mqV%r73lwuaV&bO zj<^5w<)iglnbIcPp*{5Eiu?C98VfR)p!J9&|FVVO&-WNKSXl6ut&VkRPCBIG=-;$z zqT6w|;hf#WAOTCIRLMg3yF8!6H?J7?*?a?V z;tTG=s77|gr2d7$9l?PPRBsOa4SBj9hKW1-#S7*tF&XC&zy@?V3KFLz`)8#|`^q;p z&qf4A*z;vStodmL*UL>8Wh<(rv+QMy^$^{3pr%Waspb|T2aLViAHGePe- zktht`3!}d?(6h*NgyOh$PK~gw*?S~Gx;V)iu*B&TNKAAk){kv^QmjG``hHavE{uvz zEkF@H!KC}&wg4L#xDRX#TAx|o^NF*~b=*Nqs6;KVHKgGdYtKhViC|j^;snr6N6rWi=ohX;f@MD z-ajdWP=+rqN|0XM*Cy#JVDwQt^DOLoWWxOWVL90UdAE>0eYGN*nqZ`2o6b}Xm?%D{ z=8mLKRRz8+Pv^)m>6vUeiZxA7u6D<~*X;BC1eRHjG+R%{<8SbyBf%786{H#?^{Th+ zF*2En3EKr4@mbssw6(EnKNXZkOQj_UAyXDi{B8DCp)EEiBd1+=_uGGU18T~vfnR|i zAFMcV_F&kQjP5>+b(7PkhHJzQBYBsT|3eXp-T(I5uZL2YnO(*{83sSLv987Be#wym z1%ZYa=27NcvOM|x;bB=}FUe&Wq0@Dxl(x$RggF-rl%{IC0$ z6B$Vc!2YOF!x{1DVEef@0wdiXgIcD|)mR%05NX`EZ^N6obUff4uq_d$f{##?kw#Il zJ`v<^fhVQM-e4%j%{kp&4wU1t`ax2(M&!XTxIyT=Z_`$Ag##n&}uN_kuo2D+X_G{ypoV(*Gzg?oHCV^3$= zhyW|$BR-p+;RE8k1$w-2{Xh*BWsBti2#O;O@~LhgnZS*3d*P-QP{FWtRowz5r62Y( z-n5)IqquYweYDk+Z7aDCI=+5OJ%v)|3O$bQR5|;o;*c*>H-5 zD8H4ab^yM&IlX3#y>?^)x@IB3oIamXR2%8f07K53X>U#bmCB6{iLN@rOPA{VcnmCC z%A{k9g5*DULz9Db-^O##7maEzhAlY(^-G&VHUqBF>(#p54N~N>evO5V_~V)Gf?_G1 z(pIWM39W!^?`xm(EaVwAj8#*oth_Y)@GB51;^Rh*yiMxcXIsgLuDAFYuMM#0pNUcA z{hCJ`AusIZu>}~D{rZ~CM>I98L(G=I7eue4jPRDoqeHq5m!Gsb-?D?Hu3(dY^Q%&V z%KUv-(g0f2>|U=Ke?Qzg=`+qX#-6d1+UkTaOp)C!3JP8#ASJ16+i2-xj14xx%kg## z4%6^B_Fuy<9DU)QCG?j%HlT3{kKQpvXPLnQW57E%jQ@uDk>VuEn?nmpNs$5>9?%QG z!vA5iIfEeLU{!qXDgJLf0#_+IeLcNa7LH5KYhMb038=g_OuceN4$h~&&woP*bRuA6 z(~q;{nX~i8`z`Huy(a5SuybAAD!SH4H8b=&j0@9w-(LNrH^XA+8xi`9ge8;b~89 z9sc<9{OU&=g>_}7v!x&FA@-(dTJ+Eh9{!K_qTU#ll00BBp5Az#3AK>m`&g0F z&LeWAGvYdiBr7_MTeNj|5weo8j9`OqTnv-)?{tavtjArXg>~9ZU7QYXGk((Qe%;iQ zJ)#FliV4p?^VnTGK7J0e3wlb&qo?xCjd~Sh5902|ul~$##5&n`VElkk zG#&_tQufud`|2@WaqPx#=b6QBXnCqWG`EG|%RDb13jPIPcF>5(sz`e>sv;qoiWD2# zIG_GGWo2QR4rT$xW%4Mh3Fis>k6RxbT6l;jye2?YiQic}p>Yep`g~uRK}Ru_ls^CY zX>p;o6C3J5{~c+i_|Abbui%Bib?^7IJoPiulVjI4Z@(YyzHDbnKI;3L)h}9I&u=v 
zT`C=#=Vj$4;-87H`GZsZ%-V)ppN<}$Vto`ja0iyP-}XfGk{{!H+Fhwx@b3ck&w|MQCj{4?T5slmK;RfRk>&Gm2`HYb;EcoCh5d1XMd`k z3&5$&?EYb+X;gII>*vb{%|9nMI7j(|l?$7_lj<2GJTEPds=A1wV@sC_V#=VJON18=E00J!j_CC2z#@-VuaKrObU-EoR+s5fo`m??0iUKRj9s}^Era` zI@Qkb*Ll2n>bWh~LYpo+z*Q=$Yg{&AeJtcbPb_ZutRnCYaH;?ERY5q1f*x+D<)a|q zd#oiV0Vp)bbyX z@B*qo;G=$7=&FVfejPf>yo166ku;&texgFPb$v~0;Kq$exEODBOE8d%)t!Cr>wZiv z72Z3Td~FaiN?1Qw%L`ByV|w_^#VOevL}-V!P9zVDArA{dX0{px!Z11}%BJQK4E7@$ zy)K^q$0{gV4*HunBvPXIx8pOD9g_aZqd;p1qI_o}e8ym_A633kygBz2KQxXQK6%|1 z^f?#s!}XlhE<|`m3-LvP&MAS&)z0)%EJR4bBHfg;>B~6p;De4KA^xRHGJ39uRNe|r z%`D8LZ=fJTA#pkuWb1DK`0*j3(`h+Vv*Gfr)Sn2o`#(Wc1tfooCRUAiI(9E9QKcU6 z0GSJfwn+^uud_{K+w*r5m}ClhT+A#hC2%{12bBb1d=(w4B#EB>%K22wfpEI(@-C!Z zX7!2KS9&Vp16fE~U&I$kUbvrLuzX(;pk&=bZ5EAQg3e?Ey;{7Ff9ss}Wvor=389 zzd3DXSEN;Sv`N3N&FfA&$Wrk<2PYEfOMTx|ZL!nA&J!vqxZTzJ?(r8!0fb3YP9MQX z2o7e;Mcc91CpF|SWmDMUn6=#LwFS$f5C))UXLr>xHL2GM{SQk8eVeZ^ibI|BR$*Rvh`-zCnY8} zN$kq$JCc)^fAx*?7UnPmbtafKrl*UEfAA)7dQ$ltn%H`m$?QI_2uR)rX|vrha74IS zS$i5gVMRUMH~w|)lab6^mHy1cAT53%l}rmH`A+KjA_Xi7?1n%1HaoPp;LH`U%#?^J*ljCam2k0`&fHHu6^T-2>1p#L|w$1U^t&2OjCPqa1KP z{2KZq)L*TRYZ4Lccih^;^33AfLHA1N#@Zh|wYQs8l2gy0$pgnn2pZKQ>I?#E(W{YY zXdQVlV(L;v>XKcp*2N9{3K~SK2XkQe6c!ft^u^FujGefZBTG+vEuKG6m%-`x`{Umz6pVvYXe%6%4Bo9UG)? z<`i29gVT%+AAqTxNexMcuorW7pEWm#0ZF0bJUv)_5*ZbHU5o^0#LBxs{nb}Z*rkwm zcq?o}^@k6?mauLk+$%biFx4SlX-Dp+-{_0GuprG~n~!*HhH?=kOO~EzA57>ejvYu{ zK;W93R=WRA)h&-7#I?&XWkMyJ+~6yEPEqQW^x%I_!$t%4J35zlz<6=Gh;eV| zHmd%5F~fuYLIL)cjfbhT&a2q$1V-?14vzO!Z&6CUnMA)nsdggzkK}g%!`&wmhGPIZ zs)l=lq1f<^(0-lxyS~R@l>qD6>!9BFWcU3>IIX#QVvWeZWUxT$@7wIaUs;m0N3dR! z687)dqi(6ckHpLtV6}H+)hUIivW^-1LUy6S_2mZp`%-se!ep5jE)GSIYC`LiHQqkN z>3wOss7F}uiI|u+D$x4{XeIgzK3XNqX}{o>`nIhFO?FU7&9Da>8A%ysY`tU(l;{-| zTR3(cfBeX$fM6N;Vr-G0X4$9d{k~WSPW`%c6%E7;p%3ddPuDsoyF<4;G>KqquTF#| zl^;4=0Gp{&zk;aISa`Y#5x9dk#Hbb1DPLKXmGj@5 z+m-VsRk6=5irEZXr6d`IdSf)l*si@$Yh4K2j||@A6@uh{+A>O>AwsFG_Z|Rl?c{E8 zaQgW_G{NS=wZH9CAuVYQ-P9l2+OzP+AhsKj-r-)ayZhvuDc0okXN|`bjE86S;hgbh zi0O2xGZlbI3i$z4S-O&i+S^hRSfT*|0ft6@+FRS3K{)dmpRTP=c6WGseq_88N=8B9 z3n7I5)rUQ3)xU7XWbC|qb7j)=BO8UZyJ7;KzH!9_U)Ilq{(AH@KxD|;bBRD3-P^f_ zzlzhry~|zm9ugt%zsZ41JDm&hUaf8!NXx}?NUrnowPd>rBBK*WeFaz1OlmGHRA@8o z4%2HUf#Q;=sMx&wS18&_A(sti<4W9EjKVmHBO&2I!2=>Jh+lI2-fNjLM$W^Z$d=%!orD}pOT<1(2X2>N1li)eAn^|%T)Z?gCH zCGgB4Ay*2!gpiPME5x1$p&jEz-Z`j5V5kr*%H zJ+prkn50PI8DByZ||^giqGlB?3J5f**zH3dlBqQLzJXeO-k{&oqpi4XW`kbH<+ z!UyVG4+$4Y{6#`Q!}a3X7nE9&`>NF@MIP#+C7=_ojQye zE6vTDkiu2Ia_zuJv+$Xd=mYrHYn6x8-a}>DTdf5~_%lH80j>g)UPz5ur+%LQXdru) zg<{6ygU=Xr$riEvG|!kiJaJqZnvlpRM4%M1lu;F^BebJ951a(nBZ( zb^%(}O8>XRdFCzj%YBef9}Tnq*(K)w2S1A{GUGdBtKEysVhI)dpm3RiN%d|cdP-g0 zV*y*yC|!|1Zs1k%8TTXl+?CxaPtUG!kU9AFEbsvIDo;cCFf;$Ikw^K3IUM1SJGmZw zIc1YhYQ)kPcHmott5eTRP0D1JV4?UKmj4Tf2Y$ ziG}O-Y5I@gl}JLVCQ?v3wn|sf6&ez+-l}5mAlV%^5$i^^BT zvYe-z^*NfLO> zg-a^3Ab9SfYO0}gNN$&FfYC`wrljx%V;67JqmazT9aPRc`WXtGomY?7m^Ag6LX&+; zr=50#W7jF7dLeZ~bjFZ(++j>4_JN}(Y$fjQ+wAJ(_)t`(%wfWLHj*B4Vw62&vsr2% z7C+X*a=GIYbd@n<;`J9%t|(>kRG7yRmBvV^(mvltez~WNMDx;H%sZx|8kjf0AnGcq zt=~A`)~)i~Ohz*vAXa+2Y|Pqg=V-xl`dzj{%2A#!tt?>^b;yHN_cm4q`fVm`ReRC@ z%L4oxkBdKC+c4ZHXJ&DOsOau!#VgCY3j42QxK7q1ncW#|QCG-hzLsDvWx12w zSALX^|6AL*pixru3jrZ%OV!}>$kJ;0oS&r}fIaBW0{}D{eYYohWfUJ#|Lf-{Qm5M- zEFN~=msf0AlE@^HF6`VA$DHOxu-2p0`!!z@twXcp-kK>(nq34Sb+GXf?@=m#eC!lC{$q5gl@}kG)k+S!dj`Du{)hyQk0LT zbN;ebViZd#Z9$YupW>rGb!$A?vIWQ8TWZ}(0s>a^bwPgQQ6dK#OT?!AGvGfj2 zWUz0qTb>*eo8VSbt${9jRTnxJ8L!A8=1xTl^MBx$Z0OA0-oTi_+5$q>UgblG`-_

P7K6y{w`QN_k;!52#^DsIU0`{?~*zd0YTBE_n*Fcv}D?gbW5_&?%UB;(MMs6 zV@$^*W-ahiKRr-;D4X7YQ>^(qa7Rd^1_Kk8QT;w)f0`g|7^TrUbHJJqo4e6eF{}Kb z3DQ}_-k(P2He?LLuO`oP*DhbAC7Z=xZWDHYn=GkXD+Hff*1dFJp?=}x5Ju^ucz!jf zHu7CcVZk2Fq$E<^CJ-tWQ1k3X{PQ%;C%QO2?2y|!zrFW^F|AxC{DTl!eog4EX$$^6 z2%)sAx}@f3;ttNw2VRlKL(e$&PLh8&r!&WJRT?G1XKd~L{`oUIqq3Im=}SFVVW{x3 z8~lqie|g5a*UN@LBKE?-Vdw~P`4sL%zdM6n$!^J1eT_ji!yQX zTM={+Gw0p?mKROkKmlLm;CG%hL(Dzhb>g>=xUL$69F?F+2$O&aGGGOV5^~4Y>bfE2 ztlq7%?mnkE{#NvDsNA^E2bH$7E`r zF4;e*t)65-m^Acmyo;o_mZt=2(A&9bxVb5!+2;*b!Qo=f`UA$AnH#q?_Z}PoXT8PC zW4OWJjitxIgo?GqbS0>JhInns)5VY2zElgRl3|fOS}}8*BA79-#(ih^fC^fq;PQFeI&nC zLdEU1ueeovfmy>7IEjr6+>gbbCi9U(aQM66aUo90COe*ZmBUydgkF}_*n1$7HG|hF zk1?>0GzW_T`$fE&G4y`tAdtAZcL!q=d9%A}hs3(u*gz))Vrsw$`vl1L;zr*lqm95T zwX}tx^QtW_0|UMr&t=j*8aw1@`|*WT=&I}ybz81HwG;;2R2XD77MCnmN<8yg)c@MV z(X%eK^1_3}<)(l57PjW{@UPmPliU*4>pD5S|HvCq$qFz8UKkZUL56X48napq^a_v% zVJDa&E-r11go$CCGE6>feyDVZ3`{xxlUr2<=C(eiOJ0A{3f!J_Uq;W4ly~Jq77%9D zAofa0`TGg4WoWV_#QtTj#JuKcgL)+?n=5M>M~++9+Sk~w zvxvI?QSLUTGy%#-ZkNA0J>j5oK89s-!^d><9OXqbNvKk!LqHh%8DSAiuu|x;lS{P zLLZBIwFkz{uf5`Z+zoAj=Oo@NVv0SQ^RHCb4xIlI$9Z3lavv~6?-?&|QsigOySOfM zij()Qvn-19Hf8c(lDxlsAVK(y@HU7qqFx|^V3b)Sf zwZYP^GjcM-u2Ssc@+7Zbbw^{e9}3d}wl;YEOJPh#P(JAbRN3Cc`AkNo)c;0QGStBL z)zHv~p+^qNbLp$ot$OmUnCT_|~>AX3MlTjb1 zv7{HeM}KeS%;u%z5py+OjuNfl^1-ZoSX#&78dGi>Lc$z7gzc+9TeM2Z9zQBgByEq;+7t{7nZ@F3kHru#V zZh^OejLOj#~Y*!Mc}_&X?cec~S4nTEhv zM&{!uFJOcN&T06p1*&hkl>KIroJK_H&;^Xj6Oe)1X0!gpl28=#Oxa&@AV-G}N+6Tl zix&bdTa%qucur4nwSgpoTP^HaV~8Z$?yng#*Sk`ukq#sx#cv}vn-Hu9XMI5G!{-I0 z_KDMjY03wAT~n3r?C}~1xFKiRZkln*@6FT2gzthaY^-e`T+Rx4Ei3*G%e?4_Y7B z+)d*e{3OCWe-f3y%lEJ-6|tOyAXuXDBSc&lxDE5S{~kSeW$08Su=2U4)7M@Kjm+}6 zoP9ecv6a$<3-f8c%P?`+s{QW}hNxF3m-pQNrPdT~T2sD!F4SL3#ycm?F=H9oPP9^St=PyGPZhcX6Ko8dYxx;*Y9ielqA&Qn58-3lQ`Oj&{ z9=F1EVvNbCE|~svR3}^cLnbMulY01Zg?PR000%!gv6rj*!AT125kf^8WTS>(hxA-! zCXz$t>0~JpjH_;i?S?6cThM_QwvEijg9R8jaxjX9_vsmqn;q7U<@X;CxB(YGstm!* z%+G9o=lu}W^NSp^3YK{lga_}5)y9g*E$g4y5~M{PfypoGk9rglL$PrBTRXS*f>04gP_&VsOXiA2I7)` zx;?mc7JP+V5u3BGZnqZstCe7OvUACy8n5$L$K|MoTM$A?gUL_0iR&Pm<3Lfzf&iGY zHHqoJL(Jv)O5NbP)16F8wX-!DFR5tz#7y*9MJ5j#uNt8|YAAiev6mw?PJd2h`#bDn zXve)3P*js|J!06sJ@%XH@NK7QaM8TjMN#6Gvfu%D5`mUFhwh+b?E4a zwT&Z&cXR=SyH3egITy9wBW@y!posp7heHDDJ zCP@8fcwONB&MkKop&o05RQ@l4rV|KiiN_w1)iSEkOyRsiJ8w!`9e`L0)KII04rW(B z`JtTAjy(SNu`|yyQ~mC8jn})5qx`;!c2id0zdT01vn?XLQ&xDWB7bF76~l=OQ5&?T z^v`_*JBWA(PKLQ7RA{0mKbr&J^>N*(c8|!$Qs-tOh$8CWW!OAroP?p^%VbT$+`gKH zHed@rn|(Qg?lp7t#r_?C17QZ37&iuyE>lb74rPB)p%dWNP=iLM{4`S8)5P-wJh zv{)lFaqU`Ln79Saw=#LuvIa*%>^YQHMhg`<=AzcXmrOj)^%;9M2orIwn50Rdrh z=lMD6A#;vf$C?P7Dh2i0oDm!Xgqx$YS`#Zvg|E#7nkRz=GT_m92SX#+qIUx4D)>#% zyrZUUL0|AK2uc7+GKE8bSY#K11yu$`b;U#r>+--4iYH+dk~{xBQCLUFHYz1x&kkx4*Qv76j^lR7fE^F`t$7MRBz zCm20Vde|8zXl`S4jHP$iJaLWVULs)cS?G;ka6R3N*9CFz&23!Y)`bimxNS|}{;57_ zjJ;`P?Lc&{&6z*K>KVk0zhmnADd^e?`1X&niG9#1muNr6n^N09ZV%=^1rI|7@bN)^*x<*!vi+BMl%qOEFIf!Qe!+fN~?jX z7dZChy~H`gsKzPvKNnHfTWt`dtfRZ7CT%gq@ssDnWgRkZ->X@2{hL_}qF=>ML)JJd zs-76bYyqvb#a0O*E=EWzw^RP}0Ug%#y5;8>lszK2F3@HR4n_?*`kOxbmE?CfUKK7` zKqR6CJ{fWkI;H;xaD++M&)HU$gIw&Qr+~PVLnh}&cs(>R(_AO?pC#sB#Du)-g;x&~ zH(Kj1dAYzew;)`0N}G}m!!EZss^pMiK4#?a+#8@l3{C5t7TKWRfBywq#J9P9t~rUH zi!8YkzX;SJvttx%O-k_)@Z?xv_JT);vR9RCr`qnK!>p@R2_sK7kpQP<6Q*0-rVvb2_ch( z4MPN8)9O&7J5d@pdf{66_`(y{7KuWqnq`MnQX%IoW zQ9weayGtYvNOuW`?v(oGyxuoH{=C2IbLN@Zv-jF-t(oV+b_X4Udl6vE0J*9PKgi1v z092i$V>#c`uMZohbs>DfjfWn1>w}xx=23oJ`cTxty1lj+X><8N*pO}iPxUM3Ef6&; z`y)I!>BP=|dGpF~@ClJfmKHto5Y3Vy_YX?~XzX9RfX;EdWu%GA? 
ze^nE~B8}-yEO_w{elt*>u_zlu+z}j&6-3rK(>_dl1sL(drR-4fG6Qk1^}y(t0hJ0s z@LQ0W*$1__N@V4Wx+~ zwFsZjV2%^S{uJp8=I7pKx)z9v?Xmy!~lf_L>NnI(j%cL zHXw7!5{uDG%?o}IIou~W(+Aw#v0-kY=loU_J9GWmH$2cpR;>Ien!cPUsw^M}mO5fl z;j54ETf;y<&vGxKAia9253xtd-C0NnUd6&LkI7gkgC_=Yp$p;71&H8Ke!t9ZBGFFmBNAJ_caUysv_oRAT-0 z(TPGU!BD0ueluv|!I{JMQ^PyBoU7Mn^ae;!yix{7W3Zetawa2z?GzcOj*vH z1g}@=x8{&?dt$~9-QM=(jJ=4b?FEnye$|sbP?rCdB*8LtF9Ca%aKS}Ct?xdcPdvv< zb?{MBM;gL2t+}l+a3h5gxa#GD<21OV=%Fa5Je1%;vkVCaJLaEb%lg4I_`Iyq2} z*X)7TU^)u?{y5B06fnnrQ*85O)Su^Hcr%+$WeEe&9M=O?P69Ove&jlN!(v8_p&hsu zhXQ7LK&XNf*`V%53!h(bY_v?A%A_2z<+GcZu*nNh!20 z{>sn?HjF6{x+>R@QH-akqjQ7Q^)x>H4inG>tP%XMDu3R?7ng#!G7|jqAqx}PFLkfk`BD#O6Lr{?2 zf=ZgPh#_|dH>lt=Co34cQ@D*S=k=oRA;1K{BXnF_gG-srrsi@ACoMq=J)QJK#k;NzeU);N`jwZ12BM6E*@1JSY-a? zA2G#j#Xz}ft^NvpNH5z&`U$h{z|}I2_}(V;Zk|EC^v_m+ppMdmkqMZtGC_?npkLuM zN?HLS8kB-&z+=W5`$tW>-U!V349D8XqJb>JHl8gI1QrVMyyZ`urNE>3A2z3?po^G6 zNnds{xsgf}G$Stsy7_K^`sq&;I&b(2_3gJ%y4Ngu+%yAGAu@8X6mF#D+ec(etwx-F z48YHJ9K=haTLO@9){Pm5qM(O_FHRo3dkYJeE*OghbT0g$=2*NacNLg`c`+B{`(4V2 z9%w5wpm8tase-nnz~S&04~?f_K*!Ko?TnTH!Z5k<%_)E>>2)pLNn{X&cJFhI5n@TG zH8q*+LhGXHKqt%`I25L*d|rBBH@*ljG}T{2XkJvM9D$jqFeaRVFYX}*H;QX6QG@22 z3b@=)fg6|I&EM!Nc)|UkodOW|=5oAYvwL)nVpuY79zBl?gSfTr#Hzd`cRW}uC@9&#i*M^oawEnj=6To_ioB+7{uEW8 zK%qTYFQM89y++ochK@u!$EEapnI1~Dfx&B%%>rVERkIYrnrEL=9@?O=ha7fk(Ou=0 zCn)BB$bEsBN=~$K(Rb92PxRtBOf+XokB#oVh_AK9u)uXy+mcA3_(U!O!KV_H@2odv zXv1|b?L=P4So?1+S@hm?Xv~|T=HY4B<=-sRQ{j%dACC3<-07)zv>db~AC^0){8sT@ zR7c+?Kudq%eHbz%c%UxLVk!e^Oyn;W_x)U?U!m$K6qdWH9lpICm-&rQ|7_;hu|QQ; zMiP{Vm)q-nUL{bmHzv|E;#rLNizf+wcvyNXacw&eDml)JG(7_;M_tvTN%k=e*aQPl zULSgEdDPCSZ@&L{vf*;QSt_8R4LjFBlvpX^8MGOBVg2Q+yeM%scn_YN-T2Y8Zh5gt z&%hP!*z?;KcJgx7dmYW;g@cx#W3=`c_L`e1qg`^d^K3UJT#mb?F1`w_dHCc0>u6%8 zi`SHzpIPXVF?3HM2^hN(p6@Byngi3B4vbU5lDR{G;*K-(^8 zq1tbnSRcf~hb7YFl|hAjl0xk)#+L$FWN{>^n8I9!X<>7Fw0j5f8#id{31Wd0c%o@( z4k|K!3=d+DhchU3`OOtfNL%L~6Jt6LWL`6*a*WpHpp4P_qz?4Oo!RR<)-m2xhkup7 zGYz=&tua%!oFNY2HLY%O7rf!V#lm7z`L>%v@WWU(H!{?bXRR?b z8%yEmM}LYmZC0# z=u=PPqb^7c_R7>mU}$Gr?)5@th?Y*hGJ88u#f(hyt@KODYR8vE#%QL99xC?Y=w-~=D-j&geklWns%A_b58sVB1d=4r+miTEF4%8Ga;z1 z_la-EpHWz+Y;0H@srA(w_!e z=7{IM*OK*uI50~6g9S9(enMv_NIyt9Ct#6TBy0Ppf+)kqaJZ2^ovq^5LLIyAj2HcRy`2dDy>nQ#hwih&Kz~IQ`>W z6WI={MS}~;&YD+y=^r{P&nSPiIvpk~?Ii!o>Sb>|G{<);{ug zZ?tQV(OZxE!FpIxj@IQC#4xe>OG87Nd6nf+1_oc%MHk*ecHCz~ssO}s#I1JEHHs%^ z_n#^*3h=z|vpmbvKBDnZIpnXN+%{`Ah2!LC9Us#-74ui;h{T3(5-Q~1IT0Bu3q~@$ zf1`#L#$FJ9)qJHWYQz6yvhhKbXh2Halm`CQt`tz5lHPSGB{#=W*fCq

w86XVatYU#(xjzsrh< zXxcz(t9+dc!ZX!#fK>~7oe_oTnadvgvoS87iM=lo1Noy2v`Hx^VxCLu1Mi_sAl!ui ztPa@UUD>A?D)iLWLSJ8mfN+N&VY0M%Q_SOl70{rKT@s8z)L?T1op>I{g!jT9ry-C$4OFv&-3v_j{hcu)zO=mlohEQ)BdL=mg+`y@xume+JYXt-1FvxVZT zDyv9-2|Xkj_EyFX*FBouIaR2tmij=}r4lG@?OB)o;6hpqN=6WBghc-hrSRfndkW&ALMaaM(lLY_8#*!XNmaQ2R^2pM+LGy zVwB-!%z8tEv}BisHk!c&g(Xqpg-YfdsfdbQDjcL+H1&J6;yFvBG_g21t-2$B&W=g6 zu(^h#B%~2ng~VG(kd)qTn#vl!Z6T5JR)%K|?l&A_O->KZDIat5@imD&txz+QLQ44c zD!}_pGlts9s53mLe}%)7&2aW>R#(ffUmzsZ9!jQdmnc}oF)oK&`!ncOruy^nIH>gV zfZzOACYooi)!_G5SVr7E9?GcSt5l?zDB`ot@lk+o@+8xWC5GpHO!I?WwOLI%`|6|mHTtdul+zSGt`nKksF;BWs3L8%MYePBe zlF~KsVXUJbB>&8&oHcpaK{U)2?jvgN5p(DU9KGt&&SBO2A2 zD9}lm&3NZR_;4r&<%B%-cruI56cjB%E#x{}jh4p^2iegrP!hT}FaWij{Hd9BcbDt< zjyzz(3X@(1*NyU5yvP3U!bc$s*|J5|2D`cfj%+fDq)q&fVKD&Fur(etWtq)MNQkXf zeFmjpxOFs{tNb1^Z`apOnt|%}N3~i90G^7|Mc*=Vg>H2=u>#tqgS27!#^mas@f}59 zKmqf7b9F)kRe|F#%Igw?tPsSJZWC%<&!R-~;2@f6H55`>pUDS+v&m_6&p3KY7#xdoSg1tU8-ts40!`=tPE4DxS^s ztH_B#78!+EAFQTquBj`f!?NrOrgwZq-?tJ5;U30OxejvF<)^NQ%tEljGoi=7&|IFR zHRwcJUw2mdVXZqOFlcm!`~@&008Q=oSVnodtb}N2`xNwy6tK6x)KZR{8~fhVn*N~h zb~C)y1~$&~Gz_+MtBG~3P1D-R$-^R%g9Z_jfx$oLs-5&b3m67*U04WKnYJaE;{Kh(5MmQ<{EIxd ziH{%W|AZ*|HApq;D|iM7nmq#rtzLxWC;@EtGG?ZHud4$QBEMdENMKZ1T)V84@C*$N z314j9UpiR2G7zukeGp3&Vc4IfvYC7MOO&GBgGCQLxEohO3FNe^QD{J~jJA=?x6{*h z1N~+=Se;pHX}+G+FJossvX5A+R%Pg3H|v|NV8|YU72velaBIgjCAhYo7#eGYT{{;T zl>K%}1wj|k2{E2}x~2=Ow8fGayS6jFvD145o_6%ru=zZNr`7jar6d^4tno{W8$(Zh zk!srE8{urz_R_$oO^C$73L{VT@c)dGq0JmxKQ(s;Nwvp?YxqI8b$_zRxgs+hqwV5~ zBPaxxu2Da!tW6T&i;aHE%ho?MV78I}v;_6zn#)cMblB(c>hlb zEcaJ3m|uDNEaKE*a)@U>io&Z`gr35Gim4~W z`#wVhYtw=@->z@`YH~E5TRH$U!ji)^cX3eebwEK9oIMRD>||l33xx98>0*n>)N{UC zqV~r8iB{Z`@=1EGL=>oK(P0%}y77qJh7t?(9Qd)0y~i!cXZbQazbw7*@D(h7js$&6 zJKtA$OIvk6fLd4dJBr4zb3Vb`7>yj`B_uskaN9du9f3h-9P`4&kQIDE7=uUiZpr-> zhbbN3U}_o%mWRuDG?cjFs}+wq!Q1dF`+flKY@aRs1g(wX{S~jL02!+;Vd~^t@2jgp zy@sDrd4)*vI-Kuc1nc2p*SUzZqW1J*%W^^YRB{5+lN=O5FLHu}cA1|S zR1$R|P~VnZ8B1`kIfc9-xM2<|2I%)bQe&;|5&o7DD^2T2q=?c}qozq3axqzWClHoP ztqsQ4AQiK1PLJrDNZ}E1d?dGx?1adxb%_R`(+`M;iD``@Skjw-$rcQ6+*)$V;b(*C zN*$q*#@AI9NW*5vN{7QA?chQ4C+k<%V!xsM*0E9_vos|xpL6k+;yE|XYfzLhG*@P5 zT%lK@^jBqL0I*xxya61+LN;pK?Mb>6{x}3;#S=qg4-tGdVgz~NI z^O*%@wB^&^iCZoo2x6Xvf0!lt0a{AIb%MK0otoY}hQp_M>Tr{H0G2R zgTFzIS;&skv0Iz10oz&WO*tvf%MC>F{rAF+XPe4;lQVu?RH7?gZ&|8PO!iv+{XpQ@ zEmejov9}O!hTnsXOO$c{jYN}CZTTAcdc~kyJ~=!Wpx8rD-~s-%TmMe?3$w0NRKEaU zW0iCJ&raIi`_FQkrBXqcMlSq=;7W*H8%HC&Q+m$XW!(U5X}WO3$Y+0fommoI1}d_; z+xiOu?A=k&;28jX!1+escQTsAW8S^xHri3+Nix{q9x^l903Zdhso!B|Y{!uY$h;-Q~7tcpyc4LY2 z+zwEQ7pf2G_g17YtxLX^{05EBimGAO<8So8D5$M#X5=HO0@7K;P!VHdNwJEQHOZ17 zKS28lD&*SN*2#CO&e6jNHIvD=zL#U!7xUS@oU7R47Z6K508xYV0a?bs>|BbP;Xf8@ zXV5fRvLbB#X2sn=OXmd$zq+CiBZCT1(5SqyW(3T|LiPt_9S|~6Q$}?qhGTQ>o@L}O zq-WpgqnGqNOc+*FP*_c$yUPF@bgz3r20^hds`RXwDn4<=vTTm6DJjZ++wbw>7yz_= zMt9*^mO?2I6c3zzuG-LC2*^XKwVbc}MqXEaWn`Sc|4qxf;8cjOG4FImaZB#3XIxjb z;tLosI5|_<_@J)nn8#54mDgR%z1#WR1=3Qbli+J4KCr_Mg-1f%YX+&*+X3R%!iHJ5 z{2F6Sb;6Q5IngB9#^*ZK_*}%#NQ`-;+MiSIWA&oD%36Bm9QaAp15}+^UoAXMr_dbF8P%FErYO8+~+S#vOHqY}#szGKx0iiiYDOzGZgU=a? 
zBc_Q@_e&B`hz_ZoW5cdXdL?MGh~MgtEo)iOL1cJ6y@~nk@o_b|ER)JYr1j_U^14^f ziAz%XBP+mY+*ouDl|W8Y!P~NUoeLBR+A|=N^nJl`fZfy-2J@Y3Hi*P}c(1H&d$k%O z6!&>Qf}rq&Ehniu@WveWJ|jiWD4=y1+g{3Uxq;^x6*L|WD)PwR{Uj9k$ff5kcV3TV z9<5E+dz5>0D|W&ox3W{RNGk8#?s?@pmw34xQ3~LE)Im80@J7Epy@xwKBv4G)T)#AF{S_3b}Jds=4Jh)Gcvi3 z^U@4a$j1zm#HQf+In1@BH-*8t0nLn@DC0xSAEWShrLmdCA5(J?nz+(En6;>uc<)< z2YMnkFgH1WcD>X$Cizxy@9i^UGA~95N-C<(Ik)HcugL;yy zF8@;UT4jCzv8koZ6|bge6#Mk|+#1jz{zQ;@!dAoHO_;KuQihfB99~dOI5Re$we(61=T6l~8Q@DAQ~$>V(G2h6<|xacPc0K^O#s zg)9Ubr{!Op6y)r`57gDyb~@^vonO=^2sv`?FvXZ#|4dAJe8k-BVZzn)=S2`hsg59S zlU%l@5~NjCeI2 zrL_E_-Me@aT~sEx`yC2v)-~g*eH;twmfsVM2V@~Cn+6k>`gZn_BCanH4w@U(J}x5= zV20JZZvFC_b*cG`$bVh>&Hg|&*JFq<9`w3!Fy%QxPYzXs6;w5sy|G0LorIHaaJmJM z5Jg7Moj!c{0NBy#a^J7Y-hg6lK0F<3yeF8${ujGdyH9bFb(XWg5!NC!pDjD@68`&W z`J^Z7_8g}jeaGUl)`<;d7D)#W`@74?>iU3=&w+>*E+ZWw8D%H*SxgU$NOiPVhFP=( zP$Sx01ukRiwyRicbFgjz#cz8w1)~CpAg;PpL*HNvJsj#aiCEZT*Ro#&XD04o`%Tjg z{!OB;vtIy*0SLdo%cgz6$7WTUDq-Nwdt}_bjkdS&_hnc~Yz0Wnz%%ztf}H2=z?5cA z1#uv1hBnBkzS_7_X$a~Vz3 zRc4R_uxSK7z{x7Dv!DgMs@k@DO^khGz^r0LWmgoeo7k=teZjwcY;yfGn-}&Y^0a^O zO-B_};OL4Lg$A-;eI3@_;3}c72iUf6x8|{N5msTkdjk8~ZO3jIbLan*_tD00({!Uy zL=mdklA{@*iw&`5x=DEVH;e;m=4Ebv zoKJ>4%fe#{0Ij}je=4U(rsuTLb&x!NRo;(;i?(5zla#*%hlAXqYxCjQn;qSnifREl zJ$O|HSkx_lfmQrpNI8;>sD0Y%h#$AMJ603aMs>XVwskOuo-RQQ8Rfu zY^*|E8li^2?eG7b%iC`ad|b*w1*jIrxjZVnwa3)+e$)Fx|tEli)eFi z9E%B^q%MsF{a(Z~d(M7p5mk0=kzy7wH2mYFsuIO~>+7&N_eoRB0}>dL=kf^jbMQ#_ zo9q*+z8QUL5|n_*GfN@GJ|!fY?mV18`d6T!??h>-g92fwyLD~*2DCH!mYE$5WCrbk zxOTr5W!56TB=FPO8Yp(CmF=f+jNsDq7!)Snc?a zfW`9O@KhAOy;q$j#S9a^xM%xO?i4b;KRFISvE1FcXX$|maiC9X4xmUlk;@sDdck6h z6KYGd0N-%fB*P6c2Aei%=8!A=H9QWNz13Bght7}U>QJ~#JH!@i23(^RC13@BDqN3X8vH5R4NiDBq4(FIEzO0J0+x(_I~pD*SMlI!VSM}lfg<9}q{ z^iDh8JE8kK4f%dA7TMqQobRDVSeO0f3pU{9npO8Odg(4p+he_&5PytL7Cdt$>K4m> zo89|vdGGQ({T~d~L|VQ#%>dJD`U&(Lof;Da0Mo2P;#3-l{eq9?xd~Zc{*mEy;t?$` zocUfD>Oaxj;`*uj^v40MplVlCSIu&e(7Q-Aa&03Z%RV#$9G1b4zfcVM=l;&b*(%z`tyeDUp@w= z+6w1btqTeoSRaHq7p*sV86t!l52V4BamGe`h1|@fq;6AIQhuNUMgl%~?#k&E-f7<) zRvq1IYHiPM+fiM?#{lXs_mFXIwO+SqB&Z_9bcx{=eborg;4Nb`ow&Iw!K?`y>U$j6 zVkl|gsNDeIn$h*Z1I@5M(jXUv$i=zs_ zI*9p5<#6Vgk3qnJnBTPI9s8m$4~jkHpXgfR1=w!Tgm~?NM`69ray6i z)~@8W=4ZLpY}(FbhuhKp%AzQifHby_CgUKeq?8FbH89~gz}B2i(CE;57A&lH-Tdlu z*)03X@BNsAjGYXH5oruj`$f0nGu@m+R|uvox?XTBnN*Doep?idsM){KQIBmj*a0hd z^!HX5!QX~&h!afPjXCy%49wUBnehqm!m~&2P6}_@i7X2kouX7g|48H4Iz60ni8P#T zyOYLL4ua0hVap7z(cIcKmZ?}gTib6QE3e8f$2IPz#0>ge?|0AKH-X$7BqE1h2I)0( z)qjJMH;fe~nVFW*0eFhJTPkPsHf*R7%-0Qrt7Zu$RK>;KaT>#aRtMUs;_*t0?8fr) zZ{IZQE|!0$OMB!CvC#{2PN-CXR+q34^nPr`l0_wq z=+BH>_JF;EQoy(K6rv2&8$``epGcB&hq(g4GofYs4?fiBdpk{^ZQI*br((IMt}?^T zW&3j1SM*kSYacXQ{8X`(rLA%9zMkK!S||6m_&YZaPwbb4Jg`~L(G?(Y+kiO?OL{W- zcv_^bu#jnbePvZ;4@Xp=$*463Sc`0&b3*_^t1tUL@+Dchu|DdBt+2Yc+4HWE$`{fw zRMZ!LD;^L970KUDEjXyayn?CxdXF`xk!R6I;e7pjwSv?dmycQOL@2$hv06FVGnr5%!RMh{a`V7fO6pTNZ^1lueTk)F;+T)+bSp%I#=}wr;plz?_+FKe=gbuv1 zKVG7lG_zc>1JF&Fjp96#QY_qk0q6bgaUEX#71TZKf4I1I<+;6jkEj~xOkSR6=L$03 zs`niSTaA4P)40H7~$xGX@EL^j2bw~@4|iQXw7Yl^#Cj-KiuzqMTwhj5+*Cm&*0_)L73$h5K&< zkQ`%P2X`?YcpUV|KeBX~!sF&SUhMF1zS&|sf1Bn7UBVRm%p-v%-cgVK70s@k=iAq^ zHz4}~)_?lyIh}pu?yMxBWuGt$_5E^=PGMh5TVfsEza?!f{NyxBqRmg?XG32V;Wo}~ zLc^9rFipxu42L?!e)s+;`82Ct?PE#B9hNMvM^i-h&iYjV8Sf}6O#kVn#Hbh@&U0Ns z*bch$;=80k6~;RC@TIdKeU^28WU`6T3%h^POmY|9v|&ap<8{u}*S9OYR8_SBK+EAX z4)+3~a#mSw{b~fDeu!VD{s3Ir=0}Hv875GlDr`D}L&Yv~ zRV)fA?X$LnGDdndr3Ed4btkEX#ebG`RI>Vp(&N-S_2zOMYTLk z2VlM}%N%>VM)wWN#+LNJFssDkYI|}gAtwQv1>#L2wPmDc|F(4-GODmVxwH&McA%B_ zWwQbs#i6;{E1(dgUhpS6dhWYwiYgrx2uif({e2DLp(@sjxmB3>cv%d%i@754;}~TifpSY~m>)rC`fH?7>3W(T4@9Re 
zicRVK_R`A}OXGQU^c&0{9dZ|4Y;#CAIQ6vmW%9&k)tw$n`8D@=VUm73FSeq=H@!tb$+AZelQaqucc)z4?bcH5V=ZH5Li%OmcYma8%NSCi`ocZ$& znZaqNN9M_QKeEtXrlsb{w~h-6pQ@8oMDHSI{jc3}CL}=$T={AUY;WHx0!5z0H{bZN zvzd6x5*=#C9Cou~IU8}{T%w2;qucl6Q9d%5Wg$yiBk&^uPXlQMXgnScOT+ZO=a-EN z)u8OQT%VgS_oQ6r5d{uk5i=7bPUZa^nY`pY1;+%vsN!$hSiuKRC8ME|HPucT-;*sTvw@gHrVaSY)3uw>ERCdU?;y`r2y+l9ksO_>>iIr+($bg$Ws z(ehY9$tGlEH3u5rl~tc|3DY7h`W@;6TR6$HbZrfko z(4N;1@s&Hr_hHM3%~m7=&E;z|w5jkDhsmaD9q$0jkyofSIjrvLPqQc&aXCm*(ESC! zfaSm!zoDg5QsK zGcpy*3XBHuVVM|a(esU_E||UM)AT9B`7>oO!`_v6&)Q=IXLonk-%i&g?-Uh0`$SFn z6oL}2znVV&Kzp_TECZlU8B_U>esmVneso;yE=(~Jbji+0AQIT2`tF$+yBO`mO$1BZ z{B|vq=X!^=dZ>rU97zz?QwP8SwQi?BJ~#uj0&xyk1u0_T)vFf+f*Bx|>d5YcThY0& zbr7-J&9Ekq7(moO&-cx*lUb?$0%ru4rHA$oaaR`nFM*o#4}cD63t+e;F2d=$OlyMh zqs40w=rLewAUPtIVoX}3PhxkWdy2Gm6qa9;H%|YrrGp@D0#9X#(LM8Kj@(N<}AhrT)cJx;xJPN^JK%brEr+ZWnd}j%OzXh-c zI5qT1*p1I?FsX$+z4>{P#(i75y&S z9rFC{xin!n=5imhA5mz(fq@-fd`(;&BC~wvg$$|_9>TVU6CH2`xnKGs28gH~G8#R{ zA;--`%Y4`NHSIbmt6iUbCnxexb~>tX1oRtYMTVdd)ACfz<6d0;cz>R~rYWL(Kz15j zNB6@mNKHnskpyjp-xG2|P=0jRW03EkVx`E?!CiAe;_>hLSiKIiBi!w-UQsj*!Xxe!1&!Uqa;Z=3iG2(iyk?exGj z^1XT)>1Hd@Ub;^(ec4EMnIOEtrcuPKsWM1F=;F0e?gC6MO*3zbs5v<)k`1#tinrs% zcVa}#fU_Ek59z-+nB;}%YM=IBx7U7ao%E6HOQRHRiw_Py~WPRw|*CP}3_hU4n^28kSAi<=0NTax5$@JdrszLSJgU1G22e`NBeRMPSV}pn36~@K{MVHlw7A~!qRww zD~ysW*Pw-CBdDXT_^DdMGVD*PcOfjWIS*5$M+3niisn$JyA3nQI8VmXS}ZKPBNv#r zHO^)@Hz+18^HA*CrbEQeQnhuiMA=e`Yh=>+m zjLk@?VKy3=SpbGBzgpFFs^Tev%qYI73h%~;1`70}Y_F~&sZf<&dUB{OjX2byg`Yk44EgFgW|ub#+kU(_RuDgN=Nr+q+g-W zV`57l+z#6ylI9N~YN@Lp;E)s}1x8##!4@O$;atu1Sw@K&j!`k`AaMUHkP#kmtbYHm zt8I2TMJ=Tj`4i;y(!eo#?$^HvZUVZ`vJdstHP_FR5ZOInEGCR_@v%eqwyLMtVsaKV z%Wr1Be+w{f`c$ro5F-U28fO{|H2`mO@7SPQREhk89Ezwo8d9*?vZN+RwkQzR?!GCk zU$l#G_ip%{nBZPL!~Hrf%Z$`%_t{VEWq+3G?oTJ;w@;br!*%1o=C4On7z(z}eFAZo#xh#Zg4{d_RtGO#(T;f8rfEUg|kTutIr%S~I5A!Pu1;D)PwQ*K`FZlR=UhQCCDI)`aK zyy4M8Z4Lu7Zbf^eV4!6fAAoF4g1B!Tv`8g5$B; z&mAN8LSb`RH+NU$6VZ)d0Kv1&=ypU*V2#H`OcCZR_PO@eq9$H5T5}kpN~E!!yA?Yj zm)onuEhP*0Vx|K{1uqqS#)Sgn13UdJt&jomD)y`Lx z{t65-eKk+GQF5vI5DVzI_p^U116%>{u53@OJ?LVdy`en?mSterSJtXkTbN-MS%8`r zS(Ejy{(eku>7?6HhN)Rv{Ef@AR7n=AlaB5v9W|!67+-{1GpZpE;}Y3SEgw0{ko7zo zCo7mH!lo*-Ep_S>0(svO@|9&(!8wwCP~XUEUIaQ7(t*smJjDB8K{>>oLt4o_ysHh5_^zTOf%ai~%D)pv`C zc;WXv5Hapu{=%p%74sgND9U1oRf=&84RViv7l+{c7|5UH`-^5>YEQmK$5SD$GO_q> zTbLbcHsz=;l#XtXpegd%=;ZKV`-zeX&*b7gvDuAhY=)$&Bfz6TUyIUZ))qK&aYR~j zUJdKMCJyX%L>9%B)K>>~D5)Cv-7oV>fI$e~w)rw`QT=N{yNYMk71quQ7n4-uI`t#1A6svT@Yi$rg`ofo_E~-$=Mbkx4tMdP7F| zW1`bv$l3RJJ!I`@tP;r@aY1O0hQY?oSZH>dxd;-LGtMWzSvVoeg}h3fn<|;!@k8o8 z!&v5uv7u5WDSDxg8N|i8FV+@TQB0278p(qdeb!D!g%fKe9aK~ue^q5&yH9yI@y!GI zQ$>cE2dJdpIHPA$(%nsiCgaWVk^agRRUf}z-4I;wuBb1iaI*mpC0eb>fhSQ5hmjNL zW}Ie6uYEoX{F+f?bzBc64iq#)#+atjR<PrM=;E{X&9l>|1w5e<(?=Xtx5Oq zzYe$&+$1!2EZ`Jew=u56^QLnrF|8jEW?B5Ov1;0SLieWkM5>T&7R|6IkR<>8mvG(> zsWm&g`tpzYV*NcvU)VBHjMn|*Oh1Vuc9h(G0oupd)ZxRJIF04xd6pQMA~Mqd{u6{7 zsnB%=to~}`8Q@G5b2yH0N-(h4JR3(*1$RLf^Totn$(dSl+)z9|G*J0Id`kT9-zIQQ zXMI$xw_2N{2o6xm$P^sir`4``_5qpm-#;kbEyx_0Gsk%d zR|xh;@6~v$uz)EZq`#|!hLd@8(Tu>f_Q%R?-;qNQ!8yL1f}l-E3XHCAwr8~6Rixzj zzrS6PVgJYqob19XdO13iEB==Uego9yF=p0YixR{Sq^>a7m5?uFm#Q#nj z!Nm+|Fk%vDIe_YLX$4bmGU}`X9wryv6?Zsntt-r}676}8DSpeGSYWz|P8^|UkJQVS zcxHTCwKUWp@t?mR1hwyS-}6fp>+X3EebQgBa@{Py|{HC?hQ^o;u^aXZ!EdI$og*$09u{ zhI@;nl37e)=X>BRiqKglH5?!zNQFNKPTFsw*|{EdtN19nz^_fd=f>6c2QBXQ;e-8M zx47vrwTgc2wshlf_uJZBhyPu$gl_O?+{|6j0f32jRis^8s9RjEYVkF+O{ByO<6j?F z2R`3NprZvj#xSi9Lj$s=+NAFsh^QJVhZoac{rP3wyP`dCLs~;?7Z>4wf-pfk385L| z)WenV|C1&gz~{+=CzIG^+W7g?`($_CTQuA^hj;k&O`mX03DKU_)j-D=33ZeKv z3mOrD+~YI9^JcmbP#6$~Ir5d|yV=KW#+pZ>#@(5We}EOph3tqYY$&<^ooPBk?rf{b3=?9% 
zD9g;>=g9vJ4QXkXS}cAd9Y`_ZppjBLZ6F)a!{Sv@{@*auN+k9wXfKHXMi0?IVEUiu zN?2%xDEAd>C0*`zTsmH{5(h-deL>XtpXU?yW10x#yZF+H5~9>1CI(FzW=H?PV1$tT zXHp3gq!Ymn&wjKhd41}5!eLAzJ(lB$+W-5EAvP1_5xnU@wH1bU42Qf<)J~X~Vm;yi ze=M)7ds}GuXZ9lNA7c7Jgt6wgjt!*2#{A#W{4SO>VRItwaELD*QAbq#f2;9h${cb5 z5kvH85+*C%4$ORGKYQB>VvRPJIezffFS@d`n`~S@~9kn{T z0h%8kKlSCirZIthnoD#Yyk>Ob20==K6o&tOae_!Fhsi^Z2mEO0NFsKAEk4UwCd`MT zm_jmfdH-+N-J)Zye|XUNn|g9^tkw@4?T-!)%rfUN4kN-&2r literal 0 HcmV?d00001 diff --git a/assets/crewai_logo.png b/assets/crewai_logo.png new file mode 100644 index 0000000000000000000000000000000000000000..086ead55243f3b403749ea1b45a69c0d43035237 GIT binary patch literal 99658 zcmY(q19&DuvnU)}n`~^`wl=o?#@I zHR1BIV(>6nFhD>+@Dk#G6oG(%0YC5|6vWRJTOkMICj)j=6cYlfn!-8#DFhj-OPI*W z08#zGP(a|o$Uva~LHRj>fU$tU{|g2Jk_5*7Usw^C;(u^JfPg~Hfx!L;NAoBD&msPk zeyIN^2h9cke;ji`{x3E#AQ$xi!a#@rA!~>{(f!Gw?Zh=4fq-C9{*!=#(ldWr46I?U ztnQ>PBh6)KYfYzbWNTnd=Vop9A1WXoH?AMh+So~-z|Gpq#*xd7m*~GRxPIXORMQg? z{1=LoB`=Y>j68v`t%ETE8yy230}&q#0RaJzgOLfB;vdofA^$1y5}7$U*>Ta+ySlp4 zxiZt)I+)Tka&mIgGceIJG12~D&^o%?IO)65+Bg#bHyDqCua$?d*ybP)NJ_xI6p-zJ3aMeu ziVZtK@oR8fsah#G<0^#cc4SD(^@ii76QARo6PE8jpy+I+5D1VMU|BMggh7&`4`8L( zHo8(X5EC*I!JB+Tn9|Jn-J#vNsvFhFKT!^@zyO^RNfQ%84MWz^Il2r1M^N~m>Fi~L_AVIWd>y2-Ixg;?cslanuia5ptGIl6Y2UKjo zDSK(A-`JOI!#*l8_oLSeQ|pbo&$~og8B2WRoTNKRJ-QUY{y7B%>Fo}m3UwkH+M_Rt zG_)G0->4!?y%co~rv84^8Ui=`QeHmjb)jHA#(=x#gG{5&!1a+Au3AgsQ>T~|&QnX9 z-!2e$1 z-l=W+R!i}^%FJE&TTh4-caV0aX|$$bW}Z)p&$byK8L2VfG_a1WuX9>+HqsehbN8p_ zrFnJWeaEhO;!Kr=<{-V%O#p(`Zffd(R2zXX)38&Kubxk;5ULVpuLA}_bqnapA@R4B zjA_VGj~(&;b|mOAWxMtt-!g(_O?0eqOr{?IZZO%`{m{5R4>4+Uk!|hH625YkDLQVr z)wG>#?KuB=1}CDPIb3=R4WIszkT{6(eMRW6{BK?Ev^t1YRaGVuNlm&-gWwmnO0n_# zY$owzzNl`iROs>TRB}Aqfl~9>)85}^d-YfubgAAixy(3-<*sqV9(JQjf71RMODZB8 z80OobesB(GcTId&8DZZY())n~M-sIN__VaH+b1O#a!zwCYXgbzD?&Ty;pXa z@v_f5>&n|ql``APLLVdn!T#;i8Fv)KHJgq*G$};3k*uJ+pt_;CplUFc%^v8=;{N6S z#-cbk7ayMnZA&lb=I)lOyjn*A`1d;kjUJ&}z_97i*hYpu07?n^V=()Gv_s94?ive8 z9`LP{Ps8)tVPVX&&#KBICNbfO6N<`hr5qnZ7`D`_3@M69QCE{P0V$t2*iMF z$CsgVP00(gOnTw5w}Z9h_rE{o8zldu=y@D7(!C@hO)MC>(?E7E-$uIhcoevAvQ{V- z0{^4i7N*1P`fE~0K3`oSiB=7v>dHkbfJGl2GWSM#Q+}sBusc5Is1uP1e+)?N+nvtT zi>+YMu`H%4%ZRq<&{WOKio3?eNyNYl2W{d5Ixv*S0H?%LZ8Y>q=bX4h8~Ek@y)W89 zT0&;{klKEw>k;nAyB@gRJHJNPVXWa4bu3|_AOFV2 zMe%Ob1Pw`%DG?!bq6385^o@m9A7wD zvvKm#(ZX>f9>vSqi{btj{+`;I-jg5%VdHYZv`#=Rknkv8D&AJ(HjB#x!!Q(~AJKAY zCs}ZEwpB?ZDAhi+8=so)9_yt_di)O=u3TY>`_3FHw##*AL@RT%{#``piPAS-2a7(> zzlExCmIq-&%fZE4gvl67zwS7u>k98yOs}k-b2rPW!A)Y|ashjd;MT1_DIk_Gokj4q z^>CIcbb~wrNAZmracS8RD#ye(vezyr8x3eoWvrYPNHwH`ztn@%=tjilgsmjeVgOi<=f!T;~3~Sf$JLAk%Dj?dvVFC=n?vqtC zNjo-Ve39U7k1sg?URaPwwdNP=8KW7mFSc88Wh|H2h*i7Ze2}mf;Qg9y$?CMniRO8e zhKTT+u)?~ebCO5fn3=KY%u9Bz9Aa#Vack8&YmGfYhTz`TqAWBR45_%D&P+A2T!FT$ zDelGoLK<%Gv?1}T#iO`6E}q+&98HV&fH2Z|GJZeJC?|ZxPt7*MYt>6 zXCIC1pZeFQ?(H0328%BC?VPG|uxcucDuN^s%Mra!*lY7(hmWlM zPJ)H?_au9dP(C(;zQyfPTZ*!nSXL1Ga=?Yc@c$g` z#;KzpQex?oBL;qq+AKa5qlGnJj+(i&-dj{Fc$1dOgxU>|kw&Q&huIBzWuBoyNw)W(fiL}Kg1Gfz15Or;9||1H2aG7! z)sORJ2oS1U94r*$7fh4W^D%xKbq=Jt7@|6wkdTGm5J9f_4dEFZ-PzNMX`MPbJrD%u z#qC0KcH@7A&w@i5=xxp~&75W4XAEp(;=!dzpDo%eimOnu#Va1h1^ch?hE5aYbu0)Q zi4MaaS;W;=aAJ1zrfLJ}l}c6E;DA!ub=zMTfay<1-}UEm5*?BME9jJ`4H(SmsRroO zyehUO^1ySv0(l_J*o7XM3_}j>glZ|61t5%eVp4Z_^Q1JL;Rly9jUO~q<0T%$4H_%N zySuCP8#7gni24voHwfbU)cLLm$xVkRqHnOAWW!5nN418NY5t6m)&y4)^&6Ksr!2x? 
z5R~yHEj}zdrgy(!xM!YPPWEQ;nSccS#%JKeM&GB|fik21=@_eyZ$~?k&{Q-Nxg`-y z`Ndruh`@J>z4eP}^&#JJoyFGqYmd{Bp-l`iNEJ?OJ0@Dbiv_m5pHAG?+wEu^aoW!d zs!AF3%KdI<#&v&J=$X_t2k3xO{Wmb-?g+Xn@YNPy7^e#c@Sqfyo%6m+-z!C?E+Zmj zj9C7;R$`6w=5^_OX|0VXaap|#0g^=32+uI}H;K8iD=y>pkanN3qlpix#x!{iGl#(_ zKPx9Rzg$+#S1jD%m2POBMWY@G`Ap`10?c&2+zlLkp-9)LYjUco%qi}m;rBGeaG45n{ zB}2}9j=;EVnI^~%S+o^mD5z|HPJCK5zjnT9zG?H`&7(U~R!FBr&Nk5rzg1A#Z%&)Q z>Xk6CIrh;ez_**I*3g5N+T9RSkI*8W!q1T(|1rw@P{W8;+ zlsz~@ArIxL@u9~rsR&{lwV>jW?R{HLt}Pz#{eeZte-=c%xUXFYF1f>MWV3E&C(>^2 zo~EF?giw>~V=5SYjK3TkGf?i`r3|{Fhek+$p+JuK7y;KBkjew-TClvq5!ubd=igoR z-^R#);a$Zp6k@a-l|jWCe8wl+{l;V*UxD}VNB#xahW5c3K6r{Na}%-R0&^BNxifDm zya)Zbo&DI^ zR}j$ZX9`kcCedicee~qs(P8N~Z|CwjBk%1E0svk5!n%5~uO_xN+|4_;er{hu?P#9p ziUyW~-gMZ1HFsEOuudY`y}|L>7_m~m6@sFgR2!dnznHz^2Q7Bnl69Y ze^AI^(iQjZhZ7yDkx_Ieg^uL=IxBhp>)zpCHaehpQ$41=a)3tC))rISRT)gE# zBltVHABwPh4h}nW-P(Ke>>$h}Rtga& zNZm%uw2pZE_MIMslYT58OfH$fYCIP=ee;C^e+s@A$O3uNHq)@PHxxm&izEZ7z{2=j z-(vKAI$V&4}3>+Pv582xme$>xE>F43i$a5imib+Hd)xO z?eINI_3OaUZ$1Z*>uqw+wN00L2%JK0d zkNc)a$(*yLS7^BJg*;jZIp$bh5Wz-WYWdE0*E?R?B6r+3eaC|-8x&EQA5^cE6DdXD zVv*(NPo?Qw-7ObMbxG)P>71yg3nmIa^md9zaaqgGe`&>6AP_~{NJ)$e41~jm9q&NP zy@9kRf}dz8w;rURIkVyw_`D`}z9^m6YmWNtO@23b%}$;=9O4C_4Q5=1E6l}9v?t-L z9VS!Hnz+UjRZNJxc&B`yNsosKs($d*Zx4!m0$iA5R^#udeUPV7&F=5KevPUzpUg{H z2A2WN$EExYNpi#Pl;MaLw@=5}u%b|&z&6}A^9$zx3=o}6$EaMk*BH)V;Ts~{a1FBZ zx1ZwvK0KG*`5YIy>e&4b1*-^94qL8SGG1X0X5nQ8FG0lnBVN?k@LYRi*^FEVWNM!q zRG!Mt1A0&B4@y#(*ZbSW^Ei%C0!f)UymW2_c@M>Y*NR0aFJbrcGJIvgsE}+R<9v{` zgoug^L2B80=mOtI;Q8xw(okZ(jtcmy2BvjwIqv{B#=BV%4{tJygz;|HifQSnoh~v) zlj=<}Y8ChIG6&~MundVjQycld}dAJW&B4aJEDSjp4Hk)2_aRfqa1z!TA|Avn9pUv{o_)c-RME6PdbqrArZ*< zvQ^@^Ot0*D+6z`65E;hqk(C<%!{o2}r0vUZixb}Fw@E&gOE8JTn3^9i(0;QqPhMkl zD9K{6rR_eZu`S@S9oY`W zbyleY1o*GPNQ)4qJPO+08Ls@$5L?C|Ov3u;EoO@y2Sl2hV%*;p|8U2q-p4w`f}~f1 z@G3vb}^Vt)D?#d7qVjxZX_}x;StyAMbN#S_WVW5>vejB z>wRszaqPiW>15S(T?o`Uuq_{j{~S3xGbWuzc!Qm#AJ}tkThe$X7X_;Y$Cm>PGTp6u zG5l$*bVf?>NfL{+ZUbaiIh$A~8Eg1rc361Kprb;f{i_4qoD^zLVfAw_bs^GQ$2}b$ z2G@Ym8QNOaf_Eb0Xp%NcWW_ z-V9^4+__TT49ArWA6R4r4R;mDj1v>9y6bN2ugAEhd+k)+)ebqk*iD-%lBEKaW25Dw zRzhYQ_~3In?pAc|)xHt&0?uOtnEYuHf-i%7oPhY0sVS*+*qF@rF!=K60<}a{v#laZ z?MT!b&*aSRpqG1S94hUg>rfMXSvX$jTdJUxYQ4KeP^U&~W1+5&ln+M{@Dnh=eOspQ z_iyOZ@H8CXQ6PH4C(YS?&^9G(*&^uOkJZSq?gYP+7BHOHuz>1rb{>us!Yn;45I)}_C&0%D$71TNQ9e&GO+881czh4N2NI8ofE!X_d$S8zwDUQ+*xwvddWf^TOh z`@ZY0?a|&}qwJXW^-FXuCvN!*!`bwL$?fc&f>i)~l9KTPPYc@!2q{5DBHRcI-2l=$ z4jAF+IttBJ7}>t1yh8ti3Dc5wB&~2@(4d6i4s)`dA4B#~k49IUo*|VdU1;)+b(rE885|pIsPg z8F@k&Nuz77#;!5mGJJt+K-VUYM*F=2j!tLg0*EeZ%_V{78PJq^Pm=JTgli(HvpZEr z=5x2C-O_2o&buf%pXY=-?1w232iC>mMTu_Vt#W&O^dHeJ=FkQ;A1gs0U8pwQ$%rL0 zTujkLLdUj^ZOJ~1%;|cC(0PAke(nT6vFLs@FO?%mqS;ZYAA;`q9gvub{NhTwbHe4C zIV42uQ1GPDVg!vcz(W~4|2A_)@or7T`8KCws2I1oyiShDX2avR@`BBxQ~w>p?+IbM^6F~M#k$okbvO0? 
z^)22*b-lV=(@>rp0B-(hC^uGnZ)a?(X@!$f+^~%XU1wRJsf(e8l3=$tV$Q?h!4Z=c zCJ_A(6$9-K*`;D7jOkFaV0p%81YHxZjAxpFT=0>Fbz_b+l88Q|{oMtB;J~?^(Q-yf zG_l^`kCEw67ZGs{RT%HV>yyq<#IAa0Ia*4`AvdkqhNY2;N6g_U3ZoveKN&DWnQSfL z%p|DKf)h%0p@WeI+SiIJST*kkIyxs1=?p#M8T)x8D|HGHLkkj@QwM&qP&rW=YF2bE zb@wU7ng^@1d?y6!FYjBJl&SQ{U%Z}#Bs3ETy&l)gOit^K=b94DGLPuEH5LcmCoWU# zKW^9UWWVO9utH(esa+PGo`eIgp;@_1#0_aX_i%v@n`uEXWjH}5)@Aqw7pdSR0k_LB z4yP9=&Bx2A{N18>QO6*?xkTYWm6!2GT))83*m<|Om`N?y49|t`XFz?&U`~2niYj0F zT1_#rnq8*WnwYJmHv~mM0FRT4rDPr9ulvuRk@; zW3$OL_=J`NoW6chjop_b7~_Y@s3d&t!*L*mT0Le&a`~XZDNbgeY6y3zDY+w8G5fGj z6z9DUvje}&9h}dK4@M^hqCv1JK9*NTO2{becZJtDk3lrf$ND7PSSE@aDLDC|1vFBF z_J{?QCb-1DtjJQ6cjULfJ8Qc7;aoBy{wCpiPI|jSrW|cLE_1w^fqnLxbysdj<`Fpx zI4r7Pa}MHI3RR5Br20!F8so9}AhRUSb&dYMi{m6xackcwhOW;(ir4Me`)ZX@9pQ0q zru$o1PfeV=T7b5~j^n|-TqW0?O=A@Owp#+YS8yb%Fe#psvy8v4po{8UGn%{hBpe?4 zp5iZe$~08;M*S<766ldo6m1BIw>-Pvj}zun^Z+jq^KpXW`E@~%hl6%*!Q9XEdA7xh zGU|4{+dk40R-i+Us864H;Nj<(qz^K3L z`UL(Bn7yS)+cfVs0@^np?;vWkq$H1^nAt5_>EP@VC23j1Q2^OSO00-tJc|{IG0Yg+ zB3W%JwU*;_R%;EQy|01t)%Mmzp%8twCn)aVrKe7bn(YoKqEBaGeZjDL+r0%XBj>JH zgCjH~iAxp6-x9>iq_t76-b1x&yHJiG4la8eEq$g;4$BEiB1;vW5zaS^8Y`4_6No0n zwss7XsntR12zvJ9PHlE`p)Uh814{FojyFV8I#I&=Yr&Heub9-WMxzFQ86ES=HmOwp zf!PjmF6}^*&IC7l+}uDrUf%5za`>LY@MX0DP5a!i-7(C>A53|r?Yq9z8}ZG?lP#~f zSEgSexVz#xNbXdp0>P%F$;Z2*t^5mmB;XgTl0t*tu~Ebc!09})3^jOG0^^%qViVu`F+9tX30hmpirhSw&v+aq4WD3*D1z}k?l=jE8lwYvyS_(OAxOS~Fl$Kp>HcuoC64K5VvQul$%c+G)!=nkJ8vq~Q z(+(A_zlG!}ecng?pS+JUg(g(iwPh@1whwMb>8&c2Zi?xee3cO9d|pAo^!vF34sdEP zU6y7pRpUX_!SAGMq!=A+)%}-v;-eSZFB$=^V~!)gTdl$g<8~Z}>7DxWI;B6=wrPX3 z2U|w|Rg2Qi*^FAUS5B0$G$VOyUY6j2pF8K5Qz;?vvzasxLQY)XPWJ_>MI9Im)!2%$ znY}H~-9m#7#vB-fD6gyfY3#Z{GCUrYPbs2UkP6Dxs=!%XETK!VXmFwh5QHloK87w> z^$zzr2ef{uCu81LKpNW$RkJS1_~bW|FtTH)$acRGSjQI|XeGa5Ftr42#8vf?&od^! zY`~-H_WucslI*}AGDUXojV>5rW(Q|kUV$kim>+gUvasXiODQI5E;1Mo5P0#sL*yG) z-SMy=jH8=f#CBA-raY261_}@+`_D_E(gzR{dl+6tr5)9P&%`t=h2NR2`1bP+n_Ow# zxL1zJIy@6}k7$q8e8v$sn`@t~*7Zs;C2R~5fuknSJQyf)Y&_2`(hQKc_eRn}A+m8M zDDBn0pGbhUB=?{oZiI?hvgz|rvSE=@sMTn}H&5km&^|p*1$Oo04WnPEM|L_4ZYa68 z_%{e+Dw5Vlc+uZG#hfnGnyX-Kd_E_pqssn_4%?s65o)X%=Q5Xy`R1p+5YGslc1Wt{ z0&OQFNIr8!av8gWU<=XEYWD(n$8Zu2$=FZY%^#r(XH3ycRI1uW(kJ=5gbgh=z*s6h zue43n!y^Q9Gv(exbJ@EP2-ZtwW!=dvvu+7BON_UR&914dCO&Nc5-+*9%Ut>IJs|8!BXu z_|i~MLbnykJn7y$sHag|X>JvFTJupM$=#9z4`PWcB3#8HwPYJ#@YYWAPl$%|xmBF@ zT{)_H25YqGya^sHdEA(C;6@d;ML^m#!Yk;t4k=;UInJSYi4sB*| zaDd>ntnCZ(f(3+nNoceIIBVEP&S<8nXwJKbH4J7yeb8`8NZCugsBcA>PwLthkE)O-; z$Nzl!Qq#I+)Ars9w7tYrDx%XurP`>dMgf_E1W^$@fu+pxVVz#9X2p<86(Xrk zlk%yR;S*jyI97XN%t8zxpdaz6Smg&8?lNnz$t>BP$%}>V=w8sAZNww62EJP{?oI?+(>D4!Ny1I|fNoA}EIo<_05)lfb*2 zOO7(_$5U+#qXwnmWxzFfk0ZoIy)gyB4(eHWwF%JaZSe2@S!xwZmr~M*`LLFuzV9NN z=5u&lfHnDa0|ozMXO_SP-{z8$Vs$Q}jDVwBQFEN;H;%7_cKEbBvZ^6=1<~0J7;Wb2 z2#Q)_{+GYrWlxMYnH{XM{S6fS$t6W6n4-|ACjek+Z^rxJ01|1^7?o3FA|q7QPV+)^ zFU1X=OWdUqt2)g(c}sYV4oKLTw8xXulRgm6XlAyXf*DfkvhaIh+j(TYA6vG%-(?Cf zIg(_WjM7eHHZA^|Mb_(;=VWHSa_9tJ{w9ER0&gMmPsOEi^s^ZV-+wvlbXBO9#*{th zg)6INH2ZxJk|4caW}%W~a=h9)BHr-5!(6e^6`QWnMma-y!7EpgVq(<`21tVsC=G9< zz>et*<%A}_)+ZbIjTC28|4?ZCa(8IsUUpC zmp3&sXM}uzGVgBIdiXb91L^Kd+?P`%D?Pl@ZNaL%s$pt+-pH>*RH1he6BgG>{e;d|10Z9jT#3EDV@sJEzcPs5i znC)&Zh;3_yP4Rut>8cUWLpWKSwV06oEkm!Ugi`rREJZ~nP8e%8Jnwt!i?ue;SYF=W zhy79+lCrcA!2jB|8vxA??p_S;SWozz{t!`LDMPB^;@6sMwR=pZN{&|_#w*>CsJeRg z6WcO7Mcp+blIH}Y>h9ftwo(jp3|XIP>m0M+>y9R7`4vEDX5NkpPsciDBF|l({jdep zg6GdVR-4R;yYSv&x-`2YhC=ncW||PNnEB41!j*RKhAd0Bt3ao6_@bG1^`XkO2g7K0 zEof9f+MqVCe1SImgc*-E6-uSyX``1XcWQPAyocd?sO;X*=Z|{ZMKSQOM=p@;N-wZ> zP+usL3f1%o*eWz*Wf6IzQ5sDD-B91Ois~Iwe6&>?n|{Q+L*9B7%x1tH2JjyZ|3z;} 
zLb;Qy3nIlnp_SqKPt?N#bk5#kEQ<|@Tf->ti3;U!HAJi4s}AGYvsd)^DSt{IAD>b{ zb#_@46UY4Z)MUo3RicUdFqdg>5c_O`eH7{qPZpN#UEaL7DUu`pn|k$H8|=32c8tXS zFhN-SE?8GemQLMs5#@=jC3$15c)9ztec#puKO53xS@5VGf=f^ z+&c!+zKZMTyD7mByv^DT6xK=QrFz~8yv>}F^6%T=p9c%KSm4cfY}SFZ_l!KTRReX| znaS}ZpJ8A`{r9mV3hfGlG1V$*#{g-VvK!cm@i_m_TEl&hi(J*k-L5?K>0cIg*lqF! zib!ayp4NX>fu9>i6u6!(Su&_&#|J6`s{690^at-vz5?h&c%z^B-0=NhFO*_7L7YhG zrQ*wm?~U7+oc3DtqZsj1Hj0=ubE&I?EpyLjBp(SZeY7I<^YS2^u+_@_uiKmZOCgf& z&F5LH%4ZQk`E!EIVictaZ&a%$F<^PvlkFL`U$@SX2!6Icdr&$(^Y8ZyZuc)p$#&wT z+0$4)OI4+cDf1L2soZKTShaZ^v*)|oYk%!x@CdGsOBuk1TKGOJX`L*zVfIleD>q4t zxPqRfZypgu4UT5tq@VWMQ@GMrAdZ?%)9_R*2!x^~9ZNM9DYsubCYb^^*aZtkMH)otwM z!MFw=&3V?AN|E8!x*7{zbWanM@p)U;)t@Pd6^svgmml|L=%AoNqmEkLmJ>Jtt=<`& zEvPm4;u-4T2p5!#o;zLmW1gQ!-*6~**_*d^+tBTM_g3z9SRlvv)vAB8-AcMKrTvGh z6SKoiGEPS{eptH{-t+f3;_`?8P|tP0A=CB7b_F}&?>lUIlNro6kmrV>W4+e5M%}eU z>vx6ep<;2lw#3gASLH?f-65?3?my1t)dp_l?*|hy+tR|1?Ief87Zsnyr8hOZZw4r$ z!lTrMqvcrEAcH;q>)Q3F{q>$RzYNsGiy3?{vN%t_UbR8Q;Yo8Uc&KkkYEC5}x7^ai zsIWkBa_z0~$9prf=aGUnJvBKeLrBeC@{mV!JzMK15d}_%2zYQ&HqMy`1m1 zqKI7@9$Yn?C#G)QsdlZTv7^qX#`DOQ{e1+Jelo3Rh>h%h>O~%0oz9S(uzNI=lkt2f zV7{mWd2f$lH+uPY4iZ0K(hLI+iaO%zP%6*P4yoAZn=$_iKYAJ9iz|kR*SD5NzyEk$ z$Z>Cw)X4~|u$96_wO-@9<>mI)$R2ja#&h2bHt=m?~z*k!(G6u}Y@1uv*`Q z4(Zga!_~Brg50@1WI|U7iOB|X>71FJZ9>fO*D(g4&+PnXWr#?z?hEv`ie;~|UBbs@ z=y}@8uwBz!Dzj|TrNFg%FSb>~j_kN>G|jI4wJ`a*05f@q;QJI>vntCh1AxwwdQJJF z9q@F-Vt)T@{fMGTu;zUiWMVdguU*|UG_h%ZrS9o!)tqdcW-er-Qk!7ipL+$iDQ2ZM#HXKu z1h-xoj*_+p=t`mgSbW-SF||IOUAIyzXhOzG)VQ&Q0KkUPB5^qAbzjzAJ+E5a;T4UH zA5HI*UM@#P#xzxG)owhU!P2i}C*BAVqe9G8jLxFzz zLW@>=CdIzlb->Ul46UIGwQ7ZR(#2sKK|C9>(r83PgW4Ns!e*Yql>nR<$KA! zw5Z5rQ7_2kFvn(YsJ^r~@X+#F5Y@6!g!5+~AChg%@M9-`8TzAJ+xdMb)x^~iiuk%; zYv^W^&}w3^p|YNB?YqK<%Ra^CwIkdlkt17^WjaXpmgvw7h+(3%g(vo;b)?p?aJQMv zVb*o61yhFt-So8Ua*$BYtF$}Z zD{J`}k;3@AAwZ>=Kj`o}C4xcMXJK4Lok!uI<&c55!Kv?wH?F(g66caDm|BpS|Gpi* zUCnIGRgE@+=-LIBaqxA?WVM(@erB;Y`H<9E?Io_eGH1K`AfJ?ZsPxvy$JtZ=E=N~2 z)fC=(aU7nkH>TIBu-X}ghIVsZfNePUiS|5O7`ozbBFsmE%$liid}1L_LdCE>6#Po&yJB~30IF1c8A>)jVRNXzbI~DNK){4+5SU2ZP;zbiNN&j_pyaj ziQI{jiKg9SX8Fv#)d&LNZkR{kX=;YkFF-bMasQA#e!cuIs(DDz%U`pJ9CrK+G@72E z(0BdCrp}YOX8PhXC(+FislHOj z2sx`*@EAQG(uA<6l^?6za}Qo=m@jwzhzwWb5~X5V9tZxQP^LA{=xVkh_}qA`y~p=T zU1GJ!q#A;xz3c14ndSB-6+;yCF(*>dqMBY{JfK2BNgdJqkLeMzThQ5XvPm(E<39b< zKVep?i(`2XX$Eee_u*kCkY|<`Jn&<>J#aR+M#Dt{%i4nqj5h(_5i~Q=hC*0LSwC^u z-!(r$PmeF!5OIj{A}+*)YDQ>CwZg+s(%oHbx@4D1?Zua!CXZI|0Rh%Bum{b~2-JpS zJ0XkZ(wb;K8CFgCugoTE#ZjJPDelgxv~LgBR&QTF+u$6whULq%2c0JCDsTL370u3V zp_4}-HP*QI6||SJy4Kvs-q!8Qtlh1{&G^x3?Wz*1Ri_MR)h_x!YzGBl6hZA4+h5*KrqIm!{ zc3apk!_V}0VIh^9MtF4rVdxMRsck70!D-IU$zj!A*Bm*O$zg;2__7}Xu+?ID@mXIq zX0}RgrmN|Ev*UM+HGO|>RCs4KzjT;>sXqeGBKB+)zq37j+4^G3c>A`qcjfc2i3frW zxYa#W4AD?<*=@FJ18JMy@)$=O5z^??7FnNK_it&eF1g%g ztF(?Ukxh>*EL@>?+&fWNuUUGuPK~LqE7Dn&z0t80ARy%%^WzfZO?wExSb25$a8nvF zg~Qk5<~Q%4)gPqMy6a7Dsvx$sS~v04w^S%i;{3WC-mmwkX}YW}PzZv4D~qjL}r zO*dY?JRfNU)4!wiEHh@->2xZqL^=d?DPyifR${Fh0B|hUU+fUI&htgAaH&;moU1ig zm;MeL-=mF+W~YrJFoWDEdB3eT(Y>eeI<(^^Z+T<@Z?7aav8ZZ>)G=BAgp1a`-ecJ| z*_d&EjjlJnyee5qc&4s0Q9hdmTw4adW4H2)-E?~025?@=jp>;GIJL4GAFtVFoR<(< z!OOk`U-bmz!Ue$#aPiL#>tOy)Xp%wrS^eXQImFR;f(ivfhld|_OpeGyB^Q)7y>BMk zryT*j?6%RC{zeYi*ZS_!hrp|ze*hR#Cr!#LROizT=zS%PmbzZ^nwV9KCKk;f#3jd1 z5SC7A_WE-~Pt z%Su2sAGuZo1A!v^%={kmOg*Z*}@DUba%jQ9&|%ams211@UHGMUP%_)8yN0J{jpsU+W_ z$gbn-4dq0k00Ix27b2(YD?M#b?8(%B%zET(P;27YTPIgc^(L*`u_h>3dH=5|Rf7pn zfI`(@cF;EH+gpt9iFvYo^`d`=xAMvIFP`r5PY>=#)LcGx=w0bAoSF#uA*_&y#0{D% zJR7BqP1fqR7Y8idxn!Y`{Lka`6l%@fsNaXJky?5Ztun{ z>)UkXh>5 zJa6ihkP?BI;O+TwSSd{K^1YtC6L(+Vj&{3WFhQw|@g@i#O>p${^}7$Z1s6$5mrvRMGExr+q3z*tE{lD#<># 
z6^q?;!MhcLmlIyyM&cFDb^fA-JWYN*X-RAL5iTYF+liMXmFKDEPvDGNPOXi2#$5^Q zZ(P%xi6eBox@=}l*m=e7l!{2D5B&wUO&gWEvpban#LcSZdKG2tSW;lUUBYl%aLxHb z1k4!K%kZ{}$Y6LSS@F8Vs%$j#`e*9mVoWuJ-9Oa@L9B_TXK)drD+=lP;Kt`=C%pIX z-8&hNN;k-p&34LW-stft-!U5XVq@$yZBBRlm_pB+o6W8Cl+KS4?}@xKOdl=RsSDFp zmrP@J&UgWU&)j7~Em&st0W;aE2x%n*&@ zIv;xqWd&vBkT0-Y{dl^|Cm{yGdx^4(XkBZrkl9r2M{13(cobZ3O2nu? z3({~h1ASU%FCNPq<=y_2r?oD4GV=5Ou#j7&HDc^XdyQ6^5*oqSv!oG53$6( z4B5=c(fe2_AqHR!t<4(NlPTQv!8$~ zoEHIZtAF9{+^~)JE(lMjN9t2*SwEDA!VTtgN# zG#?S{_p{FD+!w$gueRMuA1aRiy)rJhk5l1xy&*9Z9#%9LtEIeW?Q|L0%uI`~tu8;D z`%ak zwzUe#j|K=0*)8Fl+BkZ-(aU9##|@B1kOmVw9A#VQLX*$dZZ~;dI-vgBlpZitRT@e5 zr;-2#{~()M2FX{%nnr`;6rTpz zXQQ~RTn2|el-aVG-NBt&Q6%ps{?r`v#rW2Ohv^J= zgxmM6kbcInN5E~4giruk9(Dwm7c9PPx{*nLl>L3bpJ%y&S?8D+JzdKfLOrRNtRa;3<>pw( zPMObhp26szT~Tw8>FVB1u2^7nHpxY_;gb#7rr6y>{ssGYEu?Mt8z&krZxmyHhyfTk zFD&D=Byq}xW88r`@P%Ym_vm>=Z47hgD?+O^i-G0$c56o?*w+*e=e75;gdcuWgO2kB zlpS~n5o_;eMW63$pL>+qI)o`lhQL=%-HI;@NKqgsA(1d+_LP&@YyGd%NxQ)5uGR8C-RDl=VQ48o{ z2oNZl%T&xHkWXeNbwiy*bPcF(=Vqhw2Daj||3y2F_NQ15h)=V1jWHlfxSH}F z+iy~t#ep<3W+PZ*TXv{=o5o-;DeS`?bd{*o&w~`Ee>iXph6Vf6{s~!BMk|Q-1I!15 z4d_{wCRU&gsx1U97;=)=Dn$DkkalcUw^b{RI2@BRWSVHQH}Bq^ubXv-A%e~dPm89p zt@0H9bS;A)2Hq3d!k0!+8jcC*TRan0dB zPNZV62gU?AB8N}YY&vFPs@2H#Rjl2p(Zt!<&O>X=3NPu-yM-9V+tW6k6>NJ5R>|a^ z;1-e)a5%9X@|-{QP1d{UNF!vnCQT;pv|4mDHxhPIoc>C~G4rtc7(~c)X&Qt9P#DDN zOF}49bQi5&=CDO_KchE0C4tDnh_(ft!-(A=I0tS70D$ZW5sDa;}Zg zx*tXx*>CI>k*34G7CFxTnDuiy;|1m*cJ}`v-T&& z;PdHZK+|ePDKZ=UaoF4O^@7`D^ZvY^3~}>!nFust;g@H*B#qN$>j-*#G2UhxEd&Xe z+kM2$xSJT=`QFobP|(z-RpY+`jy7)r5FI)vN2$oZcpBm4KR@mTyXxlnxpt=ohZsu| zCIG#Ck@YWsO*Fe&&$C3CIC|pc9L6be9lRjQ0`E-1FnGoAyQHMjO^ZIdTjrz!olqy+Hs_{~;b4%dh)rSQvUT`B{9 zxu5;UgGcNI>FZn8(JF8)LVDHNRWx8y#pMnR&YY&<+N(el!Vp+X_c@q!08Ym0E=amhyi9 zhCq40;Mq?mqco3>pZWtj_3BNxHhXY%`x&}ScCk`H6_{EYu#%i5FC)()uVGq7>9Lce z5L=sC>RgdzoDH^2Ck?k0nVB~e{r<#dEwb*X4n30xb*z2(@=F-`-Z(tn;$A#kJp_RS zvpBW}{)jw}`3Fwn@SX#vc2K5t!Tz6phPJfz7AjhdTf86xoJ&8`GbLWy2iL3w96lC_ zX@g%zr+cbn*6zbtxN|={-aJsJLTNnQpem|xemk`~&U0qq8M}{Q%$m(;A6f!KI<`ZD zsukcDRsyaCX!_C9>AhZ)mu{I&GEwEBY~_`*Y~?0A^7N~?$fNH(|11VS`#ka&lB+U> z*x!);)+e*Ot*tq|#?kiBo%DJ&34t7cjd0aJ<>08A=q$hrMjxPTO%1zimIjRBot1&FYbc)lrL60(r61 z<~;p)XY4wHj|c)`o^E*J!TZs)PBr8WF9jFQe4xV5>Vx+n-v9)}F%~-6eLbmdr(^k= zO&I&cSnOizF(Zay@N*+kA|!-crf?cZ-xQb;$FktMfXx_d(@&yZHh%)gPSmAMSgNqvnn|zLD z3~bbf6<)-Z7%V(^5>wXh0>=|0Pk=ujZC29+uFprCZUXJ(%Z{GL_(dCVAw3C?cWaMs z_tZnN3T5C{ya1~t1|$-2O2{|JUH4UmE0d4<2=?qdjE_J09zU$xgr3zZLK+N9BeKF)EWNE@{c}UyWk>eb_H@(j=E=G_$kc*MklF_kP@<#GE zyoO90N^8t+TE7$J%*>tAIgdFdaFYc1k@0&o?mI*->?5n}>VwoME>i$5UATZJn>E3V zgGcaP_qOO7Ucx>~icL*FI_>Y-zXz*A>S7e<*3#sPi|5S2oR?n4X`P&-RN0~}t@GMyfHUfpzb)$s|!)_=Y40S-)ELV=4 z<8(p?kK?WX16!(zUMDo#_V~^@Wh9c%)ZaxeiPmL4?vL;CCs+|WdEi7 za@wganQN)HYF@L_lFpIWkQbBxAfMv7QXV9C)%GuwN09}#cOn7r<2?5i&-<6)d&8%7 zr|UY8IVEtt1gsB?%RW3ZgKR)oskqO4{L5LOTX*S#?)B^8^_rn5#*tFecJ<|ZGR)v- zuT4O+jT_N`W(7mkEEP*&`ND;m{lX}t0ZX0pCAaF^3>`UvqK)>(?;m^-Z#>)uKI|k* zW>2{^OizNMfNsyZOZaBXUTi*a5{Ib}zSOWL3Uhes6HC@&`tX75SSpU$t2W@>WvkJY zu?9wT>3{|`DkG?D3D~Ipy2p7MCoX^_q%-VGrvp4R; zxvUg44=srfRmPOhiMBiY3WMiVKMUi?>OM16(>EFLTaa50I+pPuSG2deTF$J zz(j5zBky%^J1(1&MH0o0?@9Z09m!wE)L`gL@ZG@go7N2#UK zY2P#}si`td7>*x4OaNYlA76L@QvdX2vrpVCTQ#VO!E|E^A^^G*a5)PCQl;te=fglZ zmlIPkfB!Ms9}mGsJMo%x=P-tyJVQFS1_cx_t-n!534oo7a@vS?Bt-5muf} zV0u2d=JPk&9Ah2wD2vGrB}F5yp!KXv*nmaW->5${Z3TA0~&yU;|U9{2)<3!8oV zv)idz+OPM5&>;&51gg@qa%yBcE|M1JNxmT`I&_VU+a0g})8wyd-WiHv)K*8bGczA; zyK=QtoC}6*#OHHoqv!mMs8q3{eRg1;BbPCe0C>ad)tEW{L#*At(+r%@%>`-2%7);X z=5-Om3e=mXBE@(}qukY@Cb>(aUVHK^rf%Jb1lp3CB;vs|qCb`dcpc`1;F~mZlrYXxDlXlOrz~& 
zvOdoQavg%NLZRN)=9?4knchiG3H&(;SRWOaMR~?JvOYS2f@}{)+dX>lARZq+0yVF=wqoTaA;kO@9a>T2sTCF;@m<&@;bgqJttB?d2EQDIrl$-YJhhVxR zon5^afegy+k(S})w0lqzWjVul#aHILbkZKI( z+afiMrBSIs=g*%HRjCd3Y1<4T`Sat=Z>QmhA7|k7+BHaIWulph0|EohQqL~3Kv?VC zr=2+qzJNe%X~Y8NY|xdAL^>_!x&*?w+U)V#6;XpRD$)gHOz%ar!qLO~ zak60-bh@vbspxbq$y+1VbUlC0Hc^3r(VH+HLj{^?E+5&7JgI@mQ^?P>R+b+;g$tZ0 zREXuPXQM1Nfu&k9x<|!=K^!t1jP(RyFqrx)LkFO8NJ+#oCWh`)qF^v;RR}{#zMBtL zti#q#+l=|65L+_7nFL9-THt4Klq*{b&1r6`U#UFS%w2+y z*;3jtVFKdVDX5|30W|&OVI{9?S?Aj8Is3JENr9@R$x7Y=S}iTkt_X9=I_Emmij+=W z6F%fxjF|QfnaOM}Tyod-bnQ#Je!p|eDSt1H_2D#sGo@weP95 zxX|YJHMuck2Q&^Vi5BF;oSyx|ou|=@okae=-loh7#09zn0Z|Hv#rm-s-lItagcUD} zZ#dlb?Io+Qb?-sOAZUmB<;o(Ewp`a@1>nw@0|GYVzLcY|Dz7S-scH7+Y`L-}QI_sg zT{<*lPy7+Ap%=vDnX@s0EP8S15Zv3Y9V#;nrQEh8kDO${N!NFfuCIMo#T3K&=6+kS z2)nj!L-P)8(X@4IGn&pC$g!2cE2>Ygfno$hnF92<3KSG~^(u}QtcNxn9Vn7bIA;sn zg+qH?7*kh6d{Yu*IHeNjSfkwIIH2Vfj&uv5H^QnzC$RB&BuxhAv7G+R)5x`a1JICD z!8fO!Tov2ookvltaB&pi01L@l`(&O`OF@zs5Nltf%H>gVA3~t{JEjc=`c7ui}R;na5VPA8e5spcr%U}Bz?O%vpqtLxL%Q9`?tFdC? zVw9^=iK8&P(xt7i$%F3iAZukHx2Kaeu!x6^^PD5eb@fN~KIe*40(X=EKO}3Un*2$3 zlFO0>#MvX&6j-JXV;nRJ3&Xq3Ya+~{b*bQaaRk(_3%`Q@oAc1ILkDAH)Q4+*yedp~ zVu7V*lpK2!?y2YS^P=^Ldh0Vh_8`s4EFej2ZbmU2;Xd|sZ$EPp2iU2$B{ItN$g5{Q ztawSpL`L8Q*W1vo>d_8OQG-tWy1LQ8SeR5wbU)o&F&!?(Ud6(Vf8di@zaWywM$z`$ zjT&1B2OD_MZOxV1uLlPq=s7g<&77;NSjEk8Jr!nYbpmR65S*kPc-Nl8SWiuO(y}#7 zWv4NZjzD7$7%5L6EKs-zH8NRjZcp1cZNl(|ZP@)Dk4NgeV$$X;yfbPPN2ZN2<{M2$ z`E0Kqn8LUWv-)&ITh4DL7cynfi(Y~Hd9GpEfl zJ!eWYv||Uf?%5MnYt=FxavHE>Ra`oLBb{f2ym2p&#Yf+Ni$T?ZZR{8*NWK4uh{MLC zB8NsSrYoirWy`x#^RZIowu z7&+`gG_O*QLzR7y!gGW-0#T9OVV^<438-5S9mNc4zBBe8Leqlz@o@K!s2>)J;2?qk zJAhmX!XC6WyHYb$N01$Wd667IAW+c_3=j}pB%5ez>@r2{r`EWI8rhe;heg8}6sAgz zYN)`GW`#KslDTuyzDEx{dT()bZGSH`wdd|*aTxmbBCK1!9F1DG z#148R^r44HA!=e1?rV-vb`mD?IrWUbw8hT;bq>NN%ta&lzj81Hve<1Vz|2w$nb{Sn z5z;cUXsY25X)kvhrp@{duHVc=?`|y_zL9oX=0$RMTF`6;vFfQ+VzpXa@ zTd(!*f;RNoEm0sp+}Pq!ybX`Mex^b;Ef40`P!754T}bx2zB=KgW5?6I@XXostVB*> z`;Pq#vOmXo%;?;Xm2038J6+WRDOR!+g7XKP{sn!jZhUV?5A4U2t=l2y>=``sN-vZO zaKlCVFFg0o3_Q-DPmhiqX@CFrJXpXhfi@)I{$Z6DlTXK=)6|et8qW1UD*+;kq5Aw6 z=xW<^s;NnqLG5KX0YFraASEAKfsP*wzPuK8(uek_yDyIJ-i@7{5h3Qv6}!z-Kt=Ll z6R>cxU=%FikGugM2=Mn}lRXMqGk!zJ^ZkrFkqVW*aB01QO>Y80Sa4ocV!>R;xW3PF zq|-;Qj7ArB%qhfS7&Xe#_ur2}Uw((@JGNz9+8PKe%4QUdNbTP0_g#eW7C$aOHFy@^LU#Ak$que_p516XXz%a@m%hP6g?nl|ko3h0UV|bUgsKH9^hj~jdYUVH4%+Yl{*iTS9v@{A{ zO+;E=nh1jF{XowOcYa&?{OllX|9+G|xwz?b(hMW!`(P(&MS8513oDJ5P3mIM1Kn_% zfr1VmI*wzU4P_#O-$(FD7WHV<47D5A=Kzu*{K5!n0e)$Ce%t^AxuqiN@FjS%wNQut zc4to8&(b|j<;$gmgspZrk?Yz+_M_RUtklk>Qv!FA06#8$GyKlXFyZgHK7g#+-Q5aY za-O4_hSaQz6;ulr54;bJN)<7~gB1dDQ^a{}ITk^%%S0GEK0<2OHuG$$ponC~OQe1H z^qw7v*t!rUYByov?KarTQA^eL97L;nRqci8+6QjVlRJ|@AfA9QcFB6I-hTuePo6~4 z(xuID{hc^`7*jv}1g}o~0yVfjV#okAr4xU!zn?KZsPGFc?V2i^<^-&CUhiN;BW-RWJ>6T>*dubkan# z2+X>761^>wI8DwKYQ+&}&LQ&58SLJ$2^-e$F^;9`778IZZybg?MGB)}K0mDDXuRcH zj$_&RbH?8I+GCI6)z2s5^R8_$kkk64uwptL8;`A&`}VUJS=k*xSq^9^Q?DWN7b`*Y ziX9sSS*mo-5V%il+X3%DPqgUR2$`%bPk%lQ!#mz&(MvFr1zR zYZ*ebNxSy;+L#GrS+V}|<8%ys;z@JbtM_$8r@A#zn!(aJ{icyIFKWL&uO}0YkC$N(>A|eWBqt4;b?!DMDXSoTmq01B^x2o*PPF%Vi2KnF=)%al? 
z6k@eBtd<^=SsHH~*XvPMax1&3gYB9GnaW8{3H&Pw@FTM9yYjPIN3KJb{mR1v-fbH< zpv66n&?3An-fYtVm*P3q&jv*ib3^O0#jtzL8a(jZI@IHA2<&!XA!C8Lfi(NU)o9vn zw=TiXO*`<+xc|rAR{&^PW$!=JO-v923`4`vC>;U{Dguh#f!(WXtnIpXt-5yW8fz;m zDo996cTU3eFx~xsp8J0Hn{S2z2G@<>f4MX7_tvfV+Y#k`>-TM8aIElB8p7<*5^Ycqo4bAu)Rh*@$&v?==C4ViQH4!;Qk6{kaA?{O#q> z(4jg9ow{_yU*@gBl6mtme##U`OYruy&Y*VQaxA{`97Iwz`kgh~@M}sI&Z1aZ!PmF$G# z3JiJSJ=}TOrFek$9)h@P5iAu~D+q<1G}!ar^UvXNdJ^f!`&+KOh!bBNB0_0XgZV0N zLB*CKDG{v}_K^MX!zxOq{J7BY@28UQd-9m!B%Q?-R-xt3}x-C26$-^uR6&wEJIFTIiGWB;Sm}j=<)FAmmhq9Gw5OVvSEV}>Rpd)cHWDQiNmnY?J4Y7kLP~fhHF3i9GyCKrr~i% zwv&0dg9e0z`KwAZ;8T@`Od`^ghAlFAK(+DE|11G`dauyQt_>}EZ#d;Sg7 zb?%0Dmn}yW`J}c^8W!QlM zNJSI7vR=%a)HEw)5R<8&D=&NUL=28JMLo82qiE~)-86pl94!*jZUn#Kgc&nTlI`VE`}D}RCh-8L>xk_udm&KJLfII>ZMCCoc`JDB&@vkZTc34 z8crgsIPt#y)RTB;{?C}iXII>KrC}|E6M1UYqL`wD1tTRhVr`T$PUQEjB+vcFh08HN zJ4e~!WLmbHMCw^=qt4{Nw;~b`;|eB#a2L!4dmpk~yx6A9TwTDx-D>#ho0|*zCVi{E z)Q?C!DRCbS?kD2Czt6!xR;|Vhk35WPZ@z~Ld#gHgg=(cJu9_3X60dyUbC6dF3epid{9NK=hrkq^q=n zRHQgC)Em=}rTM9pY_Gt|wOcWQHeb&gHIRnW3k}6ZEi@euCB zhX%^kJ9)u0pBPN3gR$iQ4ySdq3#T53_;haAB=5skQo~PC4!D@<`_lDIpI+U_uZ%`S zNQgniZQGI}rU98Y?ZXj9v8O=tD1#{Crj0x)jM#d*KFOR|O+)eNoe;r(ZeBh4@JpCT z=?ogj#P^?nZji3NM{#oVa*g4hHi5xtO(gU9DdXv~J=9Db732{ox9^K-J$stG0|GSk zIbYURqOQ7}$f%qwj~bNoL(7YE$buk+x;qKgKmLL%?!6YpUJm^E%Bk#H9F>`TYi_#}KmR)j=r2Jq-Z)=^&? zUyq`~N(aI_ifs{0MBJKQMOt?x+$miXM3@j@lzMPXUz)<3{HU(NnW}qDWi`J3W*&0) z7Za|PxaYd-aV>2!>s#!^?vdPf&7N$RXr?z(4ScUvp*slrN5fGF{1ynrGLbr&mQY^W z+>nO|@n;h^t~a7s_r-O^d2}$TNYW2O%d7%A%@Td_m2&Ns&<)J!!JZ()t^73b4gE zgGmEMp>u33E~9;g6R4%NY4vI>;vwXw>%##`N1II3j%g!#jvz%G-J4YD;NBQO zx!6$VW0&Q&T%Cnb1Bd~$$t%w#((ORI5<%o^mKT+o4YdJ8UY;R6DaqW%5xHMQZ@m$O zl|;z|<21s$s=L&K%dlhJFDNBDp`<8}D#Ud1$#|_e$;ke#9DW9KvXJOkVqyy7;(Bpo zj_ty4@FE4MI@zFh1~L8ZXhF)UJ)IvnCIv7wp4W-n28%!Wg#IHS5=7 z`Qkn!rq4~qHd(@!zmRb_2B-LSSTWo zwutP3@3*HA%aKh)R!a~Sa3Tr!c4{*r;+T%;IjAQF#PvjU$96`#!cGjfVAWB5v4(wi z+x9)^OAplTs5x0tRga2bZw#i?h)jWM;xQYjHeutl#C1CHNavxh-zXeKyp6zd^yW|y zV28AbR?*a{6tuLW(2)~_boO&|O`$JEuH-s=Q&ukaa)T$G$2J-!ug&=p&nG4rL~Rjv zH7T;R^=nYFJ03w)Lhe3s8u|{Qk1t~4{j|3bJNZ~gC({mmB5J2HG?-0FdVSV@d_ey0 z>l9<^(7Cfg&Kl?k3-+Bw5?#7=r)kITm_h`6>wWhlirp#(p{{$ODS#l~KG?@4qd@)g@fh}JWh-o;M6c+o0!BpXB2el=l9 z85T$VnvsQcg``bec_wzQMNw!UMB#uaM-i_Bn4`Zfga8MQ5Uqkmyt?LNr*GwDXCrOJ zDr!{t&|U(mHQH3zTU1W!D-eUpCoEX|D^7gzRg4`s&ROblP%EVdqL#zeTzZTDx_!+8 zq$cjhnh#bYannvbO~Lt-`*g(++8t<3ex2fp&nsSR1(D9uguSYVk!97nIcitWn+C%> z<=|2I;(1D?v{P)XIfjiIZK6xesl2q*EP1v4Q%og`GK#^K6crgZK{*j%1vdkQ%$trd zO-SM=TA5X6q0S{cDW=6g{%8yYO`-ROq4bJe#hrCgQ39plxqR7L44^xs5b`PaChS8Y z#m+*=vdJYP)t0%1xhkm%q>%Md2&WMssR+lDEu(N8?Nqc{@Bs$i{TwdfhND&{N~^kx zVyz@%*XB`Sem>$iEkkPZPW=4!0&Jc$53l#{&5g4%jHYBtCoa*2Xru{e!`9p)W7YkO z%dc>G4RN&h?E8bA&BQ0TGcWPoRi{WQkm%@7Wfuj>N0*nD7{n+gmAHaT?k`Rx}Zu$dGr)}m-DOLtAZK^uP_9gebgM9Q7 z$|jE(J_w~`a}*?{qJl~y`#Is&Z(V~P=NxJ(b?U%hHHI|M6`bXjb8UW)B3=(*9KAd& z1k{f$aVrTM1$%F9G8o zOqt!DefyevSc=x%9Og$z$c=zaU04etCLRh}R)+DgA(F;>evdGOb{kf`{Suyd@Ht$2 z!)097lkLDAdT+sm1lX#zo8iwP8x!3QsT;PVdi+Sr*GA&mufI3cT#!edlRZj zf2#b7KRSi#yW{~e&(=ubr?l?f+br>G$;%adgmi_lLb!Yv+zq=VnwyAT$6)=a-PljW zd)AOXRLxGt(~CCZ@=<*;y_`Z>n7q&BiK(W{5#z@lIY?Z6LJ5TwO)v0(yO41( zHOeGLXG1jm&hHWSB_!gZKmV01lmLt#F$isFH&`qo?gb+uISna$k|?hojeRuYzCS7o zVQi2ueqLz!1~W*dD;$E_)oZaB5_G}PFjG1A#ww5N^62R(1P%)UgNfMTv@R15OQ8|s z73HU)IF7r@VdPIrG)R=GM0M&&8P-Pka3NSCG$9xccw#u^;||*Fsi6@aIi!@y#j#Vy zVis+-j^*aacCsyATDAdi|GE_qo;V6aI!0kLUHZj?l#*f-YhI%DpInv;XY)x7s zmqs6Vq!~67UzDly-E>K`Wee4bfvl}Naosg{5}+ZNdhUrhW8!E;5)tH6(Ij_!JfgUq z51{B+75SBW=6{d-@4g%7TzV-wL`OS=pYRh!p0;o?+OkXvXPJ5%i)8$UGE%d7T!FMF zuTw!2pb&0jsPhH20;{N1=pXEZ0*U~}_^>{`Vlg46Bc)Ob@iAFG6JGcde>-bDrg0+7 
[base85-encoded binary patch data omitted]
zCAGJ5(?;h$ebO2x?qh;fP$JP4!-&vs~!KzPz3#87uHUl~N|y~x*3=2mj= zc@!Gpa&y0kug~C)r3|wu`#(inX(J@EP>n&QbDdp1TI_5p=lj7*_*`%ix?Z{xTd_{2hE- zZMi{OTOBH8bb_=EY_t7&oP|3E*B@7gOjIOfy4+UzSq|IKdu})T-u#T0(<+C5jvr21 z*HW%0@(0RRT|)D2Ux{0p01xK(J8&oC-0v%c#KruKlgr&$nJr_+O~cxUg5{92yvH9I zD2MWax8WpOhvCZd0`oL)^D-TujpfUyz)PPwf5T?n(%-t?hI6C#Rz1i+%E#{@p!zij zD)EUPHnt-we#3Y<2saY97j7qloi;KW6>Ty(LoeK zC2}1ES(_XWu2~m_ID~K~fAuTe33+tfa~S6jOcz%}Lvo+m7JtK?LcYrgSH(~Kwkm82 zX&TLl0##=L-tEwfa7UIR*!T?&2rC^#v=rD^8`##j!2^6sWO21}pdvs7x1)^^$XfBQ zeb}Lf6Uz0iWKFiDL}zO}K{o z+q81Ic@4;fIsf)K0rx|k#7+jnw2WuDB>Zx30est;m$%?-v;A<2BWgiB62~$=`F(S3 zBu?Fg`vdt&+ycn2IPNZY&``HgyXjZ<9yuFxN^Ggx=L894O;nL4ue;NV4O%lKS_(Eq9tqN-$$6MaQdHw z+XvUX@|(wI;T1wWh;zoffh^`4gZyIf^4rPe{}fcpE@7!BpTb`}9fp$tkHR@%e9HMa zD4Pl*t%SdjR($?KPQa706RUtrKS)jEw?e?L+aaN`MVqNn<##a`8Hs(7WK| zO6{kHK>T?b_jlZNxLe5Ii6I6TkJ`4ey{d%8Z~}QduJ^!=#+k40-Eeu(^4cKe_IZM@ zE@AKVNg1kyjkHapS=Qn*q%-jO#|83 zD-9`Yd zBf@jzKe^&QHwBS?BLP<6{~YcX+>Mk$q^<(xa4-(UIY1>e5|v@N7|=YYSEbss`H1FXU9Vvm3U)22sh#+y0a_w;k9wRh!|VpPi}}UN{RyK z4z;s_Na5!omgpT;c}j#uQ2kp1axF7)64-lj50lpd@E8&s&4qxjjhzomLazva3# zE%yJ7P?b97evjwYKs3_L13LP@(i#%2^6wTm(w3IHTt3RQNsha=khOWYq`k`dZ|%0J zE$_bp1_Js0`uEmopIm0rI-B<^acyxDXbEK-T;M9-dEGqFN992Fw;a2KJktm{>@N`R z*E}Wce*2d(S@lY10k#yVfEAMqu3ehQQ4Pcm#|_0P2GGx$P;iD^g>wmJV=ur-Y^UL- z`XtR6JnI4ZNysI%^|%-Ot((sB7i2U!mCpXYyC;6E+!f3yW0+qvkxpM!fER|XG@cukq+ z8Q0?6Ak9uczZs?$9>(KCxbCI&t9fO=f7SC}Lb#}``N(1Fgx~^r%G&4m_~4&X_?QLp z@@cpR!{h%S_)iP`rv?5$X#vK6Ril9S_EhV5+$p%jaITV@0 Date: Thu, 15 Feb 2024 18:01:53 -0300 Subject: [PATCH 005/391] adding RAG spefic readme and implementing specific helpers --- src/crewai_tools/tools/rag/README.md | 64 ++++++++++++++++++ src/crewai_tools/tools/rag/rag_tool.py | 92 +++++++++++++++++++++++--- 2 files changed, 148 insertions(+), 8 deletions(-) create mode 100644 src/crewai_tools/tools/rag/README.md diff --git a/src/crewai_tools/tools/rag/README.md b/src/crewai_tools/tools/rag/README.md new file mode 100644 index 000000000..c65daca16 --- /dev/null +++ b/src/crewai_tools/tools/rag/README.md @@ -0,0 +1,64 @@ +# RagTool: A Dynamic Knowledge Base Tool + +RagTool is designed to answer questions by leveraging the power of RAG by leveraging (EmbedChain). It integrates seamlessly with the CrewAI ecosystem, offering a versatile and powerful solution for information retrieval. + +## **Overview** + +RagTool enables users to dynamically query a knowledge base, making it an ideal tool for applications requiring access to a vast array of information. Its flexible design allows for integration with various data sources, including files, directories, web pages, yoututbe videos and custom configurations. 
 src/crewai_tools/tools/rag/README.md   | 64 ++++++++++++++++++
 src/crewai_tools/tools/rag/rag_tool.py | 92 +++++++++++++++++++++++---
 2 files changed, 148 insertions(+), 8 deletions(-)
 create mode 100644 src/crewai_tools/tools/rag/README.md

diff --git a/src/crewai_tools/tools/rag/README.md b/src/crewai_tools/tools/rag/README.md
new file mode 100644
index 000000000..c65daca16
--- /dev/null
+++ b/src/crewai_tools/tools/rag/README.md
@@ -0,0 +1,64 @@
+# RagTool: A Dynamic Knowledge Base Tool
+
+RagTool is designed to answer questions by leveraging the power of Retrieval-Augmented Generation (RAG) through EmbedChain. It integrates seamlessly with the CrewAI ecosystem, offering a versatile and powerful solution for information retrieval.
+
+## **Overview**
+
+RagTool enables users to dynamically query a knowledge base, making it an ideal tool for applications requiring access to a vast array of information. Its flexible design allows for integration with various data sources, including files, directories, web pages, YouTube videos, and custom configurations.
+
+## **Usage**
+
+RagTool can be instantiated with data from different sources, including:
+
+- 📰 PDF file
+- 📊 CSV file
+- 📃 JSON file
+- 📝 Text
+- 📁 Directory / Folder
+- 🌐 HTML Web page
+- 📽️ YouTube Channel
+- 📺 YouTube Video
+- 📚 Docs website
+- 📝 MDX file
+- 📄 DOCX file
+- 🧾 XML file
+- 📬 Gmail
+- 📝 Github
+- 🐘 Postgres
+- 🐬 MySQL
+- 🤖 Slack
+- 💬 Discord
+- 🗨️ Discourse
+- 📝 Substack
+- 🐝 Beehiiv
+- 💾 Dropbox
+- 🖼️ Image
+- ⚙️ Custom
+
+#### **Creating an Instance**
+
+```python
+from crewai_tools.tools.rag.rag_tool import RagTool
+
+# Example: Loading from a file
+rag_tool = RagTool().from_file('path/to/your/file.txt')
+
+# Example: Loading from a directory
+rag_tool = RagTool().from_directory('path/to/your/directory')
+
+# Example: Loading from a web page
+rag_tool = RagTool().from_web_page('https://example.com')
+
+# Example: Loading from an Embedchain configuration
+rag_tool = RagTool().from_embedchain('path/to/your/config.json')
+```
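+
+Once instantiated, the tool can be queried directly. A minimal sketch,
+assuming one of the loaders above was used (`run` is inherited from
+`BaseTool` and delegates to the underlying adapter):
+
+```python
+answer = rag_tool.run("What are the main requirements for the project?")
+print(answer)
+```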
+
+## **Contribution**
+
+Contributions to RagTool and the broader CrewAI tools ecosystem are welcome. To contribute, please follow the standard GitHub workflow for forking the repository, making changes, and submitting a pull request.
+
+## **License**
+
+RagTool is open-source and available under the MIT license.
+
+Thank you for considering RagTool for your knowledge base needs. Your contributions and feedback are invaluable to making RagTool even better.
\ No newline at end of file
diff --git a/src/crewai_tools/tools/rag/rag_tool.py b/src/crewai_tools/tools/rag/rag_tool.py
index 5ef616795..df86d2a5f 100644
--- a/src/crewai_tools/tools/rag/rag_tool.py
+++ b/src/crewai_tools/tools/rag/rag_tool.py
@@ -1,5 +1,5 @@
 from abc import ABC, abstractmethod
-from typing import Any
+from typing import Any, List
 
 from pydantic import BaseModel, ConfigDict
 
@@ -13,7 +13,6 @@ class Adapter(BaseModel, ABC):
     def query(self, question: str) -> str:
         """Query the knowledge base with a question and return the answer."""
 
-
 class RagTool(BaseTool):
     name: str = "Knowledge base"
     description: str = "A knowledge base that can be used to answer questions."
@@ -52,23 +51,100 @@ class RagTool(BaseTool):
         adapter = EmbedchainAdapter(embedchain_app=app)
         return RagTool(adapter=adapter)
 
-    def from_web_page(self, url: str):
+    def from_pg_db(self, db_uri: str, table_name: str):
         from embedchain import App
-        from embedchain.models.data_type import DataType
-
+        from embedchain.loaders.postgres import PostgresLoader
         from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter
 
+        config = { "url": db_uri }
+        postgres_loader = PostgresLoader(config=config)
         app = App()
-        app.add(url, data_type=DataType.WEB_PAGE)
-
+        app.add(
+            f"SELECT * FROM {table_name};",
+            data_type='postgres',
+            loader=postgres_loader
+        )
         adapter = EmbedchainAdapter(embedchain_app=app)
         return RagTool(adapter=adapter)
+
+    def from_github_repo(self, gh_token: str, gh_repo: str, type: List[str] = ["repo"]):
+        from embedchain import App
+        from embedchain.loaders.github import GithubLoader
+        from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter
+
+        loader = GithubLoader(
+            config={
+                "token": gh_token,
+            }
+        )
+        app = App()
+        app.add(f"repo:{gh_repo} type:{",".joing(type)}", data_type="github", loader=loader)
+        adapter = EmbedchainAdapter(embedchain_app=app)
+        return RagTool(adapter=adapter)
+
+    def from_xml_file(self, file_url: str):
+        from embedchain.models.data_type import DataType
+        return self._from_generic(file_url, DataType.XML)
+
+    def from_docx_file(self, file_url: str):
+        from embedchain.models.data_type import DataType
+        return self._from_generic(file_url, DataType.DOCX)
+
+    def from_docx_file(self, file_url: str):
+        from embedchain.models.data_type import DataType
+        return self._from_generic(file_url, DataType.DOCX)
+
+    def from_mdx_file(self, file_url: str):
+        from embedchain.models.data_type import DataType
+        return self._from_generic(file_url, DataType.MDX)
+
+    def from_code_docs(self, docs_url: str):
+        from embedchain.models.data_type import DataType
+        return self._from_generic(docs_url, DataType.DOCS_SITE)
+
+    def from_youtube_channel(self, channel_handle: str):
+        from embedchain.models.data_type import DataType
+        if not channel_handle.startswith("@"):
+            channel_handle = f"@{channel_handle}"
+        return self._from_generic(channel_handle, DataType.YOUTUBE_CHANNEL)
+
+    def from_website(self, url: str):
+        from embedchain.models.data_type import DataType
+        return self._from_generic(url, DataType.WEB_PAGE)
+
+    def from_text(self, text: str):
+        from embedchain.models.data_type import DataType
+        return self._from_generic(text, DataType.TEXT)
+
+    def from_json(self, file_path: str):
+        from embedchain.models.data_type import DataType
+        return self._from_generic(file_path, DataType.JSON)
+
+    def from_csv(self, file_path: str):
+        from embedchain.models.data_type import DataType
+        return self._from_generic(file_path, DataType.CSV)
+
+    def from_pdf(self, file_path: str):
+        from embedchain.models.data_type import DataType
+        return self._from_generic(file_path, DataType.PDF_FILE)
+
+    def from_web_page(self, url: str):
+        from embedchain.models.data_type import DataType
+        return self._from_generic(url, DataType.WEB_PAGE)
 
     def from_embedchain(self, config_path: str):
         from embedchain import App
-
         from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter
 
         app = App.from_config(config_path=config_path)
         adapter = EmbedchainAdapter(embedchain_app=app)
         return RagTool(adapter=adapter)
+
+    def _from_generic(self, source: str, type: str):
+        from embedchain import App
+        from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter
+        app = App()
+        app.add(source, data_type=type)
+        adapter = EmbedchainAdapter(embedchain_app=app)
+        return RagTool(adapter=adapter)
\ No newline at end of file

From 7ee9926f2e0158fa87e2026884243114da272459 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Moura?=
Date: Thu, 15 Feb 2024 18:24:16 -0300
Subject: [PATCH 006/391] quick fixes

---
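A note on the `app.add` fix in this commit: reusing the same quote character
inside an f-string is a `SyntaxError` on Python versions before 3.12
(PEP 701), and `.joing` was a typo for `.join`. A minimal illustration with
placeholder values:

```python
gh_repo = "user/repo"      # placeholder
types = ["repo", "issue"]  # placeholder

# Broken: f"repo:{gh_repo} type:{",".joing(types)}"
# (nested double quotes are a SyntaxError before Python 3.12,
# and str.joing does not exist)

# Fixed form, as applied in the diff below:
print(f"repo:{gh_repo} type:{','.join(types)}")  # repo:user/repo type:repo,issue
```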
 src/crewai_tools/tools/__init__.py     | 1 +
 src/crewai_tools/tools/rag/rag_tool.py | 7 ++++---
 2 files changed, 5 insertions(+), 3 deletions(-)
 create mode 100644 src/crewai_tools/tools/__init__.py

diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py
new file mode 100644
index 000000000..8d519fa13
--- /dev/null
+++ b/src/crewai_tools/tools/__init__.py
@@ -0,0 +1 @@
+from rag.rag_tool import RagTool
\ No newline at end of file
diff --git a/src/crewai_tools/tools/rag/rag_tool.py b/src/crewai_tools/tools/rag/rag_tool.py
index df86d2a5f..17913fee3 100644
--- a/src/crewai_tools/tools/rag/rag_tool.py
+++ b/src/crewai_tools/tools/rag/rag_tool.py
@@ -1,5 +1,5 @@
 from abc import ABC, abstractmethod
-from typing import Any, List
+from typing import Any, List, Optional
 
 from pydantic import BaseModel, ConfigDict
 
@@ -16,7 +16,7 @@ class Adapter(BaseModel, ABC):
 class RagTool(BaseTool):
     name: str = "Knowledge base"
     description: str = "A knowledge base that can be used to answer questions."
-    adapter: Adapter
+    adapter: Optional[Adapter] = None
 
     def _run(
         self,
@@ -53,6 +53,7 @@ class RagTool(BaseTool):
 
     def from_pg_db(self, db_uri: str, table_name: str):
         from embedchain import App
+        from embedchain.models.data_type import DataType
         from embedchain.loaders.postgres import PostgresLoader
         from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter
 
@@ -79,7 +80,7 @@ class RagTool(BaseTool):
             }
         )
         app = App()
-        app.add(f"repo:{gh_repo} type:{",".joing(type)}", data_type="github", loader=loader)
+        app.add(f"repo:{gh_repo} type:{','.join(type)}", data_type="github", loader=loader)
         adapter = EmbedchainAdapter(embedchain_app=app)
         return RagTool(adapter=adapter)

From b4f270ad1ff31f76f04141387438e98f933c2cfc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Moura?=
Date: Thu, 15 Feb 2024 18:26:15 -0300
Subject: [PATCH 007/391] quick fixes

---
 src/crewai_tools/tools/rag/rag_tool.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/crewai_tools/tools/rag/rag_tool.py b/src/crewai_tools/tools/rag/rag_tool.py
index 17913fee3..4e81df5af 100644
--- a/src/crewai_tools/tools/rag/rag_tool.py
+++ b/src/crewai_tools/tools/rag/rag_tool.py
@@ -140,7 +140,7 @@ class RagTool(BaseTool):
 
         app = App.from_config(config_path=config_path)
         adapter = EmbedchainAdapter(embedchain_app=app)
-        return RagTool(adapter=adapter)
+        return RagTool(name=self.name, description=self.description, adapter=adapter)
 
     def _from_generic(self, source: str, type: str):
         from embedchain import App
@@ -148,4 +148,4 @@ class RagTool(BaseTool):
         app = App()
         app.add(source, data_type=type)
         adapter = EmbedchainAdapter(embedchain_app=app)
-        return RagTool(adapter=adapter)
\ No newline at end of file
+        return RagTool(name=self.name, description=self.description, adapter=adapter)
\ No newline at end of file

From e94fd2cad2a2d4382b16fadcd6797b821907deb9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Moura?=
Date: Thu, 15 Feb 2024 18:27:38 -0300
Subject: [PATCH 008/391] quick fixes

---
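Context for the one-line fix below: once `crewai_tools` is installed as a
package, the bare `from rag.rag_tool import ...` raises `ModuleNotFoundError`
because `rag` is not a top-level module; the relative form resolves against
`crewai_tools.tools`. With the fix, the re-export works as expected (assuming
the package is installed):

```python
from crewai_tools.tools import RagTool                                # via the re-export
from crewai_tools.tools.rag.rag_tool import RagTool as DirectRagTool  # direct module path

assert RagTool is DirectRagTool
```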
100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -1 +1 @@ -from rag.rag_tool import RagTool \ No newline at end of file +from .rag.rag_tool import RagTool \ No newline at end of file From f3c693a5bb80f07c72a57b64c2721ada78f9fb11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Thu, 22 Feb 2024 19:56:42 -0300 Subject: [PATCH 009/391] very much work in progress version --- src/crewai_tools/__init__.py | 18 +++ .../adapters/embedchain_adapter.py | 16 +-- src/crewai_tools/tools/__init__.py | 18 ++- src/crewai_tools/tools/base_tool.py | 6 +- .../code_docs_search_tool.py | 40 ++++++ .../tools/csv_search_tool/csv_search_tool.py | 40 ++++++ .../directory_search_tool.py | 41 ++++++ .../docx_search_tool/docx_search_tool.py | 40 ++++++ .../tools/file_read_tool/file_read_tool.py | 32 +++++ .../github_search_tool/github_search_tool.py | 44 ++++++ .../json_search_tool/json_search_tool.py | 40 ++++++ .../tools/mdx_seach_tool/mdx_search_tool.py | 40 ++++++ .../tools/pdf_search_tool/pdf_search_tool.py | 40 ++++++ .../tools/pg_seach_tool/pg_search_tool.py | 44 ++++++ src/crewai_tools/tools/rag/rag_tool.py | 127 +----------------- .../tools/serper_dev_tool/serper_dev_tool.py | 47 +++++++ .../tools/txt_search_tool/txt_search_tool.py | 39 ++++++ .../website_search/website_search_tool.py | 40 ++++++ .../tools/xml_search_tool/xml_search_tool.py | 40 ++++++ .../youtube_channel_search_tool.py | 42 ++++++ .../youtube_video_search_tool.py | 40 ++++++ 21 files changed, 703 insertions(+), 131 deletions(-) create mode 100644 src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py create mode 100644 src/crewai_tools/tools/csv_search_tool/csv_search_tool.py create mode 100644 src/crewai_tools/tools/directory_search_tool/directory_search_tool.py create mode 100644 src/crewai_tools/tools/docx_search_tool/docx_search_tool.py create mode 100644 src/crewai_tools/tools/file_read_tool/file_read_tool.py create mode 100644 src/crewai_tools/tools/github_search_tool/github_search_tool.py create mode 100644 src/crewai_tools/tools/json_search_tool/json_search_tool.py create mode 100644 src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py create mode 100644 src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py create mode 100644 src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py create mode 100644 src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py create mode 100644 src/crewai_tools/tools/txt_search_tool/txt_search_tool.py create mode 100644 src/crewai_tools/tools/website_search/website_search_tool.py create mode 100644 src/crewai_tools/tools/xml_search_tool/xml_search_tool.py create mode 100644 src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py create mode 100644 src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 09d10d88f..a6a269be6 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -1 +1,19 @@ from .tools.base_tool import BaseTool, Tool, as_tool, tool +from .tools import ( + CodeDocsSearchTool, + CSVSearchTool, + DirectorySearchTool, + DOCXSearchTool, + FileReadTool, + GithubSearchTool, + TXTSearchTool, + JSONSearchTool, + MDXSearchTool, + PDFSearchTool, + PGSearchTool, + RagTool, + WebsiteSearchTool, + XMLSearchTool, + YoutubeChannelSearchTool, + YoutubeVideoSearchTool, +) \ No newline at end of file diff --git a/src/crewai_tools/adapters/embedchain_adapter.py 
b/src/crewai_tools/adapters/embedchain_adapter.py index cdb7f1d5a..16491fb25 100644 --- a/src/crewai_tools/adapters/embedchain_adapter.py +++ b/src/crewai_tools/adapters/embedchain_adapter.py @@ -1,14 +1,12 @@ -from embedchain import App - +from typing import Any from crewai_tools.tools.rag.rag_tool import Adapter - class EmbedchainAdapter(Adapter): - embedchain_app: App - dry_run: bool = False + embedchain_app: Any + summarize: bool = False def query(self, question: str) -> str: - result = self.embedchain_app.query(question, dry_run=self.dry_run) - if result is list: - return "\n".join(result) - return str(result) + result, sources = self.embedchain_app.query(question, citations=True, dry_run=(not self.summarize)) + if self.summarize: + return result + return "\n\n".join([source[0] for source in sources]) diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 1628a6bca..2910185ec 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -1 +1,17 @@ -from .rag.rag_tool import RagTool \ No newline at end of file +from .code_docs_search_tool.code_docs_search_tool import CodeDocsSearchTool +from .csv_search_tool.csv_search_tool import CSVSearchTool +from .directory_search_tool.directory_search_tool import DirectorySearchTool +from .docx_search_tool.docx_search_tool import DOCXSearchTool +from .file_read_tool.file_read_tool import FileReadTool +from .github_search_tool.github_search_tool import GithubSearchTool +from .serper_dev_tool.serper_dev_tool import SeperDevTool +from .txt_search_tool.txt_search_tool import TXTSearchTool +from .json_search_tool.json_search_tool import JSONSearchTool +from .mdx_seach_tool.mdx_search_tool import MDXSearchTool +from .pdf_search_tool.pdf_search_tool import PDFSearchTool +from .pg_seach_tool.pg_search_tool import PGSearchTool +from .rag.rag_tool import RagTool +from .website_search.website_search_tool import WebsiteSearchTool +from .xml_search_tool.xml_search_tool import XMLSearchTool +from .youtube_channel_search_tool.youtube_channel_search_tool import YoutubeChannelSearchTool +from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool \ No newline at end of file diff --git a/src/crewai_tools/tools/base_tool.py b/src/crewai_tools/tools/base_tool.py index eadef2368..e2fb18395 100644 --- a/src/crewai_tools/tools/base_tool.py +++ b/src/crewai_tools/tools/base_tool.py @@ -1,5 +1,5 @@ from abc import ABC, abstractmethod -from typing import Any, Callable, cast +from typing import Any, Callable, cast, Optional, Type from langchain.agents import tools as langchain_tools from pydantic import BaseModel @@ -10,12 +10,15 @@ class BaseTool(BaseModel, ABC): """The unique name of the tool that clearly communicates its purpose.""" description: str """Used to tell the model how/when/why to use the tool.""" + args_schema: Optional[Type[BaseModel]] = None + """The schema for the arguments that the tool accepts.""" def run( self, *args: Any, **kwargs: Any, ) -> Any: + print(f"Using Tool: {self.name}") return self._run(*args, **kwargs) @abstractmethod @@ -30,6 +33,7 @@ class BaseTool(BaseModel, ABC): return langchain_tools.Tool( name=self.name, description=self.description, + args_schema=self.args_schema, func=self._run, ) diff --git a/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py b/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py new file mode 100644 index 000000000..fd0acf4ca --- /dev/null +++ 
b/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py @@ -0,0 +1,40 @@ +from typing import Optional, Type, Any +from pydantic.v1 import BaseModel, Field + +from embedchain import App +from embedchain.models.data_type import DataType + +from ..rag.rag_tool import RagTool + + +class FixedCodeDocsSearchToolSchema(BaseModel): + """Input for CodeDocsSearchTool.""" + search_query: str = Field(..., description="Mandatory search query you want to use to search the Code Docs content") + +class CodeDocsSearchToolSchema(FixedCodeDocsSearchToolSchema): + """Input for CodeDocsSearchTool.""" + docs_url: str = Field(..., description="Mandatory docs_url path you want to search") + +class CodeDocsSearchTool(RagTool): + name: str = "Search a Code Docs content" + description: str = "A tool that can be used to semantic search a query from a Code Docs content." + summarize: bool = False + args_schema: Type[BaseModel] = CodeDocsSearchToolSchema + docs_url: Optional[str] = None + + def __init__(self, docs_url: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if docs_url is not None: + self.docs_url = docs_url + self.description = f"A tool that can be used to semantic search a query the {docs_url} Code Docs content." + self.args_schema = FixedCodeDocsSearchToolSchema + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + docs_url = kwargs.get('docs_url', self.docs_url) + self.app = App() + self.app.add(docs_url, data_type=DataType.DOCS_SITE) + return super()._run(query=search_query) \ No newline at end of file diff --git a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py new file mode 100644 index 000000000..8cc06e263 --- /dev/null +++ b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py @@ -0,0 +1,40 @@ +from typing import Optional, Type, Any +from pydantic.v1 import BaseModel, Field + +from embedchain import App +from embedchain.models.data_type import DataType + +from ..rag.rag_tool import RagTool + + +class FixedCSVSearchToolSchema(BaseModel): + """Input for CSVSearchTool.""" + search_query: str = Field(..., description="Mandatory search query you want to use to search the CSV's content") + +class CSVSearchToolSchema(FixedCSVSearchToolSchema): + """Input for CSVSearchTool.""" + pdf: str = Field(..., description="Mandatory csv path you want to search") + +class CSVSearchTool(RagTool): + name: str = "Search a CSV's content" + description: str = "A tool that can be used to semantic search a query from a CSV's content." + summarize: bool = False + args_schema: Type[BaseModel] = CSVSearchToolSchema + csv: Optional[str] = None + + def __init__(self, csv: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if csv is not None: + self.csv = csv + self.description = f"A tool that can be used to semantic search a query the {csv} CSV's content." 
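Editorial note, not part of the patch: CodeDocsSearchTool above is the first of many tools in this commit built on the same fixed-vs-dynamic schema pattern. A usage sketch follows (the URL is a placeholder, and it assumes a module-level `EmbedchainAdapter` import is added to rag_tool.py, since `_run` there references the name without importing it):

```python
from crewai_tools import CodeDocsSearchTool

# Dynamic mode: no source bound at construction, so the agent must supply
# both `docs_url` and `search_query` (CodeDocsSearchToolSchema).
tool = CodeDocsSearchTool()
tool.run(search_query="how are retries configured?",
         docs_url="https://docs.example.com")  # placeholder URL

# Fixed mode: the source is bound up front, the schema narrows to
# FixedCodeDocsSearchToolSchema, and only `search_query` remains.
tool = CodeDocsSearchTool(docs_url="https://docs.example.com")
tool.run(search_query="how are retries configured?")
```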
+ self.args_schema = FixedCSVSearchToolSchema + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + csv = kwargs.get('csv', self.csv) + self.app = App() + self.app.add(csv, data_type=DataType.CSV) + return super()._run(query=search_query) \ No newline at end of file diff --git a/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py b/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py new file mode 100644 index 000000000..39c34fc93 --- /dev/null +++ b/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py @@ -0,0 +1,41 @@ +from typing import Optional, Type, Any +from pydantic.v1 import BaseModel, Field + +from embedchain import App +from embedchain.loaders.directory_loader import DirectoryLoader + +from ..rag.rag_tool import RagTool + + +class FixedDirectorySearchToolSchema(BaseModel): + """Input for DirectorySearchTool.""" + search_query: str = Field(..., description="Mandatory search query you want to use to search the directory's content") + +class DirectorySearchToolSchema(FixedDirectorySearchToolSchema): + """Input for DirectorySearchTool.""" + directory: str = Field(..., description="Mandatory directory you want to search") + +class DirectorySearchTool(RagTool): + name: str = "Search a directory's content" + description: str = "A tool that can be used to semantic search a query from a directory's content." + summarize: bool = False + args_schema: Type[BaseModel] = DirectorySearchToolSchema + directory: Optional[str] = None + + def __init__(self, directory: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if directory is not None: + self.directory = directory + self.description = f"A tool that can be used to semantic search a query the {directory} directory's content." + self.args_schema = FixedDirectorySearchToolSchema + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + directory = kwargs.get('directory', self.directory) + loader = DirectoryLoader(config=dict(recursive=True)) + self.app = App() + self.app.add(directory, loader=loader) + return super()._run(query=search_query) \ No newline at end of file diff --git a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py new file mode 100644 index 000000000..1a52e5f3b --- /dev/null +++ b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py @@ -0,0 +1,40 @@ +from typing import Optional, Type, Any +from pydantic.v1 import BaseModel, Field + +from embedchain import App +from embedchain.models.data_type import DataType + +from ..rag.rag_tool import RagTool + + +class FixedDOCXSearchToolSchema(BaseModel): + """Input for DOCXSearchTool.""" + search_query: str = Field(..., description="Mandatory search query you want to use to search the DOCX's content") + +class DOCXSearchToolSchema(FixedDOCXSearchToolSchema): + """Input for DOCXSearchTool.""" + docx: str = Field(..., description="Mandatory docx path you want to search") + +class DOCXSearchTool(RagTool): + name: str = "Search a DOCX's content" + description: str = "A tool that can be used to semantic search a query from a DOCX's content." + summarize: bool = False + args_schema: Type[BaseModel] = DOCXSearchToolSchema + docx: Optional[str] = None + + def __init__(self, docx: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if docx is not None: + self.docx = docx + self.description = f"A tool that can be used to semantic search a query the {docx} DOCX's content." 
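A sketch of what the `summarize` flag on these tools controls, per the EmbedchainAdapter change earlier in this patch (the file path is a placeholder):

```python
from crewai_tools import DOCXSearchTool

# summarize=True: embedchain's LLM answers the query and that answer is returned.
tool = DOCXSearchTool(docx="report.docx", summarize=True)
tool.run(search_query="key findings")

# summarize=False (default): the query runs as a dry run and the retrieved
# source chunks are returned verbatim, joined together.
tool = DOCXSearchTool(docx="report.docx")
tool.run(search_query="key findings")
```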
+ self.args_schema = FixedDOCXSearchToolSchema + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + docx = kwargs.get('docx', self.docx) + self.app = App() + self.app.add(docx, data_type=DataType.DOCX) + return super()._run(query=search_query) \ No newline at end of file diff --git a/src/crewai_tools/tools/file_read_tool/file_read_tool.py b/src/crewai_tools/tools/file_read_tool/file_read_tool.py new file mode 100644 index 000000000..0721ec127 --- /dev/null +++ b/src/crewai_tools/tools/file_read_tool/file_read_tool.py @@ -0,0 +1,32 @@ +from typing import Optional, Type, Any +from pydantic.v1 import BaseModel, Field +from ..base_tool import BaseTool + +class FixedFileReadToolSchema(BaseModel): + """Input for FileReadTool.""" + pass + +class FileReadToolSchema(FixedFileReadToolSchema): + """Input for FileReadTool.""" + file_path: str = Field(..., description="Mandatory file path to read the file") + +class FileReadTool(BaseTool): + name: str = "Read a file's content" + description: str = "A tool that can be used to read a file's content." + args_schema: Type[BaseModel] = FileReadToolSchema + file_path: Optional[str] = None + + def __init__(self, file_path: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if file_path is not None: + self.file_path = file_path + self.description = f"A tool that can be used to read {file_path}'s content." + self.args_schema = FixedFileReadToolSchema + + def _run( + self, + **kwargs: Any, + ) -> Any: + file_path = kwargs.get('file_path', self.file_path) + with open(file_path, 'r') as file: + return file.read() \ No newline at end of file diff --git a/src/crewai_tools/tools/github_search_tool/github_search_tool.py b/src/crewai_tools/tools/github_search_tool/github_search_tool.py new file mode 100644 index 000000000..7b6066e00 --- /dev/null +++ b/src/crewai_tools/tools/github_search_tool/github_search_tool.py @@ -0,0 +1,44 @@ +from typing import Optional, Type, List, Any +from pydantic.v1 import BaseModel, Field + +from embedchain import App +from embedchain.loaders.github import GithubLoader + +from ..rag.rag_tool import RagTool + + +class FixedGithubSearchToolSchema(BaseModel): + """Input for GithubSearchTool.""" + search_query: str = Field(..., description="Mandatory search query you want to use to search the github repo's content") + +class GithubSearchToolSchema(FixedGithubSearchToolSchema): + """Input for GithubSearchTool.""" + github_repo: str = Field(..., description="Mandatory github you want to search") + content_types: List[str] = Field(..., description="Mandatory content types you want to be inlcuded search, options: [code, repo, pr, issue]") + +class GithubSearchTool(RagTool): + name: str = "Search a github repo's content" + description: str = "A tool that can be used to semantic search a query from a github repo's content." + summarize: bool = False + gh_token: str = None + args_schema: Type[BaseModel] = GithubSearchToolSchema + github_repo: Optional[str] = None + content_types: List[str] + + def __init__(self, github_repo: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if github_repo is not None: + self.github_repo = github_repo + self.description = f"A tool that can be used to semantic search a query the {github_repo} github repo's content." 
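A construction sketch for GithubSearchTool (token and repo are placeholders; `content_types` feeds the `type:` filter that `_run` builds for the embedchain GithubLoader):

```python
from crewai_tools import GithubSearchTool

tool = GithubSearchTool(
    github_repo="some-org/some-repo",            # placeholder "owner/name"
    gh_token="<github personal access token>",   # handed to GithubLoader
    content_types=["code", "issue"],             # any of: code, repo, pr, issue
)
tool.run(search_query="where is the retry logic implemented?")
```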
+ self.args_schema = FixedGithubSearchToolSchema + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + github_repo = kwargs.get('github_repo', self.github_repo) + loader = GithubLoader(config={"token": self.gh_token}) + app = App() + app.add(f"repo:{github_repo} type:{','.join(self.content_types)}", data_type="github", loader=loader) + return super()._run(query=search_query) \ No newline at end of file diff --git a/src/crewai_tools/tools/json_search_tool/json_search_tool.py b/src/crewai_tools/tools/json_search_tool/json_search_tool.py new file mode 100644 index 000000000..89e515e78 --- /dev/null +++ b/src/crewai_tools/tools/json_search_tool/json_search_tool.py @@ -0,0 +1,40 @@ +from typing import Optional, Type, Any +from pydantic.v1 import BaseModel, Field + +from embedchain import App +from embedchain.models.data_type import DataType + +from ..rag.rag_tool import RagTool + + +class FixedJSONSearchToolSchema(BaseModel): + """Input for JSONSearchTool.""" + search_query: str = Field(..., description="Mandatory search query you want to use to search the JSON's content") + +class JSONSearchToolSchema(FixedJSONSearchToolSchema): + """Input for JSONSearchTool.""" + json_path: str = Field(..., description="Mandatory json path you want to search") + +class JSONSearchTool(RagTool): + name: str = "Search a JSON's content" + description: str = "A tool that can be used to semantic search a query from a JSON's content." + summarize: bool = False + args_schema: Type[BaseModel] = JSONSearchToolSchema + json_path: Optional[str] = None + + def __init__(self, json_path: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if json_path is not None: + self.json_path = json_path + self.description = f"A tool that can be used to semantic search a query the {json} JSON's content." + self.args_schema = FixedJSONSearchToolSchema + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + json_path = kwargs.get('json_path', self.json_path) + self.app = App() + self.app.add(json_path, data_type=DataType.JSON) + return super()._run(query=search_query) \ No newline at end of file diff --git a/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py b/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py new file mode 100644 index 000000000..0f4deb056 --- /dev/null +++ b/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py @@ -0,0 +1,40 @@ +from typing import Optional, Type, Any +from pydantic.v1 import BaseModel, Field + +from embedchain import App +from embedchain.models.data_type import DataType + +from ..rag.rag_tool import RagTool + + +class FixedMDXSearchToolSchema(BaseModel): + """Input for MDXSearchTool.""" + search_query: str = Field(..., description="Mandatory search query you want to use to search the MDX's content") + +class MDXSearchToolSchema(FixedMDXSearchToolSchema): + """Input for MDXSearchTool.""" + mdx: str = Field(..., description="Mandatory mdx path you want to search") + +class MDXSearchTool(RagTool): + name: str = "Search a MDX's content" + description: str = "A tool that can be used to semantic search a query from a MDX's content." + summarize: bool = False + args_schema: Type[BaseModel] = MDXSearchToolSchema + mdx: Optional[str] = None + + def __init__(self, mdx: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if mdx is not None: + self.mdx = mdx + self.description = f"A tool that can be used to semantic search a query the {mdx} MDX's content." 
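Because `_run` resolves the source with `kwargs.get(..., self.<attr>)`, a tool left in dynamic mode can be pointed at a different file on every call. A sketch with placeholder paths:

```python
from crewai_tools import MDXSearchTool

tool = MDXSearchTool()  # no file bound at construction
tool.run(search_query="installation steps", mdx="docs/intro.mdx")
tool.run(search_query="api reference", mdx="docs/api.mdx")
```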
+ self.args_schema = FixedMDXSearchToolSchema + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + mdx = kwargs.get('mdx', self.mdx) + self.app = App() + self.app.add(mdx, data_type=DataType.MDX) + return super()._run(query=search_query) \ No newline at end of file diff --git a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py new file mode 100644 index 000000000..ba54e34ca --- /dev/null +++ b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py @@ -0,0 +1,40 @@ +from typing import Optional, Type, Any +from pydantic.v1 import BaseModel, Field + +from embedchain import App +from embedchain.models.data_type import DataType + +from ..rag.rag_tool import RagTool + + +class FixedPDFSearchToolSchema(BaseModel): + """Input for PDFSearchTool.""" + search_query: str = Field(..., description="Mandatory search query you want to use to search the PDF's content") + +class PDFSearchToolSchema(FixedPDFSearchToolSchema): + """Input for PDFSearchTool.""" + pdf: str = Field(..., description="Mandatory pdf path you want to search") + +class PDFSearchTool(RagTool): + name: str = "Search a PDF's content" + description: str = "A tool that can be used to semantic search a query from a PDF's content." + summarize: bool = False + args_schema: Type[BaseModel] = PDFSearchToolSchema + pdf: Optional[str] = None + + def __init__(self, pdf: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if pdf is not None: + self.pdf = pdf + self.description = f"A tool that can be used to semantic search a query the {pdf} PDF's content." + self.args_schema = FixedPDFSearchToolSchema + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + pdf = kwargs.get('pdf', self.pdf) + self.app = App() + self.app.add(pdf, data_type=DataType.PDF_FILE) + return super()._run(query=search_query) \ No newline at end of file diff --git a/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py b/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py new file mode 100644 index 000000000..f625bebc9 --- /dev/null +++ b/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py @@ -0,0 +1,44 @@ +from typing import Optional, Type, Any +from pydantic.v1 import BaseModel, Field + +from embedchain import App +from embedchain.loaders.postgres import PostgresLoader + +from ..rag.rag_tool import RagTool + +class PGSearchToolSchema(BaseModel): + """Input for PGSearchTool.""" + search_query: str = Field(..., description="Mandatory semantic search query you want to use to search the database's content") + +class PGSearchTool(RagTool): + name: str = "Search a database's table content" + description: str = "A tool that can be used to semantic search a query from a database table's content." + summarize: bool = False + args_schema: Type[BaseModel] = PGSearchToolSchema + db_uri: str = Field(..., description="Mandatory database URI") + table_name: str = Field(..., description="Mandatory table name") + search_query: str = Field(..., description="Mandatory semantic search query you want to use to search the database's content") + + def __init__(self, table_name: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if table_name is not None: + self.table_name = table_name + self.description = f"A tool that can be used to semantic search a query the {table_name} database table's content." 
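For orientation, a sketch of the raw embedchain calls that `_run` below boils down to (the connection URI and table name are placeholders):

```python
from embedchain import App
from embedchain.loaders.postgres import PostgresLoader

loader = PostgresLoader(config={"url": "postgresql://user:pass@localhost:5432/mydb"})
app = App()
# The whole table is ingested as a single postgres source, then searched
# semantically like any other embedchain document.
app.add("SELECT * FROM employees;", data_type="postgres", loader=loader)
```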
+ else: + raise('To use PGSearchTool, you must provide a `table_name` argument') + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + + config = { "url": self.db_uri } + postgres_loader = PostgresLoader(config=config) + app = App() + app.add( + f"SELECT * FROM {self.table_name};", + data_type='postgres', + loader=postgres_loader + ) + return super()._run(query=search_query) \ No newline at end of file diff --git a/src/crewai_tools/tools/rag/rag_tool.py b/src/crewai_tools/tools/rag/rag_tool.py index 4e81df5af..3901129ff 100644 --- a/src/crewai_tools/tools/rag/rag_tool.py +++ b/src/crewai_tools/tools/rag/rag_tool.py @@ -1,7 +1,7 @@ from abc import ABC, abstractmethod from typing import Any, List, Optional -from pydantic import BaseModel, ConfigDict +from pydantic.v1 import BaseModel, ConfigDict from crewai_tools.tools.base_tool import BaseTool @@ -14,125 +14,20 @@ class Adapter(BaseModel, ABC): """Query the knowledge base with a question and return the answer.""" class RagTool(BaseTool): + model_config = ConfigDict(arbitrary_types_allowed=True) name: str = "Knowledge base" description: str = "A knowledge base that can be used to answer questions." + summarize: bool = False adapter: Optional[Adapter] = None + app: Optional[Any] = None def _run( self, - *args: Any, - **kwargs: Any, + query: str, ) -> Any: - return self.adapter.query(args[0]) - - def from_file(self, file_path: str): - from embedchain import App - from embedchain.models.data_type import DataType - from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter - - app = App() - app.add(file_path, data_type=DataType.TEXT_FILE) - - adapter = EmbedchainAdapter(embedchain_app=app) - return RagTool(adapter=adapter) - - def from_directory(self, directory_path: str): - from embedchain import App - from embedchain.loaders.directory_loader import DirectoryLoader - - from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter - - loader = DirectoryLoader(config=dict(recursive=True)) - - app = App() - app.add(directory_path, loader=loader) - - adapter = EmbedchainAdapter(embedchain_app=app) - return RagTool(adapter=adapter) - - def from_pg_db(self, db_uri: str, table_name: str): - from embedchain import App - from embedchain.models.data_type import DataType - from embedchain.loaders.postgres import PostgresLoader - from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter - - config = { "url": db_uri } - postgres_loader = PostgresLoader(config=config) - app = App() - app.add( - f"SELECT * FROM {table_name};", - data_type='postgres', - loader=postgres_loader - ) - adapter = EmbedchainAdapter(embedchain_app=app) - return RagTool(adapter=adapter) - - - def from_github_repo(self, gh_token: str, gh_repo: str, type: List[str] = ["repo"]): - from embedchain import App - from embedchain.loaders.github import GithubLoader - from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter - - loader = GithubLoader( - config={ - "token": gh_token, - } - ) - app = App() - app.add(f"repo:{gh_repo} type:{','.join(type)}", data_type="github", loader=loader) - adapter = EmbedchainAdapter(embedchain_app=app) - return RagTool(adapter=adapter) - - def from_xml_file(self, file_url: str): - from embedchain.models.data_type import DataType - return self._from_generic(file_url, DataType.XML) - - def from_docx_file(self, file_url: str): - from embedchain.models.data_type import DataType - return self._from_generic(file_url, DataType.DOCX) - - def from_docx_file(self, file_url: str): - from 
embedchain.models.data_type import DataType - return self._from_generic(file_url, DataType.DOCX) - - def from_mdx_file(self, file_url: str): - from embedchain.models.data_type import DataType - return self._from_generic(file_url, DataType.MDX) - - def from_code_docs(self, docs_url: str): - from embedchain.models.data_type import DataType - return self._from_generic(docs_url, DataType.DOCS_SITE) - - def from_youtube_channel(self, channel_handle: str): - from embedchain.models.data_type import DataType - if not channel_handle.startswith("@"): - channel_handle = f"@{channel_handle}" - return self._from_generic(channel_handle, DataType.YOUTUBE_CHANNEL) - - def from_website(self, url: str): - from embedchain.models.data_type import DataType - return self._from_generic(url, DataType.WEB_PAGE) - - def from_text(self, text: str): - from embedchain.models.data_type import DataType - return self._from_generic(text, DataType.TEXT) - - def from_json(self, file_path: str): - from embedchain.models.data_type import DataType - return self._from_generic(file_path, DataType.JSON) - - def from_csv(self, file_path: str): - from embedchain.models.data_type import DataType - return self._from_generic(file_path, DataType.CSV) - - def from_pdf(self, file_path: str): - from embedchain.models.data_type import DataType - return self._from_generic(file_path, DataType.PDF_FILE) - - def from_web_page(self, url: str): - from embedchain.models.data_type import DataType - return self._from_generic(url, DataType.WEB_PAGE) + self.adapter = EmbedchainAdapter(embedchain_app=self.app, summarize=self.summarize) + return f"Relevant Content:\n{self.adapter.query(query)}" def from_embedchain(self, config_path: str): from embedchain import App @@ -140,12 +35,4 @@ class RagTool(BaseTool): app = App.from_config(config_path=config_path) adapter = EmbedchainAdapter(embedchain_app=app) - return RagTool(name=self.name, description=self.description, adapter=adapter) - - def _from_generic(self, source: str, type: str): - from embedchain import App - from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter - app = App() - app.add(source, data_type=type) - adapter = EmbedchainAdapter(embedchain_app=app) return RagTool(name=self.name, description=self.description, adapter=adapter) \ No newline at end of file diff --git a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py new file mode 100644 index 000000000..d4a886a73 --- /dev/null +++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py @@ -0,0 +1,47 @@ +import os +import json +import requests + +from typing import Type, Any +from pydantic.v1 import BaseModel, Field +from crewai_tools.tools.base_tool import BaseTool + +class SeperDevToolSchema(BaseModel): + """Input for TXTSearchTool.""" + search_query: str = Field(..., description="Mandatory search query you want to use to search the internet") + +class SeperDevTool(BaseTool): + name: str = "Search the internet" + description: str = "A tool that can be used to semantic search a query from a txt's content." 
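A usage sketch for the Serper tool (requires a serper.dev account; the key below is a placeholder):

```python
import os

from crewai_tools import SeperDevTool

os.environ["SERPER_API_KEY"] = "<your serper.dev api key>"  # placeholder

tool = SeperDevTool()
# Posts {"q": ...} to https://google.serper.dev/search and formats the
# organic results as title / link / snippet blocks.
print(tool.run(search_query="crewai tools release notes"))
```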
+ args_schema: Type[BaseModel] = SeperDevToolSchema + search_url: str = "https://google.serper.dev/search" + n_results: int = None + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + payload = json.dumps({"q": search_query}) + headers = { + 'X-API-KEY': os.environ['SERPER_API_KEY'], + 'content-type': 'application/json' + } + response = requests.request("POST", self.search_url, headers=headers, data=payload) + results = response.json()['organic'] + stirng = [] + for result in results: + print(result) + print('--------------') + try: + stirng.append('\n'.join([ + f"Title: {result['title']}", + f"Link: {result['link']}", + f"Snippet: {result['snippet']}", + "---" + ])) + except KeyError: + next + + content = '\n'.join(stirng) + return f"\nSearch results: {content}\n" diff --git a/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py b/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py new file mode 100644 index 000000000..130f6f164 --- /dev/null +++ b/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py @@ -0,0 +1,39 @@ +from typing import Optional, Type, Any +from pydantic.v1 import BaseModel, Field + +from embedchain import App +from embedchain.models.data_type import DataType + +from ..rag.rag_tool import RagTool + +class FixedTXTSearchToolSchema(BaseModel): + """Input for TXTSearchTool.""" + search_query: str = Field(..., description="Mandatory search query you want to use to search the txt's content") + +class TXTSearchToolSchema(FixedTXTSearchToolSchema): + """Input for TXTSearchTool.""" + txt: str = Field(..., description="Mandatory txt path you want to search") + +class TXTSearchTool(RagTool): + name: str = "Search a txt's content" + description: str = "A tool that can be used to semantic search a query from a txt's content." + summarize: bool = False + args_schema: Type[BaseModel] = TXTSearchToolSchema + txt: Optional[str] = None + + def __init__(self, txt: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if txt is not None: + self.txt = txt + self.description = f"A tool that can be used to semantic search a query the {txt} txt's content." + self.args_schema = FixedTXTSearchToolSchema + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + txt = kwargs.get('txt', self.txt) + self.app = App() + self.app.add(txt, data_type=DataType.TEXT_FILE) + return super()._run(query=search_query) \ No newline at end of file diff --git a/src/crewai_tools/tools/website_search/website_search_tool.py b/src/crewai_tools/tools/website_search/website_search_tool.py new file mode 100644 index 000000000..f4cffa9c9 --- /dev/null +++ b/src/crewai_tools/tools/website_search/website_search_tool.py @@ -0,0 +1,40 @@ +from typing import Optional, Type, Any +from pydantic.v1 import BaseModel, Field + +from embedchain import App +from embedchain.models.data_type import DataType + +from ..rag.rag_tool import RagTool + + +class FixedWebsiteSearchToolSchema(BaseModel): + """Input for WebsiteSearchTool.""" + search_query: str = Field(..., description="Mandatory search query you want to use to search a specific website") + +class WebsiteSearchToolSchema(FixedWebsiteSearchToolSchema): + """Input for WebsiteSearchTool.""" + website: str = Field(..., description="Mandatory valid website URL you want to search on") + +class WebsiteSearchTool(RagTool): + name: str = "Search in a specific website" + description: str = "A tool that can be used to semantic search a query from a specific URL content." 
+ summarize: bool = False + args_schema: Type[BaseModel] = WebsiteSearchToolSchema + website: Optional[str] = None + + def __init__(self, website: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if website is not None: + self.website = website + self.description = f"A tool that can be used to semantic search a query from {website} website content." + self.args_schema = FixedWebsiteSearchToolSchema + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + website = kwargs.get('website', self.website) + self.app = App() + self.app.add(website, data_type=DataType.WEB_PAGE) + return super()._run(query=search_query) \ No newline at end of file diff --git a/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py b/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py new file mode 100644 index 000000000..9259b819f --- /dev/null +++ b/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py @@ -0,0 +1,40 @@ +from typing import Optional, Type, Any +from pydantic.v1 import BaseModel, Field + +from embedchain import App +from embedchain.models.data_type import DataType + +from ..rag.rag_tool import RagTool + + +class FixedXMLSearchToolSchema(BaseModel): + """Input for XMLSearchTool.""" + search_query: str = Field(..., description="Mandatory search query you want to use to search the XML's content") + +class XMLSearchToolSchema(FixedXMLSearchToolSchema): + """Input for XMLSearchTool.""" + xml: str = Field(..., description="Mandatory xml path you want to search") + +class XMLSearchTool(RagTool): + name: str = "Search a XML's content" + description: str = "A tool that can be used to semantic search a query from a XML's content." + summarize: bool = False + args_schema: Type[BaseModel] = XMLSearchToolSchema + xml: Optional[str] = None + + def __init__(self, xml: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if xml is not None: + self.xml = xml + self.description = f"A tool that can be used to semantic search a query the {xml} XML's content." + self.args_schema = FixedXMLSearchToolSchema + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + xml = kwargs.get('xml', self.xml) + self.app = App() + self.app.add(xml, data_type=DataType.XML) + return super()._run(query=search_query) \ No newline at end of file diff --git a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py new file mode 100644 index 000000000..9b4e51688 --- /dev/null +++ b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py @@ -0,0 +1,42 @@ +from typing import Optional, Type, Any +from pydantic.v1 import BaseModel, Field + +from embedchain import App +from embedchain.models.data_type import DataType + +from ..rag.rag_tool import RagTool + + +class FixedYoutubeChannelSearchToolSchema(BaseModel): + """Input for YoutubeChannelSearchTool.""" + search_query: str = Field(..., description="Mandatory search query you want to use to search the Youtube Channels content") + +class YoutubeChannelSearchToolSchema(FixedYoutubeChannelSearchToolSchema): + """Input for YoutubeChannelSearchTool.""" + youtube_channel_handle: str = Field(..., description="Mandatory youtube_channel_handle path you want to search") + +class YoutubeChannelSearchTool(RagTool): + name: str = "Search a Youtube Channels content" + description: str = "A tool that can be used to semantic search a query from a Youtube Channels content." 
+ summarize: bool = False + args_schema: Type[BaseModel] = YoutubeChannelSearchToolSchema + youtube_channel_handle: Optional[str] = None + + def __init__(self, youtube_channel_handle: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if youtube_channel_handle is not None: + self.youtube_channel_handle = youtube_channel_handle + self.description = f"A tool that can be used to semantic search a query the {youtube_channel_handle} Youtube Channels content." + self.args_schema = FixedYoutubeChannelSearchToolSchema + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + youtube_channel_handle = kwargs.get('youtube_channel_handle', self.youtube_channel_handle) + if not youtube_channel_handle.startswith("@"): + youtube_channel_handle = f"@{youtube_channel_handle}" + self.app = App() + self.app.add(youtube_channel_handle, data_type=DataType.YOUTUBE_CHANNEL) + return super()._run(query=search_query) \ No newline at end of file diff --git a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py new file mode 100644 index 000000000..7b26c8e90 --- /dev/null +++ b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py @@ -0,0 +1,40 @@ +from typing import Optional, Type, Any +from pydantic.v1 import BaseModel, Field + +from embedchain import App +from embedchain.models.data_type import DataType + +from ..rag.rag_tool import RagTool + + +class FixedYoutubeVideoSearchToolSchema(BaseModel): + """Input for YoutubeVideoSearchTool.""" + search_query: str = Field(..., description="Mandatory search query you want to use to search the Youtube Video content") + +class YoutubeVideoSearchToolSchema(FixedYoutubeVideoSearchToolSchema): + """Input for YoutubeVideoSearchTool.""" + youtube_video_url: str = Field(..., description="Mandatory youtube_video_url path you want to search") + +class YoutubeVideoSearchTool(RagTool): + name: str = "Search a Youtube Video content" + description: str = "A tool that can be used to semantic search a query from a Youtube Video content." + summarize: bool = False + args_schema: Type[BaseModel] = YoutubeVideoSearchToolSchema + youtube_video_url: Optional[str] = None + + def __init__(self, youtube_video_url: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if youtube_video_url is not None: + self.youtube_video_url = youtube_video_url + self.description = f"A tool that can be used to semantic search a query the {youtube_video_url} Youtube Video content." 
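A usage sketch for the two YouTube tools (the handle and URL are placeholders); note the channel tool normalizes a missing `@` prefix before handing the handle to embedchain:

```python
from crewai_tools import YoutubeChannelSearchTool, YoutubeVideoSearchTool

channel_tool = YoutubeChannelSearchTool(youtube_channel_handle="somechannel")
channel_tool.run(search_query="episodes about vector databases")  # searched as "@somechannel"

video_tool = YoutubeVideoSearchTool(youtube_video_url="https://www.youtube.com/watch?v=XXXXXXXXXXX")
video_tool.run(search_query="what is the main takeaway?")
```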
+ self.args_schema = FixedYoutubeVideoSearchToolSchema + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + youtube_video_url = kwargs.get('youtube_video_url', self.youtube_video_url) + self.app = App() + self.app.add(youtube_video_url, data_type=DataType.YOUTUBE_VIDEO) + return super()._run(query=search_query) \ No newline at end of file From 7c99e9ab508f5db89355c8b2e8238483bf4f52f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Sat, 24 Feb 2024 03:13:17 -0300 Subject: [PATCH 010/391] preparing new verion and adding new tools --- src/crewai_tools/__init__.py | 1 + src/crewai_tools/tools/__init__.py | 1 + .../directory_read_tool.py | 33 +++++++++++++++++++ .../tools/file_read_tool/file_read_tool.py | 2 +- 4 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 src/crewai_tools/tools/directory_read_tool/directory_read_tool.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index a6a269be6..e643bb829 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -4,6 +4,7 @@ from .tools import ( CSVSearchTool, DirectorySearchTool, DOCXSearchTool, + DirectoryReadTool, FileReadTool, GithubSearchTool, TXTSearchTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 2910185ec..e2382eb9b 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -1,6 +1,7 @@ from .code_docs_search_tool.code_docs_search_tool import CodeDocsSearchTool from .csv_search_tool.csv_search_tool import CSVSearchTool from .directory_search_tool.directory_search_tool import DirectorySearchTool +from .directory_read_tool.directory_read_tool import DirectoryReadTool from .docx_search_tool.docx_search_tool import DOCXSearchTool from .file_read_tool.file_read_tool import FileReadTool from .github_search_tool.github_search_tool import GithubSearchTool diff --git a/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py b/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py new file mode 100644 index 000000000..7dc6c1a5d --- /dev/null +++ b/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py @@ -0,0 +1,33 @@ +import os +from typing import Optional, Type, Any +from pydantic.v1 import BaseModel, Field +from ..base_tool import BaseTool + +class FixedDirectoryReadToolSchema(BaseModel): + """Input for DirectoryReadTool.""" + pass + +class DirectoryReadToolSchema(FixedDirectoryReadToolSchema): + """Input for DirectoryReadTool.""" + directory: str = Field(..., description="Mandatory directory to list content") + +class DirectoryReadTool(BaseTool): + name: str = "List files in directory" + description: str = "A tool that can be used to recursively list a directory's content." + args_schema: Type[BaseModel] = DirectoryReadToolSchema + directory: Optional[str] = None + + def __init__(self, directory: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if directory is not None: + self.directory = directory + self.description = f"A tool that can be used to list {directory}'s content." 
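A quick sketch of the two plain read tools from this and the previous commit (paths are placeholders):

```python
from crewai_tools import DirectoryReadTool, FileReadTool

dir_tool = DirectoryReadTool(directory="./src")
dir_tool.run()  # walks ./src recursively and returns every file path found

file_tool = FileReadTool()  # dynamic mode: file_path supplied per call
file_tool.run(file_path="./src/crewai_tools/__init__.py")  # returns the file's text
```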
+ self.args_schema = FixedDirectoryReadToolSchema + + def _run( + self, + **kwargs: Any, + ) -> Any: + directory = kwargs.get('directory', self.directory) + return [(os.path.join(root, file).replace(directory, "").lstrip(os.path.sep)) for root, dirs, files in os.walk(directory) for file in files] + diff --git a/src/crewai_tools/tools/file_read_tool/file_read_tool.py b/src/crewai_tools/tools/file_read_tool/file_read_tool.py index 0721ec127..8c2e8dcca 100644 --- a/src/crewai_tools/tools/file_read_tool/file_read_tool.py +++ b/src/crewai_tools/tools/file_read_tool/file_read_tool.py @@ -8,7 +8,7 @@ class FixedFileReadToolSchema(BaseModel): class FileReadToolSchema(FixedFileReadToolSchema): """Input for FileReadTool.""" - file_path: str = Field(..., description="Mandatory file path to read the file") + file_path: str = Field(..., description="Mandatory file full path to read the file") class FileReadTool(BaseTool): name: str = "Read a file's content" From 50bae27948a92534f0b0eb9fd3a8ccf43e1ea549 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Sun, 25 Feb 2024 21:11:09 -0300 Subject: [PATCH 011/391] revamping crewai tool --- README.md | 19 ++--- src/crewai_tools/__init__.py | 2 +- src/crewai_tools/tools/base_tool.py | 59 ++++++++++----- .../directory_read_tool.py | 6 +- tests/__init__.py | 0 tests/adapters/embedchain_adapter_test.py | 67 ------------------ tests/adapters/lancedb_adapter_test.py | 22 ------ tests/base_tool_test.py | 46 ++++++++++++ tests/data/chromadb/chroma.sqlite3 | Bin 159744 -> 0 bytes .../data_level0.bin | Bin 6284000 -> 0 bytes .../header.bin | Bin 100 -> 0 bytes .../length.bin | Bin 4000 -> 0 bytes .../link_lists.bin | 0 tests/data/embedding.txt | 1 - .../requirements.lance/_latest.manifest | Bin 237 -> 0 bytes ...0-d2c46569-d173-4b3f-b589-f8f00eddc371.txn | 1 - ...1-5ae04c7e-dae3-47e8-92e9-6b84b7a4d035.txn | Bin 97 -> 0 bytes .../requirements.lance/_versions/1.manifest | Bin 183 -> 0 bytes .../requirements.lance/_versions/2.manifest | Bin 237 -> 0 bytes ...2164da72-df18-4c76-9f6f-d51cc6139c92.lance | Bin 19605 -> 0 bytes tests/tools/rag/rag_tool_test.py | 21 ------ 21 files changed, 100 insertions(+), 144 deletions(-) delete mode 100644 tests/__init__.py delete mode 100644 tests/adapters/embedchain_adapter_test.py delete mode 100644 tests/adapters/lancedb_adapter_test.py create mode 100644 tests/base_tool_test.py delete mode 100644 tests/data/chromadb/chroma.sqlite3 delete mode 100644 tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/data_level0.bin delete mode 100644 tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/header.bin delete mode 100644 tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/length.bin delete mode 100644 tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/link_lists.bin delete mode 100644 tests/data/embedding.txt delete mode 100644 tests/data/lancedb/requirements.lance/_latest.manifest delete mode 100644 tests/data/lancedb/requirements.lance/_transactions/0-d2c46569-d173-4b3f-b589-f8f00eddc371.txn delete mode 100644 tests/data/lancedb/requirements.lance/_transactions/1-5ae04c7e-dae3-47e8-92e9-6b84b7a4d035.txn delete mode 100644 tests/data/lancedb/requirements.lance/_versions/1.manifest delete mode 100644 tests/data/lancedb/requirements.lance/_versions/2.manifest delete mode 100644 tests/data/lancedb/requirements.lance/data/2164da72-df18-4c76-9f6f-d51cc6139c92.lance delete mode 100644 tests/tools/rag/rag_tool_test.py diff --git a/README.md b/README.md index 818b75bbf..0287c6c3b 100644 --- a/README.md +++ 
b/README.md @@ -23,7 +23,6 @@ In the realm of CrewAI agents, tools are pivotal for enhancing functionality. Th - [Creating Your Tools](#creating-your-tools) - [Subclassing `BaseTool`](#subclassing-basetool) - - [Functional Tool Creation](#functional-tool-creation) - [Utilizing the `tool` Decorator](#utilizing-the-tool-decorator) - [Contribution Guidelines](#contribution-guidelines) - [Development Setup](#development-setup) @@ -40,32 +39,26 @@ There are three ways to create tools for crewAI agents: ### Subclassing `BaseTool` ```python +from crewai_tools import BaseTool + class MyCustomTool(BaseTool): name: str = "Name of my tool" description: str = "Clear description for what this tool is useful for, you agent will need this information to use it." - def _run(self, argument) -> str: + def _run(self, argument: str) -> str: # Implementation goes here pass ``` Define a new class inheriting from `BaseTool`, specifying `name`, `description`, and the `_run` method for operational logic. -### Functional Tool Creation - -```python -my_tool = Tool( - name="Name of my tool" - description="Clear description for what this tool is useful for, you agent will need this information to use it.", - func=lambda argument: # Your function logic here -) -``` - -For a simpler approach, create a `Tool` object directly with the required attributes and a functional logic. ### Utilizing the `tool` Decorator +For a simpler approach, create a `Tool` object directly with the required attributes and a functional logic. + ```python +from crewai_tools import tool @tool("Name of my tool") def my_tool(question: str) -> str: """Clear description for what this tool is useful for, you agent will need this information to use it.""" diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index e643bb829..6ed1c5d65 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -1,4 +1,4 @@ -from .tools.base_tool import BaseTool, Tool, as_tool, tool +from .tools.base_tool import BaseTool, Tool, tool from .tools import ( CodeDocsSearchTool, CSVSearchTool, diff --git a/src/crewai_tools/tools/base_tool.py b/src/crewai_tools/tools/base_tool.py index e2fb18395..dc679f833 100644 --- a/src/crewai_tools/tools/base_tool.py +++ b/src/crewai_tools/tools/base_tool.py @@ -1,18 +1,24 @@ from abc import ABC, abstractmethod from typing import Any, Callable, cast, Optional, Type -from langchain.agents import tools as langchain_tools -from pydantic import BaseModel +from pydantic import BaseModel, model_validator +from pydantic.v1 import BaseModel as V1BaseModel +from langchain_core.tools import StructuredTool class BaseTool(BaseModel, ABC): name: str """The unique name of the tool that clearly communicates its purpose.""" description: str """Used to tell the model how/when/why to use the tool.""" - args_schema: Optional[Type[BaseModel]] = None + args_schema: Optional[Type[V1BaseModel]] = None """The schema for the arguments that the tool accepts.""" + @model_validator(mode="after") + def _check_args_schema(self): + self._set_args_schema() + return self + def run( self, *args: Any, @@ -29,14 +35,28 @@ class BaseTool(BaseModel, ABC): ) -> Any: """Here goes the actual implementation of the tool.""" - def to_langchain(self) -> langchain_tools.Tool: - return langchain_tools.Tool( + def to_langchain(self) -> StructuredTool: + self._set_args_schema() + return StructuredTool( name=self.name, description=self.description, args_schema=self.args_schema, func=self._run, ) + def _set_args_schema(self): + if self.args_schema is 
None: + class_name = f"{self.__class__.__name__}Schema" + self.args_schema = type( + class_name, + (V1BaseModel,), + { + "__annotations__": { + k: v for k, v in self._run.__annotations__.items() if k != 'return' + }, + }, + ) + class Tool(BaseTool): func: Callable @@ -47,8 +67,8 @@ class Tool(BaseTool): def to_langchain( - tools: list[BaseTool | langchain_tools.BaseTool], -) -> list[langchain_tools.BaseTool]: + tools: list[BaseTool | StructuredTool], +) -> list[StructuredTool]: return [t.to_langchain() if isinstance(t, BaseTool) else t for t in tools] @@ -62,10 +82,24 @@ def tool(*args): if f.__doc__ is None: raise ValueError("Function must have a docstring") + args_schema = None + if f.__annotations__: + class_name = "".join(tool_name.split()).title() + args_schema = type( + class_name, + (V1BaseModel,), + { + "__annotations__": { + k: v for k, v in f.__annotations__.items() if k != 'return' + }, + }, + ) + return Tool( name=tool_name, description=f.__doc__, func=f, + args_schema=args_schema, ) return _make_tool @@ -74,13 +108,4 @@ def tool(*args): return _make_with_name(args[0].__name__)(args[0]) if len(args) == 1 and isinstance(args[0], str): return _make_with_name(args[0]) - raise ValueError("Invalid arguments") - - -def as_tool(f: Any) -> BaseTool: - """ - Useful for when you create a tool using the @tool decorator and want to use it as a BaseTool. - It is a BaseTool, but type inference doesn't know that. - """ - assert isinstance(f, BaseTool) - return cast(BaseTool, f) + raise ValueError("Invalid arguments") \ No newline at end of file diff --git a/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py b/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py index 7dc6c1a5d..94fcce076 100644 --- a/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py +++ b/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py @@ -29,5 +29,9 @@ class DirectoryReadTool(BaseTool): **kwargs: Any, ) -> Any: directory = kwargs.get('directory', self.directory) - return [(os.path.join(root, file).replace(directory, "").lstrip(os.path.sep)) for root, dirs, files in os.walk(directory) for file in files] + if directory[-1] == "/": + directory = directory[:-1] + files_list = [f"{directory}/{(os.path.join(root, filename).replace(directory, '').lstrip(os.path.sep))}" for root, dirs, files in os.walk(directory) for filename in files] + files = "\n- ".join(files_list) + return f"File paths: \n-{files}" diff --git a/tests/__init__.py b/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tests/adapters/embedchain_adapter_test.py b/tests/adapters/embedchain_adapter_test.py deleted file mode 100644 index 06a3ac9c8..000000000 --- a/tests/adapters/embedchain_adapter_test.py +++ /dev/null @@ -1,67 +0,0 @@ -from typing import Callable - -from chromadb import Documents, EmbeddingFunction, Embeddings -from embedchain import App -from embedchain.config import AppConfig, ChromaDbConfig -from embedchain.embedder.base import BaseEmbedder -from embedchain.vectordb.chroma import ChromaDB - -from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter - - -class MockEmbeddingFunction(EmbeddingFunction): - fn: Callable - - def __init__(self, embedding_fn: Callable): - self.fn = embedding_fn - - def __call__(self, input: Documents) -> Embeddings: - return self.fn(input) - - -def test_embedchain_adapter(helpers): - embedding_function = MockEmbeddingFunction( - embedding_fn=helpers.get_embedding_function() - ) - embedder = BaseEmbedder() - 
embedder.set_embedding_fn(embedding_function) # type: ignore - - db = ChromaDB( - config=ChromaDbConfig( - dir="tests/data/chromadb", - collection_name="requirements", - ) - ) - - app = App( - config=AppConfig( - id="test", - ), - db=db, - embedding_model=embedder, - ) - - adapter = EmbedchainAdapter( - dry_run=True, - embedchain_app=app, - ) - - assert ( - adapter.query("What are the requirements for the task?") - == """ - Use the following pieces of context to answer the query at the end. - If you don't know the answer, just say that you don't know, don't try to make up an answer. - - Technical requirements - -The system should be able to process 1000 transactions per second. The code must be written in Ruby. | Problem - -Currently, we are not able to find out palindromes in a given string. We need a solution to this problem. | Solution - -We need a function that takes a string as input and returns true if the string is a palindrome, otherwise false. - - Query: What are the requirements for the task? - - Helpful Answer: -""" - ) diff --git a/tests/adapters/lancedb_adapter_test.py b/tests/adapters/lancedb_adapter_test.py deleted file mode 100644 index bc4d6ba4f..000000000 --- a/tests/adapters/lancedb_adapter_test.py +++ /dev/null @@ -1,22 +0,0 @@ -from crewai_tools.adapters.lancedb_adapter import LanceDBAdapter - - -def test_lancedb_adapter(helpers): - adapter = LanceDBAdapter( - uri="tests/data/lancedb", - table_name="requirements", - embedding_function=helpers.get_embedding_function(), - top_k=2, - vector_column_name="vector", - text_column_name="text", - ) - - assert ( - adapter.query("What are the requirements for the task?") - == """Technical requirements - -The system should be able to process 1000 transactions per second. The code must be written in Ruby. -Problem - -Currently, we are not able to find out palindromes in a given string. We need a solution to this problem.""" - ) diff --git a/tests/base_tool_test.py b/tests/base_tool_test.py new file mode 100644 index 000000000..e7ecbf8d9 --- /dev/null +++ b/tests/base_tool_test.py @@ -0,0 +1,46 @@ +import json +import pydantic_core +import pytest +from crewai_tools import BaseTool, tool + +def test_creating_a_tool_using_annotation(): + @tool("Name of my tool") + def my_tool(question: str) -> str: + """Clear description for what this tool is useful for, you agent will need this information to use it.""" + return question + + # Assert all the right attributes were defined + assert my_tool.name == "Name of my tool" + assert my_tool.description == "Clear description for what this tool is useful for, you agent will need this information to use it." + assert my_tool.args_schema.schema()["properties"] == {'question': {'title': 'Question', 'type': 'string'}} + assert my_tool.func("What is the meaning of life?") == "What is the meaning of life?" + + # Assert the langchain tool conversion worked as expected + converted_tool = my_tool.to_langchain() + assert converted_tool.name == "Name of my tool" + assert converted_tool.description == "Clear description for what this tool is useful for, you agent will need this information to use it." + assert converted_tool.args_schema.schema()["properties"] == {'question': {'title': 'Question', 'type': 'string'}} + assert converted_tool.func("What is the meaning of life?") == "What is the meaning of life?" 
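A further test sketch in the same style, not in the patch, covering the remaining public surface: the plain `Tool` wrapper, which is exported from `crewai_tools` alongside `BaseTool`:

```python
from crewai_tools import Tool

def test_creating_a_tool_from_a_function():
    my_tool = Tool(
        name="Echo",
        description="Returns whatever it is given.",
        func=lambda question: question,
    )
    # run() delegates to _run(), which calls the wrapped function.
    assert my_tool.name == "Echo"
    assert my_tool.run("ping") == "ping"
```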
+
+def test_creating_a_tool_using_baseclass():
+    class MyCustomTool(BaseTool):
+        name: str = "Name of my tool"
+        description: str = "Clear description for what this tool is useful for, your agent will need this information to use it."
+
+        def _run(self, question: str) -> str:
+            return question
+
+    my_tool = MyCustomTool()
+    # Assert all the right attributes were defined
+    assert my_tool.name == "Name of my tool"
+    assert my_tool.description == "Clear description for what this tool is useful for, your agent will need this information to use it."
+    assert my_tool.args_schema.schema()["properties"] == {'question': {'title': 'Question', 'type': 'string'}}
+    assert my_tool.run("What is the meaning of life?") == "What is the meaning of life?"
+
+    # Assert the langchain tool conversion worked as expected
+    converted_tool = my_tool.to_langchain()
+    assert converted_tool.name == "Name of my tool"
+    assert converted_tool.description == "Clear description for what this tool is useful for, your agent will need this information to use it."
+    assert converted_tool.args_schema.schema()["properties"] == {'question': {'title': 'Question', 'type': 'string'}}
+    assert converted_tool.run("What is the meaning of life?") == "What is the meaning of life?"
+
diff --git a/tests/data/chromadb/chroma.sqlite3 b/tests/data/chromadb/chroma.sqlite3
deleted file mode 100644
index 9c9cf0e56ca4d2d2470cc098267349634a2c1cc8..0000000000000000000000000000000000000000
GIT binary patch
[base85-encoded binary data omitted (deleted SQLite test fixture, 159744 bytes)]
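The `args_schema` generation added to `base_tool.py` above builds a pydantic model class at runtime from the tool function's type annotations, which is exactly what the `args_schema.schema()["properties"]` assertions in `tests/base_tool_test.py` exercise. Here is a minimal standalone sketch of that pattern, assuming pydantic 2.x (whose `pydantic.v1` module supplies the v1-style `BaseModel` the diff imports as `V1BaseModel`); the `my_tool` and `MyToolSchema` names are illustrative only:

```python
# A minimal sketch of the runtime schema generation used in base_tool.py.
# Assumption: pydantic >= 2 is installed, so pydantic.v1 provides the
# v1-compatible BaseModel that the patch refers to as V1BaseModel.
from pydantic.v1 import BaseModel as V1BaseModel


def my_tool(question: str) -> str:
    """Answer a question."""
    return question


# Build a model class from the function's annotations, dropping "return".
MyToolSchema = type(
    "MyToolSchema",
    (V1BaseModel,),
    {
        "__annotations__": {
            k: v for k, v in my_tool.__annotations__.items() if k != "return"
        },
    },
)

print(MyToolSchema.schema()["properties"])
# {'question': {'title': 'Question', 'type': 'string'}}
```

Because only annotations (not defaults) are copied, every annotated parameter becomes a required field, and the generated schema is the same object the tests see again after `to_langchain()` wraps the tool as a LangChain `StructuredTool`.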
diff --git a/tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/data_level0.bin b/tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/data_level0.bin
deleted file mode 100644
index ea3192e8ec5112eb8b4f2a7bde5d13bbb95e6ac0..0000000000000000000000000000000000000000
GIT binary patch
[base85-encoded binary data omitted (deleted index fixture, 6284000 bytes)]

diff --git a/tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/header.bin b/tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/header.bin
deleted file mode 100644
index 3e0932a7d0033aedf7dad4109641188fbb1f6091..0000000000000000000000000000000000000000
GIT binary patch
[base85-encoded binary data omitted (100 bytes)]

diff --git a/tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/length.bin b/tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/length.bin
deleted file mode 100644
index 1dc89f8e47e591af4c36a35046077f0ba1d1ef9d..0000000000000000000000000000000000000000
GIT binary patch
[base85-encoded binary data omitted (4000 bytes)]

diff --git a/tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/link_lists.bin b/tests/data/chromadb/ff9cc21e-277f-4dc2-b92c-5d55f29311f2/link_lists.bin
deleted file mode 100644
index e69de29bb..000000000
diff --git a/tests/data/embedding.txt b/tests/data/embedding.txt
deleted file mode 100644
index c3cc374c2..000000000
--- a/tests/data/embedding.txt
+++ /dev/null
@@ -1 +0,0 @@
-[one long line of comma-separated floats omitted: the embedding-vector fixture used by the deleted adapter tests]
-0.004522519186139107, -0.02803827077150345, 0.03092297725379467, 0.004411309491842985, -0.0002287376846652478, 0.020597344264388084, 0.015218849293887615, -0.0026269028894603252, -0.011997144669294357, -0.000433464243542403, 0.01713300123810768, 0.025962360203266144, 0.0021685848478227854, 0.014019135385751724, 0.01903367228806019, -0.003508153837174177, 0.007326345890760422, -0.015286249108612537, -0.021244380623102188, -0.02191837877035141, 0.006305240560323, -0.038336943835020065, -0.007050007116049528, -0.0011550621129572392, -0.01621636562049389, 0.00021378337987698615, -0.0058064828626811504, 0.011936484836041927, 0.0009318006923422217, -0.02216101810336113, 0.00864063948392868, -0.002648807829245925, -0.01830575428903103, -0.0008745109662413597, 0.0035452235024422407, 0.0331067256629467, -0.009227016940712929, 0.0033312295563519, -0.01769915781915188, 0.033942483365535736, 0.0009014708339236677, 0.02021990530192852, -0.006891618017107248, 0.029035786166787148, -0.016782522201538086, 0.01173428539186716, -0.010817649774253368, -0.010837869718670845, -0.020597344264388084, 0.012765500694513321, 0.018831472843885422, 0.007211766671389341, 0.025301842018961906, -0.012408282607793808, 0.008842838928103447, -0.017173441126942635, 0.016782522201538086, -0.01964026875793934, -0.0016723547596484423, -0.006847808137536049, 0.02189141884446144, 0.00038122947444207966, -0.001764186774380505, 0.0025746680330485106, 0.0011213623220100999, 0.01900671236217022, 0.004953877069056034, -0.002926831366494298, -0.021244380623102188, -0.000591011019423604, -0.0057357135228812695, 0.015407568775117397, -0.006274911109358072, 0.002881336724385619, 0.007225246634334326, -0.025113124400377274, 0.013365358114242554, -0.0008104812586680055, -0.015097529627382755, 0.010844609700143337, 0.006082821637392044, 0.005641353782266378, -0.007258946541696787, 0.016148965805768967, 0.03019505925476551, 0.021567899733781815, -0.0033952591475099325, -0.006325460504740477, 0.00723872659727931, 0.015528888441622257, -0.0021972297690808773, -0.02944018319249153, 0.00900459848344326, 0.021837498992681503, 0.01800919696688652, 0.011788205243647099, -0.012246523052453995, -0.013668656349182129, -0.002812251914292574, -0.008344081230461597, 0.020233385264873505, 0.004542739130556583, 0.00920005701482296, -0.02398080937564373, -0.0023421391379088163, -0.005742453504353762, -0.009098958224058151, 0.00953705608844757, -0.008317121304571629, 0.025059204548597336, -0.010238012298941612, 0.008916978724300861, -0.009948194026947021, -0.6888787150382996, -0.009193317033350468, 0.03343024477362633, -0.02873922698199749, -0.00014174997340887785, 0.023697730153799057, -0.0012350992765277624, 0.008654119446873665, -0.00398669159039855, 0.0304646585136652, 0.005186405964195728, -0.0051931459456682205, 0.02055690437555313, -0.021015223115682602, -0.014827931299805641, -0.02871226705610752, 0.00853280071169138, -4.6784862206550315e-05, -0.0058098528534173965, 0.02152745984494686, -0.03210921213030815, 0.010029073804616928, 0.0010969298891723156, -0.0033093246165663004, 0.006588319316506386, 0.0008395473705604672, 0.01066937018185854, -0.0343199223279953, 0.007326345890760422, 0.0034542339853942394, -0.007420705631375313, 0.03016809932887554, 0.011377067305147648, 0.004475339315831661, 0.029871542006731033, -0.02570624090731144, -0.04669450223445892, 0.014720091596245766, -0.023684250190854073, 0.022363215684890747, -0.018723633140325546, -0.017106041312217712, 0.015461488626897335, -0.007818363606929779, -0.003892331849783659, 
0.011929744854569435, 0.022093618288636208, 0.0016386548522859812, 0.014693131670355797, 0.012880080379545689, -0.015879366546869278, 0.009267456829547882, -0.001182022038847208, 0.019653748720884323, -0.0015181779162958264, -0.030060261487960815, 0.024169526994228363, 0.010136912576854229, -0.009078738279640675, -0.008398001082241535, 0.005782893393188715, 0.010298672132194042, -0.02371121011674404, -0.029305383563041687, -0.020934343338012695, 0.007595944683998823, -0.014086534269154072, 0.014625731855630875, -0.0051324861124157906, -0.030491618439555168, 0.0009301156969740987, -0.009449436329305172, 0.006423190236091614, 0.014895331114530563, 0.015030130743980408, 0.006517549976706505, 0.004394459538161755, -0.017348678782582283, 0.012597001157701015, 7.60879265726544e-05, 0.007467885501682758, -0.004407939501106739, -0.009024818427860737, 0.0016453948337584734, 0.01888539269566536, -0.003915921784937382, -0.01426177378743887, -0.029062746092677116, 0.014720091596245766, 0.003663172945380211, 0.027175553143024445, 0.032756246626377106, -0.003065000753849745, -0.025908440351486206, -0.020139027386903763, 0.014531373046338558, -0.013270998373627663, -0.023657290264964104, 0.015245809219777584, -0.0029655862599611282, 0.007191546726971865, -0.007279166020452976, 0.003912551794201136, -0.013452977873384953, 0.03216313198208809, -0.007764444220811129, -0.007528544869273901, 0.02085346356034279, 0.0363149531185627, -0.01213868334889412, 0.0026926174759864807, 0.005985092371702194, -0.03135433420538902, -0.00711740693077445, -0.015865886583924294, -0.031300414353609085, 0.01961330883204937, -0.007663344498723745, 0.026717236265540123, -0.0097931744530797, 0.018211396411061287, 0.007346565835177898, 0.024964844807982445, -0.039604056626558304, -0.004445009399205446, 0.03609927371144295, 0.007528544869273901, -0.021028703078627586, 0.00466068834066391, 0.007535284850746393, -0.01730823889374733, 0.0031408255454152822, -0.011309667490422726, -0.01786091737449169, 0.029790662229061127, 0.011046809144318104, -0.007831843569874763, -0.008519320748746395, 0.019545909017324448, -0.02601628005504608, -0.01861579343676567, -0.007919463329017162, -0.0024853635113686323, -0.028874026611447334, -0.019599828869104385, -0.009456176310777664, -0.005092046223580837, 0.0046775382943451405, 0.0014196059200912714, -0.0018686563707888126, -0.013816935941576958, -0.026380237191915512, -0.019047152251005173, 0.004286620300263166, 0.013938255608081818, -0.01400565542280674, -0.008337341248989105, -0.020745623856782913, 0.011464687064290047, -0.02218797616660595, -0.006675939075648785, -0.007097186986356974, -0.018090076744556427, 0.00785206351429224, -0.009719034656882286, -0.009456176310777664, 0.007103926967829466, 0.03437384217977524, -0.013001400046050549, -0.05828724801540375, -0.0003245716216042638, -0.007231986615806818, -0.001015207846648991, 0.026501556858420372, -0.025220962241292, -0.003218335099518299, -0.006962387822568417, -0.0019798658322542906, -0.006301870569586754, -0.012981180101633072, 0.0017439669463783503, -0.00012131944095017388, -0.004650578368455172, -0.005513294599950314, 0.013500157743692398, 0.009887534193694592, 0.02194533869624138, 0.017833957448601723, -0.004785377997905016, 0.03283712640404701, 0.0023202341981232166, 0.011289447546005249, 0.012165643274784088, 0.009739254601299763, 0.01144446711987257, 0.0006099671591073275, 0.016148965805768967, 0.0029419963248074055, -0.0012822790304198861, 0.009166357107460499, 0.037150707095861435, -0.0011314722942188382, 0.02286197431385517, 
-0.005489704664796591, 0.002625217894092202, 0.0035452235024422407, 0.022848494350910187, -0.013439497910439968, 0.012078024446964264, 0.017510438337922096, 0.015137969516217709, -0.0500914491713047, -0.014962730929255486, -0.005462744738906622, 0.0028274168726056814, -0.0012308867881074548, -0.00835082121193409, 0.02228233590722084, -0.04771897941827774, 0.016135485842823982, 0.012691360898315907, 0.011808425188064575, 0.04362107813358307, -0.01579848676919937, -0.01606808602809906, -0.0028931316919624805, 0.019208911806344986, 0.02895490638911724, 0.009038298390805721, -0.0066119092516601086, -0.001964700873941183, -0.013156418688595295, 0.014639211818575859, -0.004013651516288519, 0.01173428539186716, 0.004552849102765322, 0.009651634842157364, -0.013992175459861755, 0.009408996440470219, -0.002134885173290968, 0.010736769996583462, 0.019060632213950157, 0.01320359855890274, -0.004825817421078682, 0.042273085564374924, -0.015515408478677273, 0.028577467426657677, 0.021810539066791534, -0.007191546726971865, 0.014625731855630875, -0.0012342567788437009, -0.019626788794994354, -0.03162393346428871, 0.0032048551365733147, -0.011484907008707523, 0.006679309066385031, 0.0016133800381794572, -0.00010225795995211229, -0.0037137228064239025, 0.029817622154951096, 0.0326753668487072, 0.0159467663615942, 0.036153193563222885, 0.002785291988402605, 0.014194373972713947, -0.006467000115662813, -0.014720091596245766, -0.012677880935370922, 0.01725432090461254, -0.020260345190763474, 0.005159446038305759, -0.0029723262414336205, 0.010568271391093731, -0.008047522976994514, 0.017591318115592003, 0.013520377688109875, 0.0033699844498187304, 0.0152323292568326, 0.0014676281716674566, 0.018049636855721474, -0.03537135571241379, -0.01247568242251873, 0.018292274326086044, 0.01910107210278511, -0.0033059546258300543, -0.003932771738618612, 0.0033817794173955917, 0.015218849293887615, -0.011862345039844513, 0.03534439578652382, 0.021837498992681503, -0.021540939807891846, 0.01279246062040329, 0.004461859352886677, 0.00847214087843895, 0.0030110811349004507, 0.018925832584500313, 0.024034729227423668, 0.009186577051877975, -0.025409681722521782, 0.00523695582523942, -0.014019135385751724, -0.022174498066306114, -0.030599458143115044, 0.024883965030312538, 0.0006335570360533893, -0.002844266826286912, -0.019559388980269432, -0.00037048765807412565, -0.010116692632436752, -0.00829016137868166, -0.0005076039233244956, 0.004054091405123472, -0.015097529627382755, 0.022336255759000778, 0.0034643439576029778, -0.017321718856692314, 0.0018366414587944746, 0.0284696277230978, -0.0020910752937197685, 0.005189775954931974, -0.017496958374977112, -0.003216650104150176, 0.001592317596077919, 0.048312097787857056, 0.029817622154951096, 0.009240496903657913, 0.020085107535123825, -0.019667228683829308, -0.012468942441046238, -0.025625361129641533, -0.014625731855630875, 0.00926071684807539, 0.005250435788184404, 0.008687819354236126, -0.0042562903836369514, 0.008081222884356976, 0.0023775240406394005, 0.008802399039268494, 0.013021619990468025, 0.009119178168475628, -0.011923004873096943, 0.001608325052075088, -0.006305240560323, -0.016270285472273827, 0.006227731239050627, 0.026272397488355637, 0.011134427972137928, 0.007467885501682758, 0.002615107921883464, 0.00426640035584569, 0.02718903310596943, 0.004232700448483229, -0.002045580418780446, -0.008869798853993416, 0.007427445612847805, 0.015596287325024605, -0.013244038447737694, 0.0023539341054856777, 0.04014325514435768, 0.00963815487921238, 0.02774171158671379, 
0.010709810070693493, 0.006059231702238321, 0.01854839362204075, -0.0105952313169837, 0.005604283884167671, 0.0010598601074889302, 0.02652851678431034, -0.010743509978055954, -0.013190118595957756, 0.031300414353609085, -0.008081222884356976, 0.002584778005257249, 0.03464343771338463, 0.012441982515156269, -0.010372811928391457, -0.00261847791261971, 0.0017962016863748431, 0.006814108230173588, -0.007750964257866144, -0.013115978799760342, 0.008896758779883385, -0.03701590746641159, -0.023576410487294197, -0.0037204627878963947, 0.0211904626339674, 0.009658374823629856, -0.004876367282122374, 0.006231101229786873, -0.03356504440307617, -0.017901357263326645, -0.020502984523773193, 0.0068376981653273106, -0.005671683698892593, -0.023118093609809875, -0.03254057094454765, 0.0026841924991458654, 0.0304646585136652, 0.008209281601011753, 0.029521062970161438, -0.007798143662512302, 0.013796715997159481, 0.006258061155676842, 0.0026606025639921427, -0.025719720870256424, 0.003177895210683346, 0.002901556435972452, 0.01035259198397398, -7.677245594095439e-05, -0.031111694872379303, 0.010965929366648197, 0.0031256605871021748, 0.011848865076899529, -0.0029200913850218058, 0.021810539066791534, -0.008748479187488556, -0.04305491968989372, 0.016984721645712852, -0.007211766671389341, -0.00218206481076777, 0.02925146371126175, 0.0038013423327356577, -0.020637784153223038, -0.011363587342202663, -0.03806734457612038, -0.0043304297141730785, -0.01813051663339138, -0.003044780809432268, 0.0020927602890878916, 0.006726488936692476, 0.0010059403721243143, -0.022080138325691223, -0.02427736669778824, 0.0142348138615489, -0.02470872551202774, -0.0005771098076365888, -0.008000343106687069, 0.03391552343964577, 0.018480993807315826, 0.016108525916934013, 0.019047152251005173, 0.026784636080265045, 0.007501585409045219, 0.010521091520786285, -0.02944018319249153, 0.028307868167757988, 0.020691704005002975, -0.011714065447449684, -0.002397743985056877, 0.0027195774018764496, -0.034427762031555176, 0.0016529773129150271, -0.009948194026947021, 0.005732343532145023, 0.01903367228806019, 0.0077307443134486675, -0.021352220326662064, 0.0013311437796801329, -0.015636727213859558, -0.004269770346581936, 0.013722576200962067, -0.003845152212306857, -0.0028223618865013123, -0.0034542339853942394, -0.0019849208183586597, -0.016620762646198273, -0.013297958299517632, 0.008512580767273903, -0.03065337799489498, -0.0015341853722929955, 0.022969814017415047, -0.00829016137868166, 0.01579848676919937, -0.012826160527765751, 0.007407225668430328, -0.01447745319455862, -0.022565415129065514, -0.021365700289607048, -0.01755087822675705, -0.004916807170957327, -0.011646665632724762, 0.01985594816505909, 0.024358246475458145, 0.0391726978123188, -0.008815879002213478, 0.021540939807891846, -0.012428502552211285, 0.012441982515156269, 0.006568099372088909, 0.01679600216448307, -0.004859517328441143, -0.012711580842733383, -0.00723872659727931, 0.03434688225388527, 0.008189061656594276, 0.010305412113666534, -0.0024364986456930637, -0.0010716550750657916, 0.014922291040420532, -0.017928317189216614, -0.019316749647259712, -0.021473539993166924, -0.0284696277230978, 0.021972298622131348, 0.00028497431776486337, 0.004057461395859718, 0.0056750536896288395, -0.032028332352638245, -0.019289789721369743, 0.01897975243628025, 0.0105952313169837, 0.0007190704345703125, -0.007629644591361284, 0.021756619215011597, -0.015960246324539185, 0.009530316106975079, -0.012428502552211285, 0.017537398263812065, 1.3707016250918969e-06, 
-0.021500499919056892, -0.0242234468460083, 0.008991118520498276, 0.00243986863642931, 0.024627845734357834, -0.013830415904521942, 0.010905269533395767, 0.0035351135302335024, -0.005230215843766928, 0.012812680564820766, -0.0010859774192795157, 0.008344081230461597, -0.006167071405798197, -0.0030211908742785454, -0.0031745252199470997, -0.017968757078051567, -0.0023556191008538008, 0.014558332040905952, 0.0033076396211981773, 0.002446608617901802, -0.017941797152161598, 0.022835014387965202, -5.823754327138886e-05, -0.012597001157701015, 0.01010995265096426, 0.0007725689210928977, 0.03259448707103729, 0.02567928098142147, 0.030275939032435417, 0.00991449411958456, 0.005112266167998314, -0.01182190515100956, -0.010743509978055954, 0.003723832778632641, -0.0029723262414336205, 0.023212451487779617, 0.051008082926273346, -0.002589832991361618, 0.006345680449157953, 0.014639211818575859, 0.012745280750095844, -0.01891235262155533, -0.014490933157503605, 0.025086164474487305, 0.0017254319973289967, -0.010244752280414104, 0.00809470284730196, 0.005482964683324099, -0.01937066949903965, 0.015070569701492786, -0.007454405538737774, -0.004050721414387226, 0.016135485842823982, -0.02376512996852398, -0.00884957890957594, 0.015124489553272724, -0.02449304610490799, 0.0057660434395074844, 0.014046095311641693, 0.0004201949341222644, 0.013655176386237144, -0.016512922942638397, -0.004532629158347845, -0.002337084151804447, 0.008768699131906033, 0.01073003001511097, 0.017321718856692314, 0.0026504925917834044, -0.009550536051392555, 0.006682679057121277, -0.026744196191430092, -0.010548051446676254, 0.002079280326142907, 0.027269912883639336, -0.014881851151585579, -0.026973355561494827, 0.000777202658355236, 0.014693131670355797, 0.005890732631087303, 0.013365358114242554, -0.003211595118045807, -0.003181265201419592, 0.0025258034002035856, -0.002244409639388323, 0.01047391165047884, -0.01570412702858448, -0.03043769858777523, -0.008802399039268494, -0.002785291988402605, -0.008613680489361286, 0.006595059297978878, -0.009024818427860737, 0.008674339391291142, -0.02286197431385517, -0.010150392539799213, 0.0049673570320010185, 0.011437727138400078, -0.005499814637005329, 0.0018012566724792123, 0.029116664081811905, 0.026097159832715988, 0.023697730153799057, -0.017092561349272728, 0.001610010047443211, 0.003814822295680642, 0.028658347204327583, -0.01000211387872696, 0.01338557805866003, -0.012212823145091534, -0.03779774531722069, 0.029170583933591843, -0.0010767100611701608, -0.02691943570971489, -0.028604427352547646, 0.011073769070208073, -0.007144366856664419, -0.028685307130217552, -0.011714065447449684, -0.009193317033350468, -0.0034811939112842083, 0.011983664706349373, 0.017469998449087143, 0.01924934983253479, 0.013345138169825077, -0.016998201608657837, 0.018440553918480873, 0.027849551290273666, -0.008768699131906033, -0.003555333474650979, -0.01017061248421669, 0.005075196269899607, -0.011997144669294357, -0.032756246626377106, -0.010446951724588871, -0.014571812003850937, 0.002780237002298236, -0.009968413971364498, -0.0034508639946579933, -0.009847094304859638, 0.0073196059092879295, 0.0034643439576029778, 0.02701379545032978, -0.00269767246209085, -0.027418192476034164, -0.015259289182722569, 0.017631758004426956, 0.005270655732601881, 0.004583178553730249, -0.01934370957314968, -0.006271541118621826, -0.013695616275072098, -0.018480993807315826, -0.036180153489112854, -0.013237298466265202, -0.005695273634046316, 0.04526562988758087, -0.0021214052103459835, -0.005102156195789576, 
0.002330344170331955, -0.017416078597307205, -0.022754134610295296, -0.0177665576338768, -0.006072711665183306, -0.0038620021659880877, 0.006615279242396355, -0.006089561618864536, -0.007137626875191927, 0.02106914296746254, -0.005105526186525822, 0.013715836219489574, -0.022632814943790436, 0.016876881942152977, -0.026272397488355637, -0.013803455978631973, -0.0038620021659880877, -0.0019849208183586597, -0.042812280356884, 0.004974097013473511, 0.029170583933591843, -0.01757783815264702, 0.0009747679578140378, 0.029817622154951096, -0.005432414822280407, -0.009152877144515514, -0.005075196269899607, -0.0036362132523208857, 0.02376512996852398, 0.01798223704099655, -0.01852143369615078, -0.003615993307903409, 0.0013033414725214243, 0.0009368556784465909, -0.01664772257208824, 0.016728602349758148, -0.0211904626339674, 0.0052538057789206505, -0.009604455903172493, 0.006018792279064655, -0.020570384338498116, -0.020772583782672882, 0.0015813651261851192, -0.02225537598133087, 0.012994660064578056, -0.00494039710611105, 0.00832386128604412, 0.013028359971940517, -0.004232700448483229, -0.02616455778479576, -0.015137969516217709, 0.014437013305723667, -0.03380768373608589, -0.009435956366360188, 0.008842838928103447, -0.03019505925476551, -0.01211172342300415, -0.0019006711663678288, -0.017443038523197174, 0.005166186019778252, -0.004468599334359169, 0.0030953306704759598, -0.00967185478657484, -0.006271541118621826, 0.00876195915043354, 0.007872283458709717, 0.008606940507888794, -0.0038552621845155954, -0.014302213676273823, -0.0187640730291605, 0.01411349419504404, -0.006200771313160658, -0.012226303108036518, -0.0076363845728337765, 0.006170441396534443, 0.002357304096221924, -0.04070940986275673, 0.0008176424307748675, 0.00711740693077445, 0.022700214758515358, -0.027687791734933853, 0.02825394831597805, 0.22085529565811157, -0.011073769070208073, -0.0009056833223439753, 0.04289316013455391, 0.000397658150177449, 0.004276510328054428, 0.031084734946489334, -0.002760017290711403, -0.001194659504108131, 0.008499100804328918, 0.005230215843766928, -0.0023202341981232166, -0.0011045123683288693, 0.004478709306567907, 0.0032874196767807007, -0.02061082422733307, -0.023050693795084953, -0.013607996515929699, -0.00835082121193409, 0.009105698205530643, 0.008620420470833778, 0.0035452235024422407, -0.002480308525264263, -0.0066624591127038, 0.0560765415430069, 0.018831472843885422, 0.009294416755437851, 0.0051931459456682205, 0.008337341248989105, -0.025531001389026642, -0.005452634766697884, -0.01535364892333746, 0.006581579335033894, 0.016688162460923195, 0.001151692122220993, 0.007703784387558699, -0.0026066829450428486, -0.008836098946630955, 0.0067332289181649685, 0.012388062663376331, 0.010238012298941612, 0.015124489553272724, -0.0008572397637180984, -0.014153934083878994, 0.012765500694513321, 0.02303721383213997, -0.010756989941000938, -0.016418563202023506, -0.031030816957354546, 0.00626817112788558, -0.025423161685466766, -0.013041839934885502, 0.026663316413760185, 0.00938203651458025, 0.0005804797983728349, 0.005331315100193024, 0.008148621767759323, -0.01120182778686285, 0.015016650781035423, -0.004653948359191418, -0.02104218304157257, 0.014059574343264103, -0.019357189536094666, 0.033511124551296234, 0.005071826279163361, -0.0061872913502156734, -0.04310883954167366, 0.005782893393188715, 0.027849551290273666, -0.0006129159010015428, -0.018507953733205795, -0.023509010672569275, -0.0007687776815146208, 0.021446580067276955, -0.016054606065154076, -0.0035991433542221785, 
0.017496958374977112, 0.0243986863642931, 0.04092508926987648, 0.026474596932530403, -0.028577467426657677, -0.01728127896785736, 0.016135485842823982, -0.012374582700431347, 0.0006074396660551429, -0.012940740212798119, -0.006473740097135305, -0.0007304440950974822, -0.016593802720308304, -0.005145966075360775, -0.005921062547713518, -0.010251492261886597, -0.02677115611732006, -0.018440553918480873, 0.0029032414313405752, 0.011720805428922176, 0.012441982515156269, 0.028442667797207832, -0.0008947308524511755, -0.01740259863436222, -0.02549056150019169, 0.06793888658285141, 0.0022258746903389692, 0.009921234101057053, -0.03488607704639435, -0.004849407356232405, -0.004131600726395845, -0.0036598029546439648, 0.0383908636868, -0.012637441046535969, -0.002766757272183895, -0.03820214420557022, 0.0025224334094673395, -0.0063524204306304455, -0.004566328600049019, 0.02024686522781849, -0.021999258548021317, 0.0057357135228812695, -0.012051064521074295, 0.005304355174303055, -0.001956275897100568, -0.029898501932621002, -0.030734257772564888, -0.004387719556689262, 0.009483136236667633, -0.04847385361790657, -0.001224989304319024, -0.0034811939112842083, 0.0067635588347911835, -0.03507479652762413, 0.0044820792973041534, -0.007649864535778761, 0.0034205340780317783, 0.008654119446873665, -0.014989690855145454, -0.024169526994228363, -0.00214162515476346, -0.011242267675697803, -0.00017313295393250883, -0.03671934828162193, 0.013149678707122803, -0.021783579140901566, 0.0024213336873799562, 0.0195863489061594, 0.024654805660247803, -0.017780037596821785, 0.004839297384023666, -0.01643204316496849, -0.035452235490083694, -0.028334828093647957, 0.0005948022590018809, -0.029844582080841064, -0.0006647293921560049, -0.013075538910925388, 0.02388644963502884, 0.004579808562994003, -0.002925146371126175, -0.02228233590722084, 0.004172040615230799, -0.013816935941576958, -0.021298300474882126, -0.004498929250985384, 0.00028286807355470955, -0.00956401601433754, -0.012010624632239342, -0.00591095257550478, -0.1752391904592514, 0.030761217698454857, 0.019667228683829308, -0.01937066949903965, 0.018454033881425858, 0.009280936792492867, 0.00202367571182549, 0.008842838928103447, -0.04170692712068558, 0.002621847903355956, 0.03971189633011818, -0.030087219551205635, -0.03531743586063385, 0.004091160837560892, -0.02036818489432335, 5.897472510696389e-05, 0.015744566917419434, 0.007932943291962147, 0.007602684665471315, 0.034697357565164566, 0.01294748019427061, -0.010433471761643887, -0.017955277115106583, -0.0035351135302335024, -0.006625389214605093, -0.0351017564535141, -0.03922661766409874, 7.645651930943131e-05, 0.0014448808506131172, -0.0038552621845155954, -0.007501585409045219, 0.008499100804328918, 0.02048950456082821, 0.01951894909143448, -0.01414045412093401, -0.012994660064578056, 0.0004629516042768955, -0.01560976728796959, -0.006113151554018259, 0.03216313198208809, -0.0031998001504689455, 0.03378072381019592, 0.018184436485171318, 0.007420705631375313, -0.0026403828524053097, 0.013136198744177818, 0.003250350011512637, -0.0007557189674116671, 0.007306125946342945, -0.024937884882092476, 0.016539882868528366, -0.022727174684405327, -0.01450441312044859, -0.013749536126852036, 0.011120948009192944, 0.008216021582484245, 0.00327056972309947, 0.009516836144030094, -0.017901357263326645, 0.010500871576368809, -0.0048729972913861275, -0.02364381030201912, 0.016499442979693413, -0.010548051446676254, -0.012893560342490673, -0.00773748429492116, -0.0023404541425406933, 0.023145053535699844, 
-0.008896758779883385, 0.004758418072015047, 0.0036598029546439648, -0.03065337799489498, 0.005365015007555485, -0.002921776380389929, 0.025126604363322258, -0.007899243384599686, -0.01997726783156395, 0.034239042550325394, -0.004953877069056034, -0.011255747638642788, 0.004387719556689262, 0.03305280581116676, -0.0018939311848953366, 0.008916978724300861, 0.013075538910925388, -0.007427445612847805, -0.011228787712752819, 0.015380608849227428, -0.014423533342778683, -0.015758046880364418, 0.01100636925548315, -0.0031627302523702383, -0.021379180252552032, -0.0011896045180037618, 0.009725774638354778, 0.011478167027235031, 0.0211904626339674, -0.01573108695447445, 0.016903841868042946, -0.014302213676273823, 0.015407568775117397, 0.01461225189268589, -0.023684250190854073, 0.006709638983011246, 0.02325289137661457, 0.02318549156188965, 0.005654833745211363, 0.02925146371126175, 0.017510438337922096, 0.0038552621845155954, -0.014949250966310501, 0.008182321675121784, 0.019384149461984634, 0.019357189536094666, 0.004788747988641262, 0.02801131084561348, 0.003875482128933072, -0.020961303263902664, -0.011707325465977192, -0.006628759205341339, 0.034966956824064255, -0.004613508470356464, -0.0173351988196373, -0.0061569614335894585, -0.013466457836329937, 0.001577995135448873, -0.09322724491357803, -0.008424961008131504, 0.003358189482241869, 0.013432757928967476, 0.018925832584500313, 0.0008736684685572982, -0.003612623317167163, -0.0027718122582882643, 0.01585240662097931, 0.01064241025596857, -0.009826874360442162, -0.022336255759000778, -0.0032368700485676527, -0.010575011372566223, -0.005991832353174686, -0.01982898823916912, -0.010069513693451881, -0.013756276108324528, 0.003477823920547962, 0.009307896718382835, 0.001196344499476254, -0.021379180252552032, 0.00947639625519514, -0.019168471917510033, 0.005371754989027977, -0.007501585409045219, -0.027660831809043884, -0.00202367571182549, 0.013068798929452896, 0.003403684124350548, -0.007299385964870453, -0.031111694872379303, -0.014800971373915672, -0.0023775240406394005, 0.0031222905963659286, 0.012313922867178917, -0.03262144699692726, -0.024883965030312538, 0.0142348138615489, 0.0028897617012262344, 0.002163529861718416, 0.001998400781303644, 0.025477081537246704, -0.023091133683919907, 0.003046465804800391, -9.16741046239622e-05, -0.010298672132194042, 0.0016723547596484423, 0.010500871576368809, -0.008438440971076488, -0.005482964683324099, 0.000617970887105912, -0.009631414897739887, -0.011754505336284637, -0.004498929250985384, -0.013769756071269512, 0.0006600957131013274, 0.014437013305723667, -0.005887362640351057, 0.005830072797834873, 0.01803615689277649, -0.005240325815975666, -0.0032806796953082085, 0.0069286879152059555, 0.017941797152161598, 0.011835385113954544, -0.03458951786160469, -0.019815508276224136, 0.001964700873941183, -0.024870485067367554, -0.006288390606641769, 0.02473568543791771, -0.014935771003365517, -0.0007986862910911441, -0.018063116818666458, -0.027350792661309242, -0.011363587342202663, 0.013938255608081818, 0.005749193485826254, 0.0034340140409767628, 0.005024646874517202, -0.016256805509328842, -0.004653948359191418, -0.013500157743692398, -0.0008837783825583756, 0.013601256534457207, -0.007110666949301958, -0.01247568242251873, 0.020395144820213318, -0.03361896425485611, -0.00046842783922329545, 0.02801131084561348, 0.018966272473335266, -0.021783579140901566, -0.011093988083302975, 0.015933286398649216, -0.0159467663615942, 0.020961303263902664, -0.006136741489171982, 0.004734828136861324, 
-0.018844952806830406, 0.014086534269154072, -0.05801765248179436, 0.015501928515732288, -0.023913409560918808, -0.021864458918571472, -0.002763387281447649, 0.0009596030577085912, 0.01922239176928997, 0.004431529436260462, -0.002188804792240262, -5.686848453478888e-06, -0.008114922791719437, 0.007825103588402271, -0.012131943367421627, -0.003044780809432268, -0.024048209190368652, 0.016512922942638397, 0.0234011709690094, -0.012394802644848824, 7.687776815146208e-05, 0.003184635192155838, -0.018440553918480873, -0.0013404112542048097, 0.006510809995234013, 0.009483136236667633, 0.002879651729017496, -0.006082821637392044, 0.007784663699567318, 0.021365700289607048, -0.029548022896051407, -0.013075538910925388, 0.041787806898355484, -0.011046809144318104, -0.0017119520343840122, 0.03022201918065548, -0.0016159075312316418, -0.006709638983011246, 0.013345138169825077, 0.012367842718958855, 0.01163992565125227, 0.032270971685647964, -0.0011314722942188382, -0.03529047593474388, -0.009280936792492867, -0.0407363697886467, -0.013857375830411911, -0.008135142736136913, -0.017227360978722572, 0.007279166020452976, 0.006510809995234013, 0.00022368271311279386, 0.006625389214605093, 0.004610138479620218, -0.0010876624146476388, -0.006948907859623432, 0.03882221877574921, -0.01866971328854561, -0.0025207484140992165, -0.0220127385109663, 0.007090447004884481, -0.031084734946489334, 0.041760846972465515, -0.0008618735009804368, 0.018817992880940437, 0.004664058331400156, -0.014693131670355797, -0.0033177495934069157, -0.002930201357230544, 0.0201929472386837, 0.007804883643984795, -0.03254057094454765, -0.02342813089489937, -0.002923461375758052, -0.005290875677019358, -0.010938969440758228, 0.015838926658034325, 0.0169442817568779, -0.0021500501316040754, -0.00321328011341393, 0.002048950409516692, 0.010682850144803524, 0.011040069162845612, -0.0004273561353329569, -0.01532668899744749, 0.01508404966443777, 0.041059888899326324, -0.0029049264267086983, -0.010932229459285736, 0.00011489540338516235, -0.027391232550144196, -0.0031677852384746075, -0.0224036555737257, 0.014760531485080719, 0.016472483053803444, 0.024209966883063316, 0.014032615348696709, 0.023630330339074135, -0.03272928670048714, -0.014490933157503605, 0.019411109387874603, 0.007703784387558699, 0.00597161240875721, -0.0010472226422280073, 0.0040338714607059956, -0.03685414791107178, -0.0017304869834333658, 0.018480993807315826, -0.03747422620654106, -0.028685307130217552, -0.0031172356102615595, 0.02997938171029091, 0.005452634766697884, 0.014437013305723667, 0.030356818810105324, 0.007548764813691378, -0.018117036670446396, -0.005031386855989695, -0.014275253750383854, -0.026272397488355637, -0.01866971328854561, 0.03019505925476551, 0.016445523127913475, -0.005974982399493456, -0.009523576125502586, -0.012502642348408699, 0.025504041463136673, 0.005321205127984285, 0.017240840941667557, -0.009752734564244747, 0.0011205198243260384, -0.02358989045023918, -0.01400565542280674, -0.0038316722493618727, -4.798278496309649e-06, -0.015124489553272724, -0.003356504486873746, -0.013176638633012772, -0.014315693639218807, 0.013149678707122803, 0.006948907859623432, 0.0540815107524395, 0.01706560142338276, -7.092764280969277e-05, 0.018507953733205795, 0.009328116662800312, -0.006349050439894199, 0.020408624783158302, -0.020637784153223038, -0.02570624090731144, -0.001013522851280868, 0.015758046880364418, -0.0008585035102441907, 0.01888539269566536, -0.008465400896966457, -0.0009233757155016065, -0.022605855017900467, 0.0023758390452712774, 
0.03208225220441818, 0.006372640375047922, 0.0003909181978087872, 0.007400485686957836, 0.021379180252552032, 0.010507611557841301, -0.0010952448938041925, -0.027054235339164734, -0.011902784928679466, 0.01786091737449169, -0.007939683273434639, 0.005250435788184404, -0.0004490504215937108, 0.03237881138920784, 0.014207853935658932, -0.015407568775117397, -0.015933286398649216, -0.008451920934021473, -0.007157846819609404, 0.009119178168475628, -0.006463630124926567, 0.008923718705773354, 0.007339825853705406, 0.0258275605738163, -0.005941282492130995, -0.018602313473820686, -0.02155441977083683, -0.056130461394786835, -0.00016586641140747815, -0.007879023440182209, 0.010372811928391457, -2.594624675111845e-05 diff --git a/tests/data/lancedb/requirements.lance/_latest.manifest b/tests/data/lancedb/requirements.lance/_latest.manifest deleted file mode 100644 index 38d9af183730dc0462592fa654d3509d55b5b5b1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 237 zcmcb~z`(#IE5uTgT2Z3#9|jn;*osSvGV{_67%iB%OoiCWQj<&aizFD4l}M#!R-~rH z7iU(b#^+=fmsq9c86+(CMTO28e1k?8tLUE<|U^JF)=VIFiS95@o;>3(!Y*PV8gE$N12ke40TNtQw@MR zQ*~1kQ;l^^%u_9NEsatwb-;xPUdI6hxz`qgjqzTrtD&@+`Wk2OokIzSVzkYo77drp| diff --git a/tests/data/lancedb/requirements.lance/_versions/1.manifest b/tests/data/lancedb/requirements.lance/_versions/1.manifest deleted file mode 100644 index f83222174dd39818d6ee8bcf0aba4cfc33fa64fd..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 183 zcmZ3?z`(#IE5uTgT2Z3#9|jn;*osSvGV{_67%iB%OoiCWQj<&aizFD4l}M#!R-~rH z7iU(b#^+=fmsq9cTFDMraAW~OGA zx+#X{#=0g+#%a1qrWTgEX%=Y)2B|42$;Re}dLIMnG3W L0V9Ktqn|SXW*86+(CMTO28e1k?8tLUE<|U^JF)=VIFiS95@o;>3(!Y*PV8gE$N12ke40TNtQw@MR zQ*~1kQ;l^^%u_9NEsatwbuvLEs90|o~F-;<|>2l@Q-WIyjPt1$1G zbalE!%|)%e#XHTS_x7eWg!qPqhXjRMg@uIsTKP|-OZ)!!JbDlB|5;yqt6+LCWR8ER zuhlf~fKXrik-k2DLH<780d$qw;r=1MfwbXJ1A~#`8uLQKd;_gQ{er^-rdmy*3jDis zTDFgGXsDHwqoX6et9MYScg<31L%tzap}s!BK~wFm#3g)!r}|n2hKGiUm(L0D4-2Eh zP>qI%Pnl=0@$bKof4{!|_p6`gKi^zz^OChX(jV3sX~CJ~d+gD^*;w7sg}Xjlhk4x& za97PPAl)lrsV{c(;@LYHa2d?jbmT2trZX+qv#@!768z~F%l-Gaf}>?6N@VS?ybY|y z9SIJn6b0~T53Io5{DHy=zb+D_6Xs5 zmW$CT(H1%mtRs(H>x`Xi-Npo2LoUBO5B4k%p*1}L+r_!i^+pPwZ-0t=o!A4jPL{!# zw-wT$%}cQRArpw~KY;#MDDl6uf$FK0r!tKA)ll94dmW6P<%aVjPV?!9mMP<_TCt{I z`$NzDT=Gm@&2$SI!;Rldv1j!v_G@w=3~L$yM|@7f*#OxliDGhfGZUDo_9Of+__huumUS#9T_27lZ6ij|M7@x&<;CCB61KqXWuwG9CN;-T| z8cuk~-G{uEY?iIyv-H+UeolX-!jx?==E6Q^IY?4W)+NF8Lt0pJaXnsnG+Vm5WwbKU zW+S9l)JCVET5`QMJLtLX@Lkgs(E9d`CtkbEsn0ODpOX|D`55a@&0y+_HQ1xmUf!%j z2KZgs%8ZWA#oF298J#O0)ky1Wo zVo_%nJEa#i9%ssmXWJe=6Vi=+uJc>cF7!fL6VQ7>)UzLK?zcjyw6zvz4Ce<FlVTzl+NRxx4=M%kW+w{y?4nK`FH^#9=IJJEOjJ_ylrQ|>mJrX0I(1kE~_ z<9eM}5MvXIQMDZSZOLVG&LmG{8s_29yWK3bj=t6JKm550x#n|lp zZZNNm;q*T8+Er`ODZ4kExZMdC4SmOG#-Ze05svHs0G~}aREs@6z@vTDT(6-%Ob^e) z8Sfr*svW1gz>|ao-lI_+sMHNYnrU!4^8_3Rj{wp8E>m@x*~RzpT6w`w{HVmG*``1> zl*N1~f&J0p!coZS@q_nn_nyr!G?&*+m!x0TdF)>G3vNH_Ldo0oWn6u}2v3@RV14yJ zLH?%gXuN45OR9`Oy~_nGy6-ZqT6U4vU;ro0moqvOp4GXjY!?xRZj;;a+j9=#hhd+U z#%@p9S;y^|p5>^fCUgPwY8wddw;!qZ`JUw;SQnd)aOc`CJab?T()yrU( z{>lEk+vwam?EdW4STVv%t!`MP{F?lR(LULw{+sZr?{~?y+b28@Z$UF7gJpQ9@~gt8PpBg8i_3#Y0(zb;~FJ8zOZG$OzV;kvM#7W0a4cJD3FG1q;^}V*Vs8+3q#3?V3oL2T>LEMZKo>@a ztz)x&K7w`h2&k28itF@G!?*r%%3rmqME#5dy?gKtEqbEv+K1wCWs^De;)uC8)Vv&8 zV*t?HLA$iwaH+o~4q7^yb$q=Z6DDkC4~L}S&bOBG*`e+@JL@FwtXC@D7biW^K*?e* zSng{t*P7*vWi8r+&#jX*yT-E1)EGXXYynvLH=ur(@WJ^&>^9mKaL-|Aw6U{1uK#3+ z$ZVmStgFumdwJ(7j&IYofE)Z^V_$2@U5h5mfrD0I`Q`5*aLFnz-_6hX0X%NE9)3)% z;CWUJ<%l1Dl&p}e;>_XYQhk7*MkR#(m@>czOJm)Ua1-5!1V}@0bj>`>xOkl1x)lw> 
zzMX}(88NWEfjMiXkqH88#B~V^8J!8Rjhiq=dpNpGnFU9B?Z&JTUDVy#+OolGd-dYW zec%$c9jFJf;!}>`B>2(09qj+zL@f*r!k`zWu)bF*Yt(BR;)j7QuzE z&(@W*jh3o;^>+5Sorx@Zpls3%AY8_^D;MK{#}2gTO1QA11jRn7SAlQ|pZ8qE;noC2 zoMF2!X0S;@(HE@KjJ;r_c^1dyudLzUnXC3FLT89Il3oBhHx_;D59`|Gat*e*I}BE? z9sZ1qlgf~|hjc?rpxe^OwTE~My)yW|^C^t@qQg%&-3x?C3TY5_L(Ku-q5TJ(UE9-vux&7bW!izzdk0%0N_vUt5h z+y+5yjdA3Gkz(GJ221WrCGT8R;%Ygv_8bh{Gn3!2u!0keK#Qx{K4 zJNejb2N1n0_BqTn62{r}07Kn9%96pm;Lhihq;*~}!e;hD;}JY<&0*o%=IZXfK`d^Q zr25aUgmbxvN=`SmBit|myNFMm_My55Z-LSsk2swh&p5IiZhNl7Hqa3LIyJ%zdp(s$ zXIp{)8aLC zbP7AHJ;SSfocOMQeCoqPbe8kQc_Z%WCe8#%Lm^=~`!uwJq&dDH7yL?dSWMagk9Uni zRsR4Ip9meqN$1wgQy2G3nD})$f3jx}_Z%#PLvj@4tL}1i%n_Jd?8AqT>8AS5`O1k~ z;M#+>gjK}w+buw99{+cf3#_1dUHhj$C_t)@pe5O^!}Bv>YV1H)&vIe&FaA`V5;9XP|m1-y0| zu-+YFNaHOdnEv}Z ze4;z&F;6mRU%361*vQnqfzt!_Uz`~e{6zW=Zk1ewU)rltXe#m^C^WX;l@_EIuCw=Hv&Gy?26ll+>tusA|K-z~ zsO)H&8)@%lLL0z>kXJxHN9lEb4YpNm81)OP&wm4HcSh&Sd(U*o=)PJ=niw}14@T** z8K%By1Jg$SWG^?jra80~IzjFCysrBFnJdy<2ppB0R}2)G#a7P-EODp@L$M|jBDv8K-7{8~#BVn!o3n9oW7;bZ^Nm{j=%bi>Nr1g4pV zu0_(BoH!I4OfH4GwW{&<9A_->%qB0kvj!u4iz+aAe-eJYlmP+l#=(GSm6I+)yD_7> z4l~shS{KyQ_khlr4=>zbqa}sMp!#5gC0=N=)sB%SB3}DVcvc_Q1l|{%i|g{Gu_<5WXN`WUoK zJ3x_f0g~^NpXyu4Hp}Xers{*^t9B?cC)O&Bf9@3CM5fs#UNL~bcM2iLdORFyRhu9C zlg9`Lkj@SSmdr3(kK@Z@!EVe|e4SB2T(k?~EzY9wAJoe-^`yM6Uxv`glDW}wB)upl0}I;tp(R*;3S0I%sl}e+n7|r@nkv$n(G^^GuvRdNrNp5B9bEA&Z+h zozrvqqzA8v>#9Tzi6b5X`A_bUzm1U(kO;du`FSq(NW37+4lS20k4}s3hBf)snFJ5QhR?$Q1h{iTJ)$<1H@IghR zWA#97eF<$R)rTROE}U+op>A7+YR2z6Z(y~O)5&{f)adSMUz_2PUTxGOrx9RsWiXKD z$H18#fV{KNLhSFIE|~W83=-Dkmw^`<@v`uLyw!cb8r&l9g60B0mORx8q}s<>II@tqK_@+D?Q8b zbeH}>7@?A<;V&nRgi)717C-GXP8OL*a^4Tp8C!5&>*vy|yS`XiOB;z>QOr2$bjl!l zO2nt?<(QOW{rdY#)=gK0eqn?oKp4Wv^SKHxAze;+%`6Dqw^Z30(NXTX#sd60rQm?F zpN!@XNH0LM^KDehM%XO9Cj9lneXynJ5geU#1D3q)%)2%j!N?r2!|AI6&FqC_!56O%zx_8Vss@_Jcfco8TNE-+(k9lB0bABhQ4L+tioaroZPt z8$MFKb41>N#Q8`W2J8CuAW+VLC+MXf6{s!irn@Y+;xNQZ56eBanOQp-kv0WM-}a{R`KH&c@`6xh2E}l$9;3 z$$Kcj!QC%=khh%=#QlsshWxzcJtcv?;-uf1&}{qJD7oJ9wBodr**thx7U4k`kq>~| z?~uG?vDc5xO^KcQG(Nrwo&{rxli)?Z)vI!>I6J9E!)$}>ck z#Rh6!z$bg!!n}xnCAy=ska7=L!5c84Jqf=#`JxiR0hEb>(YCR~zd>$!6E9JQVh-eS z@p!YLjC?6~?G}bU8$u_eC08XB+G0vYgXQ5RV_`q&0xJ3q^Kv(lmtCt-8bpM9c8`^<7?=VvXdR zsV|b4STErrWha^ZUDHNTcW*;!*tbCn%v{x$^kxzF448mgKkBK3Gr~te?6x$TpD3if3iCJFP`;*18ge2CjV&}zdf8CQ z5MLnq8QN=Y_4vXb@HXuYkOu(0%bj6k*K~aCRTqdKik8J^vI%`n_L9#Aeob-#orhjQVM<#2&%q}x`))hX-2TMZ`c{R^)bw!=-g%W>|_ zrUDO8yw>6E1BrAj(rwV`qT?nqng@C3iGQqRam`nEpW?n+>2P!0J?!|}gvD-aiu630 za7pMY=-=uL4%S!>_fl5!_F?V$_fzRW_|B5^=7Z_S*PJvVX_TK_l)bmg_br`bds3vPJgDi;~s z!myb%iJ+Erco3yGMc_y(Rg?x#~IcoSoWaE}A>j7zOu}=1(<2BJ+GG%Pij-g{2ohzer zLb^7%JDd}|FS0V0H*pvSwOs?9$~W@5we?`@s6Qy`Og;}L#9YJ1W+y@5+N3DM8l0qz z5@`N~w&uaH@7Po`Gl_C)AdQV86Bsw65nj>W33JRlk%r5}aejXl;c>|eQH|$vq3f<3 z8!i(MOJYt5L&?`hqw|SwxV}|$l`;cPnK}^8Gs+3@!Jy`HUXThKX67o3*Jwe3*-ktZ z^ijF`xuH7DxjqIKd&8>3^?4+Afw7Yh!-GMaA^Vs!&MyANA6myMk0LHZYNN62bf_V$ z_IuCY%u7Z`-BsxC5(>j6PJ#25`!U<3KYG`72jf3K@Nne( z zZZDv{H^;^!gIVX>N!T$rNHJ~~$qrjs%3{9<(}&}lMx`)#QX*^cw>#GTbDa6DGL}s> zH(~0l7<6;^CJmeT2;3azVxr$DY}EEMJJ4Gf51RaC-@XS!EA4gM@NX;lXww;YA2WmH z7{Km^kHN1V_3@ZR3F}!4Ffk>SWxUperN%~hsP`5Op6US6RSo0?Nk4d~VKsahG8juv zu0l7x9sFo@DGy6J&fWFCKu)>^^oy?p%h#skG(%l^=9EAzHT#MtdmrQb3%Wpi!5dr5 z<*v6ULs^J1BzGOdFJ{hVI+q)%TaSE2z1_}Wuakr6jly7EcT?!$Gzy&ZI%9Rq%WPPJ zHN5#chq=ck0gxYTKBfcSvFfGJxwCNJc9`RSjBB<@1Z%BBa4swXUTo_R)pMtT?uczX z&R+w!rfugf$Mh?vvx7jNAy~T6NWSse9?C6(vBh--oE+oWp0X>fdize?aCAAR9>FQD z+H{6#9A~dX>J8;`MLJ8H*-&+i3InPYI^XIqZC|g;T%2^|*=`=(E~=F8Nsr`f{4e3% zikC3IK_m5fOD(zgy(nnip#lO88K3N8eRyBmXSdB|QA}_69qJQ3`BdK_U=q0+^!Ka- 
z>O1ZpS6_ZNq$A9dnm~w>g^_Xs^6EIL>ApP}SCtL)m{RAjK~1eD)H#Ji4<> zueQd>X&_;@CRvS$l_-pqN{$h3`)C#(!_)`o;>UddiOnYp5E-ck;S+ zF}y6p7dO6Q^7mHx;8DQb)|Ng|92{E1XoEAX>w!e}ao#Ctd`%xa>9%E0whqRdXBy+I zr^}G$2X1dEEu-7ijMJ(hROL$>SS%bVxKGG49E za@Su2`ox)wt-cNMr47-lLkc&%>11IpV7Z3g}A6cKfGF}dkg_7PJ zu2?=t!X4H&T1Pc*r->^|?a|hLHHyB?4x9i1iO;Ez-YNCkcEe4tE#S;)kZyfRLa#18 zl*teK!I{vuDBs!1e(a3Gm*?YwW~YSe#LkBvmr`dO5NpD&w;wyB;k z>Bj~x&cFABmwZ&61~quM->eR{x&EI2y;zA^WtObU8aQDCEMonz_LbFGeEd1y?Eedv zJ5V?Iv;9|t->NlCa%v7YHecnHr$Z#|T~`?46^Q%jI*`(MvvPH_1hg+! z96S=xLK|a~KcJldjM;Q>!1sd#q(!U8W98`&pf`L1E@G!Koa?CeF{G&rW@GM~7hpEp zNlson1NQ_TC7f#l30+&rgq`r@Ts=r@q9GGM)STh{h+>BC%lHxV8?cyPbu+!{rJgyB zu&(qk>$SW;3Y_%nauiOSj)m1JjxxBfY)F=y-6aA3pO75a( z0K44l$;2u0rypi8CutHk(bUFwStod#+-694fzr4SjQX9M?_JDjuJEb#RyguONB(kw z!^U;HVXVP3Mwo?p?~K4>(r&`4(~zHDBzO^j*u3OV28QA4hIcVzh94L$>7*KtECJY` z4cnUpz}xM0WY7CEk>*_Kcil$lfBFb{*qI6VFiPMo3%x5B`w3w)dQ*He7NZZVV{H z{m1u90}6E644+QQxGuKn=jsMu&wsALwxCs~k!lWm%0|NCHOqLo(Rv83zY*q4TPeE~`$8f7f%|8Zar3KdK>9^sFI?;I&GwtE;8PNtsKlYrKDxQUKA;}w z;(Q~fK1G}BSFy*P9#D3w6%wvt)!9c%M&nVSHKLY!{KIA*@2B7bF2U($&Vu8aRd#oQ zW9T?;D+tW#W!690~lp`znn!|QLd?|Pw>i@Y9NBf!(wnd`uA44%K>fMSNko9V| z&^_w+R*v}Fqc@8Am}PjarpJE#TnDE^S3%^oyUOmMT3{L3hj3X#S=AL8;TVXS7qxhu zWruI(HBf2qaJJdSl2$p0b~a z99DZMiZkA0Vvluqzrxio<4g9wZ6t6HNLR3dog0!?_{6D>IL7$|xW)|Qbe_aJt)X|V zF*J9d*vV7fai*IYueEricn($$s0*G6p44NVcy_Pa@}PKId3MqsB+Y7!GMjY4onSROqb@KS!gsB=68tLpCOW|o{U8t;Zd{k0NSX|3kAl--Pr9aBTAhXNcdcPdz+OiD zC$K{7iJlk9C%Dw2GknR6mo^f;CZTe)~Fv#vbRn?mCUyiz6jE zcS)Ibh>7015VDwRdY6&L7Ijew1M#HANN~~Z#W!?&3xew=7Nwx}@E&~oQBPcuG#?+G z&E_t#X;g!FOn+sCq}73HC|isnUv#qyKYVwq(XDkBRWq6q)%5CJ)<)Zjx6B@ogl8yt z_`IbPjOnsM;5m$npCi@3x)M^3)R9-My?|P~&19Mrd^R`(j(=#5FS6>_cmmSw%pqC= zokbi8V|ei6@%VG-6Q18|JBT$;tlNf>x4}Ap6ZoXUAt3w(ou#_VJ&{{!ULhQq0)#)H z&hk*KBfZ(lj~Wmct;@ULv$4_jR?zShXX&6^{61>p7fZXr*dhdcy}7Y>^26 zIq4Cl(|9d4tLXvkzT6&u|8!Cat5~2R7hI%1ZE>4$FNx~IBpef>VV)w(6C@1$$C3ylNfI>hs?@4k7WcaZ~<-eQlpHI%P~w_s;1oA8CO zO4J6Lj`Bddp5pA-5=l#t{uZP`6?-ju~^xjCEGp(*VD1IjhoqqXafFiJ4 z;QEiBO;G38bnHA1;Ljx!fnogh+22533iU^7*VLP|slaT^Or63-O~^Zo;f2hzbqh{qnbQyP@S&T7e?`_g?OMo3L@2Urb z?}p;l0lScJRPEt(6_fl6@xTTX5dLh=(_-Sx#n``7M|r^VzdWFRBqqsUL=Vb@M-a27 z7;}#|0XiEtk9Ck|_#8ySDE4uFJ()Ni#Xh3cG->RiO1Eaq>XJ^JUQF00^co*9F&jtS z_dxwWZBbx;ojp#ngTpR3-uED*{v_>YEdRN*zDEC3KeF`h1$=e4WtsrQ(;?|MZ6 zH@I9uzmI3a$E?^S<`BtS)68j!8nMOG%B0%^vyglsC%h2aM;2QBT&5SsO#6nd$0jJ` z$I)=48xZEhh0W1G{KjdY)uGNseHhb$nRUeQnjUkTqzNs|7)B z&U|HQE;!yiS4^|RNPn{xRegbU4U&%IbX{qWNiOkl9re!US-jCGXY8flOg(%m0Xz#$ zL?(d5L13`hg%hXXpP@@QWf5p}r4Q0`fxH+V-M9}u6K?PWrYA_ZMWWESq$4=Z5z;(E z*pur(*bS>+)>eQ1@Idk)rEPLEsRphkLLXA^vL!|@*xv=+{@J{nA}wWK~xWUX&>kx4r-*V}eA zST1~8hdlD!-7X`|8zY?ME7lf3pEx^}atOlIBt__F^7XK~p%)XHl`aKE zC{OR08qfIDx&?cGz8l{AJQaTIP-(W-gRRy@(snmE`FK7j={{u;hv38a60s)~-aV^q z6L!gWRLOfHc`tdwij{D%qcNZM){`9fP1^=YxJwM-fn+IaS2R|mAll$3!$*$5tdw+|D$ zE#n`u9ta$g1us#r$)rn_dtdCRZ|`#L-yIlXqfEWxMrX?X9=C#*a~+{LBL&`{KdjbW zvR=|$w1wrobtt}6?urY+P=5S%Ct-ddkiJKORm5rB$3?}A##hlcJ3<;$P+g-XU!JdB zlXC@3O~KWEvr6a;YI2-Kn8C#QUkom!d??R#iIKnPQ;E2i4Z7#XNEh;%ZX3|UAQi~N zkyo50@(__3(cVY%0R=HY8j!DFmq@+-SRz~l%0>7CtNS&X4S8VcR>cMKqXp3YZY_!S zp%9J$c@U($N2Q!lE=qq>qOq%llRgCMdq#N3bKd3=hE;Qs?NM%pQ@pQ{2HOS#Df$yAj613B%e4O$T@^`#^Qyk?q132kSzOnlSR%F(rCdcdOP{#FJFLERA zgO4Icpup3!n_3dCXv2rB$?$#tF)s9O&jW4P)lY-vr)N*0@Z39_o|7iKY$NTSt5|A$ zWM9vR1LvA4zq8Wu9#>GM>-93eAI9LFjLtRy|KMLBf*EP2w?TpoW}XgjPuLT3Hrdh_jX zOl0evt=Q|%1iWUoic@xo8pCa|{G+)n{3hvD=393n{9SOEy*lSk9zg>0_9nP3u#W1z z;+;g^hVZZiUw*w)LLLuowLCcS38Pt&UVPgI)WdShi+eRQLv;u8bx60N{9^{+opwuj zF;2RZyzvISw`CYy3F{6b3mI`}BajBbPSIBZ^p3F1quY>tFK%BSBXkpSo~uOqA86J& 
zd3@DLrynPc%BN0ok=jRZ=CnSxx?85m_MqE@&T{S>e^@j<4#=aFke(H}fNpdgvU^8!;{6yD9_Qze zi!K9}FA;e#qwGlJLhOp=bn??b*o`e2T)gJ#*}kOH=x4@y-Qmmp-ISfxld0!w9vgi3 zBYQbFAL9I%;#=zwrZr+YlFw8KgM=PoBGcJf=BB(Ga)5R3m;t8Q35w7Mq(|jv-S|D=E0kutq2LaN0Nb4d09wG5Q7L4=@?tPnrYr7TG+@)Z+k%>(D4_oB` z+xEJM>s(HOz8x>s=y}pXZ0w=oltE1<9TF?@9xUrq$rmPXr95|q+lWIJD(Nt!Y)#}c ziom(HS~EFeu1p>pPBb&aF%R}bMvqh!SvzsBO8rK-I0;D0vz+ve*v?{|28mY zZ)F#(7vP4gY~x^#$8+2e{RkJ=ujR_yo2Ym0u7gp@wIS~1R9IkWqnfKtq5S+$Mz`6L zdm(J9f0|;^t1m9|B<)fj&*raI@loq^cH&Qa^{LYdZc|waf7b3pi(dwK!Js)FcTEtt zx%<~Tsy1c0mT?!c!K-B@<2SB%<8QXZrNj)rr%?fKGt5HX|L_2G7?TQ3++4A!^J1iR z!T_TmSa*6B_iCPsm1~?NU0*NhqmIh&KFY=&XBzRG+itk^L${J|b0-^fy~pk>q{j}JTkj~OIHlpj z)Df_$=VpFj*C^gPr6-K@+5vgjW8p`mDQ+VhX0chNZQ$2XYxcZ@vE1&832xT!iKhlG zLC*|lJYD`2d;IK*(koNc9G!u|CC2b)W0;$_{dLyA&J%WEQaXRFH3zoDwt>OrR-8L~ ziccECV*S^-m11dk-9LHNXK&BjKnE{Vac21<-Z) z`Xz&4*F`%B_-YMb8!o{cPk*tNl)3zc6+c`SQlA1r*nPon0q$89QJyuklH1Oo4QZMMxD3u zs)8Okb$0^QV?LbfS;(s9Pi6KK+M#>#dKh$W1nub;w!fK+&4+{n^$%Vhxs@9$Z?V^6 zhST^GY%0JT^~;3RuO&*0OYgX+sRe7|RxG7QM1lQ;eW1791EZhJ zWSVy?Ah>%O_jOQ|$TiyPrL%@~9ew$ArxmdA@HRZAv{cVu`9$mO51Ge{!M<5B?6aJV z^DaL|`SS_M$udKnV-XZqvQ`aF!jxpX>y!@%!<+p0?U?#{h0Pdnk@g(^7Amw}zflM}_bOUX)zNoykQkcDf(* zA8;MQ=x5F+|=wDTROv|)~0HovR?4^@I|Eeh1~TGS@DeB82l{;x-atNzpq`w z^)9O=!`T-gp;ar)lxwR3Poi&IGdfE?$!C+)b6Jq+efB*3JQ~cr#*^pH;-cq=k1kid zpP7kkDC?J;MT3@Ra?FEbFtTq8=4u-BDLamnAL@wS$7i#OVRiKpw#|JR6LS}3Sza^8 zPr_#M;saB0^t#P(GUg5>zV}izN1J2itw1yi+{kPOw#5F&cCl&vFCW`_0n};Z$ZJ2b z#Z7g?;rFUkHvEDq|7y@h`m^y95T-NQ6HMKInGrXp^v@bTkB*<{Zz9bU5LPn(FNbN? z8VHPly=%e!gQc|QlDOgvQHYSn%4-9bZj8$=l)`D*|uuaB1^(!JtnY#&Vc=` zw1k7tPP0S)z2)E%`W^K2dh$=xZ%~l64PMMP7d3)5<9wM>poK)(3VAz2kZJ^>hwGr` z-2u3|x;t8&iGZ2(H*4xw57<2<8T$3^0e$t(Q!Uc5j?+9eY`Yc?ZdysaR3?e}GMJJ` z_4>?9s#byFzHHFC5()X4ZQ<6q4ea%TQ*2lLO1Yg4Ag=-r6>rEgYPttZ^Qo{vZQzEu?q^O=JCopTJp8AE^xTOj+ynFk70c{c6_3;1*0cpXn?1+{0SLACwlRUWTBu@mPMMBNAW3y~%48`Ezd+cvG(+ z9Q=v~sY$Qx;P<&W-mYO?+3J`Pt?3J2_4y-yL=BZV8y8$~M8Ze*VNpHQx}u4P**JIw zSE$!5+-QE(+cz{=X_w=SI13AHHxm!Equ-f41IIv?8hknf)~os>eP#7JoP@;}8>(%_ zJw@B=er^%vMXbno4dJdm81LUf^S>R3Bpu_#Dd1`Ci$nAqVcx_paO+_?E?k)av^V^? 
z;C+TBqTEIHn|qV>(wzylcXWcLUm8eL{hN}uOJOueuD5*0OGX$xZWJZ1lS3s=z~wUZ)uGkd$MVBQ$IY?F&hViQ39$ z{eCF)J;eDq)1D3FL2Dz}JmWEhXZiT*_X9Y^^sIo^%d0cZQ=5jIrdI4{cNmmCt>tC)BXu9+QI#x%PC z`yY11mLmg{tbXfRcH=aB`{x)CS8)(9*8b1Cm`fmA1?I z99YY#Wc8kC)Z8>iqTZ_E@~;Mk+<9jX2#$I9I0INS`B2_93WlC9gx`&OWBK1C zEUCVZL+jb0>$^d!?maPAhAQcBInm(~BTWNzX5_=B0O<~O=7+kZqr#ASobd5|7Hu8%zq(%*_NTS+*X z$m{NGqRziDPcdI?qfpN<(n*Z;mgMUoA$c;mw|gl0KvVn>drjap@b>HRQFRc%ZKV=E zpWyF%p2JT^*Kv*X9_akmABd;OM=WHb_A92EsPugDk}~Zr9-rm4!&jfu+(_3;89mUo zT7OoJRvL8fGVV34;MDKMLSvKXLF#EFKLFo{9OjojN5X0Sv0d9X?FK=tJ(6#b-z>e$ z9#ysC_czBvBYCFK4{VrmcbM4W6DKcBdT9d=h_B!RgUHXK?!68wX?j)Y`Rfm+F~R}3 ze949p9u}#^UTE39kt}$f_C^|J3lnGXyvumduevFPotwd;ybyY`#!ByA_c(EndSl`_ zh;5!tTs4LC!4P(Dazmjw^ zLRS#)NynG=frVF2!s9oLlkNu6fGB+MsvH-H_dA1D(V1{{qgM@oM0tTqy1)I%S;?R@SD9WHk+5jc(E+KoBQIrQni4`xc`KxZuY7KCRNx^MhO6X6S3tv%CV zvR?t8v~Qg7F<863576s4X>?p3@CQ8xHN(`!{%)hkw1*yehP+A#iL@cwCS~L71=zF&@^zaH4KA^tK2S!}RXg=8Cg1uby z;O=I>k@$&`j^)O z%r$(H%|49Vwwn#Ww-$@C>|e77fy;W2kD-#gA8ni5;ne3m(zFwN zI8iS8fm`>fCkLu0fVfAjTNV2xKaOcpe__(gbDTK7Mz3s1nhoFAEx;vL+v1&PlR&%0 z6}tVCi8Z7YpHwdNHzvM42&C13_=*w6k{;KQ3D=oS0xzuw9N`|9 z2O%CjC3sS${!{#Oqe!1U1Ca-ip5iN}uIEB`MtTP`kCd^XVKp1cf3TdE5sWey;=k9p zE_@3YJyLn=4IjH@px{;Fpbn%-?y&fW<|y(f>RnE{m-^`ioH*_2)`N4G*%3n0zvaPi+UsJ35@i9$3+f6K7(m*umPF@dEAbN zIH1QWHhY0qiDuI_bbc1x_1#&xV6Yk9|1g5DQL!+fhoMZ~9Qw@HS3L%03e5?Dzl!N^ z-xW^2L;jI?0GesGK+ed%VTyc=3 zPS+S^T0oeOCMSN=&(S;_xq}Muy_*iwwLoaar!V3Je?Z^T1|a9eLqYaT z9{uDMlD=lUhcrNutF>#m5?*(zqo#g!gGW_6q}h-JrtVM3V`!=#D^>&Xw?rO*5zi~_ z#`ja#e#k=7?{e~&Yn*%{9$VlDCRo6P_W5yk2Pb~R&|3XOUjyMS&HgjI>h%(@q@7}W zFC`LRenpy5$m(YT<*|psCSe_Ej9Q#D1n(2~ob~QC5f9l77rhU}#fRsXszs&xl>vFL4oX=z5iRo-QecyNmJC z)$2%Dq;~&18v=WUgT}BJ$^m|Xz!>UZnX(OB71Rqd`wS<~>`FSXHriAU$B<*Cg5R*d zUo4}w$)wp7`W|*)?arxA7^wEfiBDYsopMQ|Eob*ySmDH-y)bX*2hx?oYN%mlDlYk%I^g@uk;V#mUlKp_vg{(2dSnCY>fVrO!MN`KHkIn-a$3 zm%$TI-G;<9TzEX13)IV4&P7H!Bg~dRczuH}Ht}GT9l6nL zij0bI_6$&F24g1l1kxe&yHo>tM*O!L4|VrdIM6(@$8R{JxnXo2Xftdh`W2n!q&0xJ zMCzsHLI;y=jAj7N)JD{@=qh*}$?vKn=ORqOml69V^6)iW{fza-{$=kVi<@J?i& zqQ3>sh%>~#r*7d)r=>vW!pOtQB0r@pNaQIJaTnzI_GoYqBIXd(UVeY4?6z^S`gV|9zi-x1;gj`~G+Pzt{bDJBYq9XlASv<~ujc>hFL5H0|{M`7kI) zO;;@gXJfqxU!Snx5EIS+eTvyM|GB~x}t;HIx=_S+reFLUW`akde=Zz+X&6`#8j@FCl-hbcye;({;-%&g^qrQP|fOn9Oud$w^y_>yL$0c-B`m0kljkPqi zP2$#S+RZj-Vyx--zm*lm5BCW(XliUA7Dgot4+^7t>iP$Tb?NM?X<%wxJJ3I9QiOLv sxUY%U|L?HGL*8@$eMtL%A2Qa_(A3g4xtOA str: - return self.answer - - -def test_rag_tool(): - adapter = MockAdapter(answer="42") - rag_tool = RagTool(adapter=adapter) - - assert rag_tool.name == "Knowledge base" - assert ( - rag_tool.description == "A knowledge base that can be used to answer questions." 
- ) - assert ( - rag_tool.run("What is the answer to life, the universe and everything?") == "42" - ) From 80942bf38c51f2329520289573c404f74817d8e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Mon, 26 Feb 2024 06:01:05 -0300 Subject: [PATCH 012/391] properly adding serper tool --- src/crewai_tools/__init__.py | 1 + src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 6ed1c5d65..08e5df185 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -7,6 +7,7 @@ from .tools import ( DirectoryReadTool, FileReadTool, GithubSearchTool, + SeperDevTool, TXTSearchTool, JSONSearchTool, MDXSearchTool, diff --git a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py index d4a886a73..9da634e3d 100644 --- a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py +++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py @@ -31,8 +31,6 @@ class SeperDevTool(BaseTool): results = response.json()['organic'] stirng = [] for result in results: - print(result) - print('--------------') try: stirng.append('\n'.join([ f"Title: {result['title']}", From 9e560ff9517f3f62c80c59965a7c3c8e8abfabe8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Mon, 26 Feb 2024 06:15:15 -0300 Subject: [PATCH 013/391] adding new scrapping tools --- src/crewai_tools/__init__.py | 2 + src/crewai_tools/tools/__init__.py | 2 + .../scrape_element_from_website.py | 43 +++++++++++++++++++ .../scrape_website_tool.py | 36 ++++++++++++++++ 4 files changed, 83 insertions(+) create mode 100644 src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py create mode 100644 src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 08e5df185..e417720d7 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -14,6 +14,8 @@ from .tools import ( PDFSearchTool, PGSearchTool, RagTool, + ScrapeElementFromWebsiteTool, + ScrapeWebsiteTool, WebsiteSearchTool, XMLSearchTool, YoutubeChannelSearchTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index e2382eb9b..ecea1cb3f 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -12,6 +12,8 @@ from .mdx_seach_tool.mdx_search_tool import MDXSearchTool from .pdf_search_tool.pdf_search_tool import PDFSearchTool from .pg_seach_tool.pg_search_tool import PGSearchTool from .rag.rag_tool import RagTool +from .scrape_element_from_website.scrape_element_from_website import ScrapeElementFromWebsiteTool +from .scrape_website_tool.scrape_website_tool import ScrapeWebsiteTool from .website_search.website_search_tool import WebsiteSearchTool from .xml_search_tool.xml_search_tool import XMLSearchTool from .youtube_channel_search_tool.youtube_channel_search_tool import YoutubeChannelSearchTool diff --git a/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py b/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py new file mode 100644 index 000000000..1996172b2 --- /dev/null +++ b/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py @@ -0,0 +1,43 @@ +import requests +from bs4 import BeautifulSoup +from typing import Optional, Type, Any +from pydantic.v1 import BaseModel, 
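For illustration only, not part of the patch series: a minimal sketch of using the SeperDevTool registered above (the class keeps the upstream "Seper" spelling). The SERPER_API_KEY environment variable and the search_query argument are assumptions about how the serper.dev client is configured; this diff does not show them.

```python
import os
from crewai_tools import SeperDevTool

# Assumption: the tool reads its serper.dev key from the environment.
os.environ["SERPER_API_KEY"] = "<your-serper-api-key>"

tool = SeperDevTool()
# Assumption: the tool's schema exposes a `search_query` field and
# BaseTool.run forwards keyword arguments to _run.
print(tool.run(search_query="CrewAI agent tools"))
```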
From 9e560ff9517f3f62c80c59965a7c3c8e8abfabe8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Moura?=
Date: Mon, 26 Feb 2024 06:15:15 -0300
Subject: [PATCH 013/391] adding new scrapping tools

---
 src/crewai_tools/__init__.py       |  2 +
 src/crewai_tools/tools/__init__.py |  2 +
 .../scrape_element_from_website.py | 43 +++++++++++++++++++
 .../scrape_website_tool.py         | 36 ++++++++++++++++
 4 files changed, 83 insertions(+)
 create mode 100644 src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py
 create mode 100644 src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py

diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py
index 08e5df185..e417720d7 100644
--- a/src/crewai_tools/__init__.py
+++ b/src/crewai_tools/__init__.py
@@ -14,6 +14,8 @@ from .tools import (
     PDFSearchTool,
     PGSearchTool,
     RagTool,
+    ScrapeElementFromWebsiteTool,
+    ScrapeWebsiteTool,
     WebsiteSearchTool,
     XMLSearchTool,
     YoutubeChannelSearchTool,
diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py
index e2382eb9b..ecea1cb3f 100644
--- a/src/crewai_tools/tools/__init__.py
+++ b/src/crewai_tools/tools/__init__.py
@@ -12,6 +12,8 @@ from .mdx_seach_tool.mdx_search_tool import MDXSearchTool
 from .pdf_search_tool.pdf_search_tool import PDFSearchTool
 from .pg_seach_tool.pg_search_tool import PGSearchTool
 from .rag.rag_tool import RagTool
+from .scrape_element_from_website.scrape_element_from_website import ScrapeElementFromWebsiteTool
+from .scrape_website_tool.scrape_website_tool import ScrapeWebsiteTool
 from .website_search.website_search_tool import WebsiteSearchTool
 from .xml_search_tool.xml_search_tool import XMLSearchTool
 from .youtube_channel_search_tool.youtube_channel_search_tool import YoutubeChannelSearchTool
diff --git a/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py b/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py
new file mode 100644
index 000000000..1996172b2
--- /dev/null
+++ b/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py
@@ -0,0 +1,43 @@
+import requests
+from bs4 import BeautifulSoup
+from typing import Optional, Type, Any
+from pydantic.v1 import BaseModel, Field
+from ..base_tool import BaseTool
+
+class FixedScrapeElementFromWebsiteToolSchema(BaseModel):
+    """Input for ScrapeElementFromWebsiteTool."""
+    pass
+
+class ScrapeElementFromWebsiteToolSchema(FixedScrapeElementFromWebsiteToolSchema):
+    """Input for ScrapeElementFromWebsiteTool."""
+    website_url: str = Field(..., description="Mandatory website url to read the file")
+    css_element: str = Field(..., description="Mandatory css reference for element to scrape from the website")
+
+class ScrapeElementFromWebsiteTool(BaseTool):
+    name: str = "Read a website content"
+    description: str = "A tool that can be used to read a website content."
+    args_schema: Type[BaseModel] = ScrapeElementFromWebsiteToolSchema
+    website_url: Optional[str] = None
+    css_element: Optional[str] = None
+
+    def __init__(self, website_url: Optional[str] = None, css_element: Optional[str] = None, **kwargs):
+        super().__init__(**kwargs)
+        if website_url is not None:
+            self.website_url = website_url
+            self.css_element = css_element
+            self.description = f"A tool that can be used to read {website_url}'s content."
+            self.args_schema = FixedScrapeElementFromWebsiteToolSchema
+
+    def _run(
+        self,
+        **kwargs: Any,
+    ) -> Any:
+        website_url = kwargs.get('website_url', self.website_url)
+        css_element = kwargs.get('css_element', self.css_element)
+        page = requests.get(website_url)
+        parsed = BeautifulSoup(page.content, "html.parser")
+        elements = parsed.select(css_element)
+        return "\n".join([element.get_text() for element in elements])
+
+
+
diff --git a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py
new file mode 100644
index 000000000..8ec16c1ab
--- /dev/null
+++ b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py
@@ -0,0 +1,36 @@
+import requests
+from bs4 import BeautifulSoup
+from typing import Optional, Type, Any
+from pydantic.v1 import BaseModel, Field
+from ..base_tool import BaseTool
+
+class FixedScrapeWebsiteToolSchema(BaseModel):
+    """Input for ScrapeWebsiteTool."""
+    pass
+
+class ScrapeWebsiteToolSchema(FixedScrapeWebsiteToolSchema):
+    """Input for ScrapeWebsiteTool."""
+    website_url: str = Field(..., description="Mandatory website url to read the file")
+
+class ScrapeWebsiteTool(BaseTool):
+    name: str = "Read a website content"
+    description: str = "A tool that can be used to read a website content."
+    args_schema: Type[BaseModel] = ScrapeWebsiteToolSchema
+    website_url: Optional[str] = None
+
+    def __init__(self, website_url: Optional[str] = None, **kwargs):
+        super().__init__(**kwargs)
+        if website_url is not None:
+            self.website_url = website_url
+            self.description = f"A tool that can be used to read {website_url}'s content."
+            self.args_schema = FixedScrapeWebsiteToolSchema
+
+    def _run(
+        self,
+        **kwargs: Any,
+    ) -> Any:
+        website_url = kwargs.get('website_url', self.website_url)
+        page = requests.get(website_url)
+        parsed = BeautifulSoup(page.content, "html.parser")
+        return parsed.get_text()
+
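For illustration only, not part of the patch series: a minimal usage sketch for the two scraping tools added in this patch, assuming the exports above and that BaseTool.run forwards keyword arguments to _run.

```python
from crewai_tools import ScrapeElementFromWebsiteTool, ScrapeWebsiteTool

# Ad-hoc use: the caller (or the agent) supplies the URL at call time.
scraper = ScrapeWebsiteTool()
text = scraper.run(website_url="https://example.com")

# Fixed use: baking the URL and selector in at construction time swaps the
# args schema for the empty Fixed* variant, so run() needs no arguments.
headlines = ScrapeElementFromWebsiteTool(
    website_url="https://example.com",
    css_element="h1",  # hypothetical selector
)
print(headlines.run())
```

Note the design choice the Fixed*Schema classes encode: when the target is fixed up front, the tool advertises an empty input schema, so an agent cannot substitute a different URL at call time.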
From cff6082f1c5843c6a704900710388b7b1cb99a90 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Moura?=
Date: Mon, 26 Feb 2024 06:20:15 -0300
Subject: [PATCH 014/391] improving scrapping tools

---
 .../scrape_element_from_website/scrape_element_from_website.py | 3 ++-
 .../tools/scrape_website_tool/scrape_website_tool.py           | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py b/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py
index 1996172b2..54de3cd39 100644
--- a/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py
+++ b/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py
@@ -19,6 +19,7 @@ class ScrapeElementFromWebsiteTool(BaseTool):
     args_schema: Type[BaseModel] = ScrapeElementFromWebsiteToolSchema
     website_url: Optional[str] = None
     css_element: Optional[str] = None
+    headers: Optional[dict] = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}

     def __init__(self, website_url: Optional[str] = None, css_element: Optional[str] = None, **kwargs):
         super().__init__(**kwargs)
@@ -34,7 +35,7 @@ class ScrapeElementFromWebsiteTool(BaseTool):
     ) -> Any:
         website_url = kwargs.get('website_url', self.website_url)
         css_element = kwargs.get('css_element', self.css_element)
-        page = requests.get(website_url)
+        page = requests.get(website_url, headers=self.headers)
         parsed = BeautifulSoup(page.content, "html.parser")
         elements = parsed.select(css_element)
         return "\n".join([element.get_text() for element in elements])
diff --git a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py
index 8ec16c1ab..e672a9b1d 100644
--- a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py
+++ b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py
@@ -17,6 +17,7 @@ class ScrapeWebsiteTool(BaseTool):
     description: str = "A tool that can be used to read a website content."
     args_schema: Type[BaseModel] = ScrapeWebsiteToolSchema
     website_url: Optional[str] = None
+    headers: Optional[dict] = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}

     def __init__(self, website_url: Optional[str] = None, **kwargs):
         super().__init__(**kwargs)
@@ -30,7 +31,7 @@ class ScrapeWebsiteTool(BaseTool):
         **kwargs: Any,
     ) -> Any:
         website_url = kwargs.get('website_url', self.website_url)
-        page = requests.get(website_url)
+        page = requests.get(website_url, headers=self.headers)
         parsed = BeautifulSoup(page.content, "html.parser")
         return parsed.get_text()
From 79eec51c9a59697f4f3f8eec35189965b0ea24ef Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Moura?=
Date: Mon, 26 Feb 2024 06:52:22 -0300
Subject: [PATCH 015/391] Cutting new version with improved scrapping

---
 .../scrape_element_from_website.py                   | 6 +++++-
 .../tools/scrape_website_tool/scrape_website_tool.py | 6 +++++-
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py b/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py
index 54de3cd39..bee6c22ab 100644
--- a/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py
+++ b/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py
@@ -19,7 +19,11 @@ class ScrapeElementFromWebsiteTool(BaseTool):
     args_schema: Type[BaseModel] = ScrapeElementFromWebsiteToolSchema
     website_url: Optional[str] = None
     css_element: Optional[str] = None
-    headers: Optional[dict] = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
+    headers: Optional[dict] = {
+        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
+        'Accept-Language': 'en-US,en;q=0.5',
+        'Referer': 'https://www.google.com/'
+    }

     def __init__(self, website_url: Optional[str] = None, css_element: Optional[str] = None, **kwargs):
         super().__init__(**kwargs)
diff --git a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py
index e672a9b1d..240948a33 100644
--- a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py
+++ b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py
@@ -17,7 +17,11 @@ class ScrapeWebsiteTool(BaseTool):
     description: str = "A tool that can be used to read a website content."
     args_schema: Type[BaseModel] = ScrapeWebsiteToolSchema
     website_url: Optional[str] = None
-    headers: Optional[dict] = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
+    headers: Optional[dict] = {
+        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
+        'Accept-Language': 'en-US,en;q=0.5',
+        'Referer': 'https://www.google.com/'
+    }

     def __init__(self, website_url: Optional[str] = None, **kwargs):
         super().__init__(**kwargs)
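For illustration only, not part of the patch series: since headers is now a regular pydantic field on both tools, a caller can presumably replace the browser-like defaults per instance. A sketch under that assumption:

```python
from crewai_tools import ScrapeWebsiteTool

# Assumption: `headers` passes through the constructor's **kwargs into the
# pydantic model, replacing the User-Agent/Accept-Language/Referer defaults
# added in this patch wholesale.
tool = ScrapeWebsiteTool(
    website_url="https://example.com",
    headers={"User-Agent": "my-crawler/0.1", "Accept-Language": "en-US"},
)
print(tool.run())
```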
     args_schema: Type[BaseModel] = ScrapeWebsiteToolSchema
     website_url: Optional[str] = None
-    headers: Optional[dict] = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
+    headers: Optional[dict] = {
+        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
+        'Accept-Language': 'en-US,en;q=0.5',
+        'Referer': 'https://www.google.com/'
+    }

     def __init__(self, website_url: Optional[str] = None, **kwargs):
         super().__init__(**kwargs)

From 467b05532f3b2d1d409094d9b80c275fab595d2e Mon Sep 17 00:00:00 2001
From: "Slava Kurilyak (slavakurilyak.eth)"
Date: Tue, 27 Feb 2024 22:33:04 -0300
Subject: [PATCH 016/391] Fix NoneType object has no attribute 'query' error
 by updating app attribute

---
 src/crewai_tools/tools/github_search_tool/github_search_tool.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/crewai_tools/tools/github_search_tool/github_search_tool.py b/src/crewai_tools/tools/github_search_tool/github_search_tool.py
index 7b6066e00..3b90f16ea 100644
--- a/src/crewai_tools/tools/github_search_tool/github_search_tool.py
+++ b/src/crewai_tools/tools/github_search_tool/github_search_tool.py
@@ -41,4 +41,5 @@ class GithubSearchTool(RagTool):
         loader = GithubLoader(config={"token": self.gh_token})
         app = App()
         app.add(f"repo:{github_repo} type:{','.join(self.content_types)}", data_type="github", loader=loader)
+        self.app = app
         return super()._run(query=search_query)
\ No newline at end of file

From 640b5a9461e974b78d36cc5f7b4b4dbc74c85275 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Moura?=
Date: Thu, 29 Feb 2024 03:09:14 -0300
Subject: [PATCH 017/391] adding initial selenium scraping tool

---
 src/crewai_tools/__init__.py                          | 1 +
 src/crewai_tools/tools/__init__.py                    | 1 +
 .../selenium_scraping_tool/selenium_scraping_tool.py  | 78 +++++++++++++++++++
 3 files changed, 80 insertions(+)
 create mode 100644 src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py

diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py
index e417720d7..63dfedc15 100644
--- a/src/crewai_tools/__init__.py
+++ b/src/crewai_tools/__init__.py
@@ -16,6 +16,7 @@ from .tools import (
     RagTool,
     ScrapeElementFromWebsiteTool,
     ScrapeWebsiteTool,
+    SeleniumScrapingTool,
     WebsiteSearchTool,
     XMLSearchTool,
     YoutubeChannelSearchTool,
diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py
index ecea1cb3f..261437d5f 100644
--- a/src/crewai_tools/tools/__init__.py
+++ b/src/crewai_tools/tools/__init__.py
@@ -14,6 +14,7 @@ from .pg_seach_tool.pg_search_tool import PGSearchTool
 from .rag.rag_tool import RagTool
 from .scrape_element_from_website.scrape_element_from_website import ScrapeElementFromWebsiteTool
 from .scrape_website_tool.scrape_website_tool import ScrapeWebsiteTool
+from .selenium_scraping_tool.selenium_scraping_tool import SeleniumScrapingTool
 from .website_search.website_search_tool import WebsiteSearchTool
 from .xml_search_tool.xml_search_tool import XMLSearchTool
 from .youtube_channel_search_tool.youtube_channel_search_tool import YoutubeChannelSearchTool
diff --git a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py
new file mode 100644
index 000000000..6af3e18cb
--- /dev/null
+++ b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py
@@ -0,0 +1,78 @@
+from typing import Optional, Type, Any
+import time
+from pydantic.v1 import BaseModel, Field
+
+from bs4 import BeautifulSoup
+from selenium import webdriver
+from selenium.webdriver.common.by import By
+# Chrome options to match the Chrome driver instantiated below
+from selenium.webdriver.chrome.options import Options
+
+from ..base_tool import BaseTool
+
+class FixedSeleniumScrapingToolSchema(BaseModel):
+    """Input for SeleniumScrapingTool."""
+    pass
+
+class SeleniumScrapingToolSchema(FixedSeleniumScrapingToolSchema):
+    """Input for SeleniumScrapingTool."""
+    website_url: str = Field(..., description="Mandatory website url to read the file")
+    css_element: str = Field(..., description="Mandatory css reference for element to scrape from the website")
+
+class SeleniumScrapingTool(BaseTool):
+    name: str = "Read a website content"
+    description: str = "A tool that can be used to read a website content."
+    args_schema: Type[BaseModel] = SeleniumScrapingToolSchema
+    website_url: Optional[str] = None
+    driver: Optional[Any] = webdriver.Chrome
+    cookie: Optional[dict] = None
+    wait_time: Optional[int] = 3
+    css_element: Optional[str] = None
+
+    def __init__(self, website_url: Optional[str] = None, cookie: Optional[dict] = None, css_element: Optional[str] = None, **kwargs):
+        super().__init__(**kwargs)
+        if cookie is not None:
+            self.cookie = cookie
+
+        if css_element is not None:
+            self.css_element = css_element
+
+        if website_url is not None:
+            self.website_url = website_url
+            self.description = f"A tool that can be used to read {website_url}'s content."
+            self.args_schema = FixedSeleniumScrapingToolSchema
+
+        self._generate_description()
+
+    def _run(
+        self,
+        **kwargs: Any,
+    ) -> Any:
+        website_url = kwargs.get('website_url', self.website_url)
+        css_element = kwargs.get('css_element', self.css_element)
+        driver = self._create_driver(website_url, self.cookie, self.wait_time)
+
+        content = []
+        if css_element is None or css_element.strip() == "":
+            # No selector given: fall back to the full body text
+            body_text = driver.find_element(By.TAG_NAME, "body").text
+            content.append(body_text)
+        else:
+            for element in driver.find_elements(By.CSS_SELECTOR, css_element):
+                content.append(element.text)
+        driver.close()
+        return "\n".join(content)
+
+    def _create_driver(self, url, cookie, wait_time):
+        options = Options()
+        options.add_argument("--headless")
+        driver = self.driver(options=options)
+        driver.get(url)
+        time.sleep(wait_time)
+        if cookie:
+            driver.add_cookie(cookie)
+            time.sleep(wait_time)
+            driver.get(url)
+            time.sleep(wait_time)
+        return driver
+
+    def close(self):
+        self.driver.close()
\ No newline at end of file

From ec97e15a3a3601311b98f254270315f5d7c843cd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Moura?=
Date: Thu, 29 Feb 2024 03:09:48 -0300
Subject: [PATCH 018/391] Adding new description generator

---
 src/crewai_tools/tools/base_tool.py                | 9 +++++++++
 .../code_docs_search_tool.py                       | 1 +
 .../tools/csv_search_tool/csv_search_tool.py       | 1 +
 .../directory_read_tool.py                         | 1 +
 .../directory_search_tool.py                       | 1 +
 .../docx_search_tool/docx_search_tool.py           | 1 +
 .../tools/file_read_tool/file_read_tool.py         | 1 +
 .../github_search_tool/github_search_tool.py       | 1 +
 .../json_search_tool/json_search_tool.py           | 1 +
 .../tools/mdx_seach_tool/mdx_search_tool.py        | 1 +
 .../tools/pdf_search_tool/pdf_search_tool.py       | 1 +
 .../tools/pg_seach_tool/pg_search_tool.py          | 1 +
 .../scrape_element_from_website.py                 | 19 ++++++++++++++-----
 .../scrape_website_tool.py                         | 19 ++++++++++++++-----
 .../tools/txt_search_tool/txt_search_tool.py       | 1 +
 .../website_search/website_search_tool.py          | 1 +
 .../tools/xml_search_tool/xml_search_tool.py       | 
1 + .../youtube_channel_search_tool.py | 1 + .../youtube_video_search_tool.py | 1 + 19 files changed, 53 insertions(+), 10 deletions(-) diff --git a/src/crewai_tools/tools/base_tool.py b/src/crewai_tools/tools/base_tool.py index dc679f833..2f19184ea 100644 --- a/src/crewai_tools/tools/base_tool.py +++ b/src/crewai_tools/tools/base_tool.py @@ -13,10 +13,12 @@ class BaseTool(BaseModel, ABC): """Used to tell the model how/when/why to use the tool.""" args_schema: Optional[Type[V1BaseModel]] = None """The schema for the arguments that the tool accepts.""" + description_updated: bool = False @model_validator(mode="after") def _check_args_schema(self): self._set_args_schema() + self._generate_description() return self def run( @@ -56,6 +58,13 @@ class BaseTool(BaseModel, ABC): }, }, ) + def _generate_description(self): + args = [] + for arg, attribute in self.args_schema.schema()['properties'].items(): + args.append(f"{arg}: '{attribute['type']}'") + + description = self.description.replace('\n', ' ') + self.description = f"{self.name}({', '.join(args)}) - {description}" class Tool(BaseTool): diff --git a/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py b/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py index fd0acf4ca..54ba69d01 100644 --- a/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py +++ b/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py @@ -28,6 +28,7 @@ class CodeDocsSearchTool(RagTool): self.docs_url = docs_url self.description = f"A tool that can be used to semantic search a query the {docs_url} Code Docs content." self.args_schema = FixedCodeDocsSearchToolSchema + self._generate_description() def _run( self, diff --git a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py index 8cc06e263..dcfdd82c8 100644 --- a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py +++ b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py @@ -28,6 +28,7 @@ class CSVSearchTool(RagTool): self.csv = csv self.description = f"A tool that can be used to semantic search a query the {csv} CSV's content." self.args_schema = FixedCSVSearchToolSchema + self._generate_description() def _run( self, diff --git a/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py b/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py index 94fcce076..8b569e5f6 100644 --- a/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py +++ b/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py @@ -23,6 +23,7 @@ class DirectoryReadTool(BaseTool): self.directory = directory self.description = f"A tool that can be used to list {directory}'s content." self.args_schema = FixedDirectoryReadToolSchema + self._generate_description() def _run( self, diff --git a/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py b/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py index 39c34fc93..2cd888a8b 100644 --- a/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py +++ b/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py @@ -28,6 +28,7 @@ class DirectorySearchTool(RagTool): self.directory = directory self.description = f"A tool that can be used to semantic search a query the {directory} directory's content." 
self.args_schema = FixedDirectorySearchToolSchema + self._generate_description() def _run( self, diff --git a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py index 1a52e5f3b..135837a6b 100644 --- a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py +++ b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py @@ -28,6 +28,7 @@ class DOCXSearchTool(RagTool): self.docx = docx self.description = f"A tool that can be used to semantic search a query the {docx} DOCX's content." self.args_schema = FixedDOCXSearchToolSchema + self._generate_description() def _run( self, diff --git a/src/crewai_tools/tools/file_read_tool/file_read_tool.py b/src/crewai_tools/tools/file_read_tool/file_read_tool.py index 8c2e8dcca..8c7643852 100644 --- a/src/crewai_tools/tools/file_read_tool/file_read_tool.py +++ b/src/crewai_tools/tools/file_read_tool/file_read_tool.py @@ -22,6 +22,7 @@ class FileReadTool(BaseTool): self.file_path = file_path self.description = f"A tool that can be used to read {file_path}'s content." self.args_schema = FixedFileReadToolSchema + self._generate_description() def _run( self, diff --git a/src/crewai_tools/tools/github_search_tool/github_search_tool.py b/src/crewai_tools/tools/github_search_tool/github_search_tool.py index 3b90f16ea..cb2815aad 100644 --- a/src/crewai_tools/tools/github_search_tool/github_search_tool.py +++ b/src/crewai_tools/tools/github_search_tool/github_search_tool.py @@ -31,6 +31,7 @@ class GithubSearchTool(RagTool): self.github_repo = github_repo self.description = f"A tool that can be used to semantic search a query the {github_repo} github repo's content." self.args_schema = FixedGithubSearchToolSchema + self._generate_description() def _run( self, diff --git a/src/crewai_tools/tools/json_search_tool/json_search_tool.py b/src/crewai_tools/tools/json_search_tool/json_search_tool.py index 89e515e78..578f06bc9 100644 --- a/src/crewai_tools/tools/json_search_tool/json_search_tool.py +++ b/src/crewai_tools/tools/json_search_tool/json_search_tool.py @@ -28,6 +28,7 @@ class JSONSearchTool(RagTool): self.json_path = json_path self.description = f"A tool that can be used to semantic search a query the {json} JSON's content." self.args_schema = FixedJSONSearchToolSchema + self._generate_description() def _run( self, diff --git a/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py b/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py index 0f4deb056..e34c0fa08 100644 --- a/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py +++ b/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py @@ -28,6 +28,7 @@ class MDXSearchTool(RagTool): self.mdx = mdx self.description = f"A tool that can be used to semantic search a query the {mdx} MDX's content." self.args_schema = FixedMDXSearchToolSchema + self._generate_description() def _run( self, diff --git a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py index ba54e34ca..bb85673ba 100644 --- a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py +++ b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py @@ -28,6 +28,7 @@ class PDFSearchTool(RagTool): self.pdf = pdf self.description = f"A tool that can be used to semantic search a query the {pdf} PDF's content." 
self.args_schema = FixedPDFSearchToolSchema + self._generate_description() def _run( self, diff --git a/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py b/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py index f625bebc9..8b9707185 100644 --- a/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py +++ b/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py @@ -24,6 +24,7 @@ class PGSearchTool(RagTool): if table_name is not None: self.table_name = table_name self.description = f"A tool that can be used to semantic search a query the {table_name} database table's content." + self._generate_description() else: raise('To use PGSearchTool, you must provide a `table_name` argument') diff --git a/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py b/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py index bee6c22ab..36bc088e5 100644 --- a/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py +++ b/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py @@ -1,3 +1,4 @@ +import os import requests from bs4 import BeautifulSoup from typing import Optional, Type, Any @@ -18,20 +19,28 @@ class ScrapeElementFromWebsiteTool(BaseTool): description: str = "A tool that can be used to read a website content." args_schema: Type[BaseModel] = ScrapeElementFromWebsiteToolSchema website_url: Optional[str] = None + cookies: Optional[dict] = None css_element: Optional[str] = None headers: Optional[dict] = { - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3', - 'Accept-Language': 'en-US,en;q=0.5', - 'Referer': 'https://www.google.com/' + 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36', + 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', + 'Accept-Language': 'en-US,en;q=0.9', + 'Referer': 'https://www.google.com/', + 'Connection': 'keep-alive', + 'Upgrade-Insecure-Requests': '1', + 'Accept-Encoding': 'gzip, deflate, br' } - def __init__(self, website_url: Optional[str] = None, css_element: Optional[str] = None, **kwargs): + def __init__(self, website_url: Optional[str] = None, cookies: Optional[dict] = None, css_element: Optional[str] = None, **kwargs): super().__init__(**kwargs) if website_url is not None: self.website_url = website_url self.css_element = css_element self.description = f"A tool that can be used to read {website_url}'s content." 
self.args_schema = FixedScrapeElementFromWebsiteToolSchema + self._generate_description() + if cookies is not None: + self.cookies = {cookies["name"]: os.getenv(cookies["value"])} def _run( self, @@ -39,7 +48,7 @@ class ScrapeElementFromWebsiteTool(BaseTool): ) -> Any: website_url = kwargs.get('website_url', self.website_url) css_element = kwargs.get('css_element', self.css_element) - page = requests.get(website_url, headers=self.headers) + page = requests.get(website_url, headers=self.headers, cookies=self.cookies if self.cookies else {}) parsed = BeautifulSoup(page.content, "html.parser") elements = parsed.select(css_element) return "\n".join([element.get_text() for element in elements]) diff --git a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py index 240948a33..623d785dc 100644 --- a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py +++ b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py @@ -1,3 +1,4 @@ +import os import requests from bs4 import BeautifulSoup from typing import Optional, Type, Any @@ -17,25 +18,33 @@ class ScrapeWebsiteTool(BaseTool): description: str = "A tool that can be used to read a website content." args_schema: Type[BaseModel] = ScrapeWebsiteToolSchema website_url: Optional[str] = None + cookies: Optional[dict] = None headers: Optional[dict] = { - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3', - 'Accept-Language': 'en-US,en;q=0.5', - 'Referer': 'https://www.google.com/' + 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36', + 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', + 'Accept-Language': 'en-US,en;q=0.9', + 'Referer': 'https://www.google.com/', + 'Connection': 'keep-alive', + 'Upgrade-Insecure-Requests': '1', + 'Accept-Encoding': 'gzip, deflate, br' } - def __init__(self, website_url: Optional[str] = None, **kwargs): + def __init__(self, website_url: Optional[str] = None, cookies: Optional[dict] = None, **kwargs): super().__init__(**kwargs) if website_url is not None: self.website_url = website_url self.description = f"A tool that can be used to read {website_url}'s content." self.args_schema = FixedScrapeWebsiteToolSchema + self._generate_description() + if cookies is not None: + self.cookies = {cookies["name"]: os.getenv(cookies["value"])} def _run( self, **kwargs: Any, ) -> Any: website_url = kwargs.get('website_url', self.website_url) - page = requests.get(website_url, headers=self.headers) + page = requests.get(website_url, headers=self.headers, cookies=self.cookies if self.cookies else {}) parsed = BeautifulSoup(page.content, "html.parser") return parsed.get_text() diff --git a/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py b/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py index 130f6f164..0a61eae53 100644 --- a/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py +++ b/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py @@ -27,6 +27,7 @@ class TXTSearchTool(RagTool): self.txt = txt self.description = f"A tool that can be used to semantic search a query the {txt} txt's content." 
self.args_schema = FixedTXTSearchToolSchema + self._generate_description() def _run( self, diff --git a/src/crewai_tools/tools/website_search/website_search_tool.py b/src/crewai_tools/tools/website_search/website_search_tool.py index f4cffa9c9..37744f2b6 100644 --- a/src/crewai_tools/tools/website_search/website_search_tool.py +++ b/src/crewai_tools/tools/website_search/website_search_tool.py @@ -28,6 +28,7 @@ class WebsiteSearchTool(RagTool): self.website = website self.description = f"A tool that can be used to semantic search a query from {website} website content." self.args_schema = FixedWebsiteSearchToolSchema + self._generate_description() def _run( self, diff --git a/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py b/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py index 9259b819f..90cedfa56 100644 --- a/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py +++ b/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py @@ -28,6 +28,7 @@ class XMLSearchTool(RagTool): self.xml = xml self.description = f"A tool that can be used to semantic search a query the {xml} XML's content." self.args_schema = FixedXMLSearchToolSchema + self._generate_description() def _run( self, diff --git a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py index 9b4e51688..fcdfe78c9 100644 --- a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py +++ b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py @@ -28,6 +28,7 @@ class YoutubeChannelSearchTool(RagTool): self.youtube_channel_handle = youtube_channel_handle self.description = f"A tool that can be used to semantic search a query the {youtube_channel_handle} Youtube Channels content." self.args_schema = FixedYoutubeChannelSearchToolSchema + self._generate_description() def _run( self, diff --git a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py index 7b26c8e90..20aa9691d 100644 --- a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py +++ b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py @@ -28,6 +28,7 @@ class YoutubeVideoSearchTool(RagTool): self.youtube_video_url = youtube_video_url self.description = f"A tool that can be used to semantic search a query the {youtube_video_url} Youtube Video content." 
     self.args_schema = FixedYoutubeVideoSearchToolSchema
+        self._generate_description()

     def _run(
         self,

From 51358b3cc4dfa07732e38eeea91b0240aaa7412a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Moura?=
Date: Sat, 2 Mar 2024 12:41:55 -0300
Subject: [PATCH 019/391] updating scraping tool

---
 .../tools/scrape_website_tool/scrape_website_tool.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py
index 623d785dc..cd8fd50d3 100644
--- a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py
+++ b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py
@@ -14,7 +14,7 @@ class ScrapeWebsiteToolSchema(FixedScrapeWebsiteToolSchema):
     website_url: str = Field(..., description="Mandatory website url to read the file")

 class ScrapeWebsiteTool(BaseTool):
-    name: str = "Read a website content"
+    name: str = "Read website content"
     description: str = "A tool that can be used to read a website content."
     args_schema: Type[BaseModel] = ScrapeWebsiteToolSchema
     website_url: Optional[str] = None
@@ -46,5 +46,8 @@ class ScrapeWebsiteTool(BaseTool):
         website_url = kwargs.get('website_url', self.website_url)
         page = requests.get(website_url, headers=self.headers, cookies=self.cookies if self.cookies else {})
         parsed = BeautifulSoup(page.content, "html.parser")
-        return parsed.get_text()
+        text = parsed.get_text()
+        text = '\n'.join([i for i in text.split('\n') if i.strip() != ''])
+        text = ' '.join([i for i in text.split(' ') if i.strip() != ''])
+        return text

From d19814d7a3b8fc22f35f8016c906fbc8b917840d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Moura?=
Date: Sun, 3 Mar 2024 12:29:32 -0300
Subject: [PATCH 020/391] fixing TYPO

---
 src/crewai_tools/tools/csv_search_tool/csv_search_tool.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py
index dcfdd82c8..cd99ebfd2 100644
--- a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py
+++ b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py
@@ -13,7 +13,7 @@ class FixedCSVSearchToolSchema(BaseModel):

 class CSVSearchToolSchema(FixedCSVSearchToolSchema):
     """Input for CSVSearchTool."""
-    pdf: str = Field(..., description="Mandatory csv path you want to search")
+    csv: str = Field(..., description="Mandatory csv path you want to search")

 class CSVSearchTool(RagTool):
     name: str = "Search a CSV's content"

From 7f683b21f57dd1a2180b21eee166305ba62a9563 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Moura?=
Date: Sun, 3 Mar 2024 12:29:46 -0300
Subject: [PATCH 021/391] preparing new version

---
 src/crewai_tools/__init__.py       | 2 +-
 src/crewai_tools/tools/__init__.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py
index 63dfedc15..642ad703c 100644
--- a/src/crewai_tools/__init__.py
+++ b/src/crewai_tools/__init__.py
@@ -7,7 +7,7 @@ from .tools import (
     DirectoryReadTool,
     FileReadTool,
     GithubSearchTool,
-    SeperDevTool,
+    SerperDevTool,
     TXTSearchTool,
     JSONSearchTool,
     MDXSearchTool,
diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py
index 261437d5f..99860a14f 100644
--- a/src/crewai_tools/tools/__init__.py
+++ b/src/crewai_tools/tools/__init__.py
@@ -5,7 +5,7 @@ from .directory_read_tool.directory_read_tool import DirectoryReadTool
 from .docx_search_tool.docx_search_tool import DOCXSearchTool
 from .file_read_tool.file_read_tool import FileReadTool
 from .github_search_tool.github_search_tool import GithubSearchTool
-from .serper_dev_tool.serper_dev_tool import SeperDevTool
+from .serper_dev_tool.serper_dev_tool import SerperDevTool
 from .txt_search_tool.txt_search_tool import TXTSearchTool
 from .json_search_tool.json_search_tool import JSONSearchTool
 from .mdx_seach_tool.mdx_search_tool import MDXSearchTool

From 8ffbd9665b04b491021aeb96484ce670a8b2bbeb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Moura?=
Date: Sun, 3 Mar 2024 12:32:13 -0300
Subject: [PATCH 022/391] avoiding organic error on serper dev tool

---
 .../tools/serper_dev_tool/serper_dev_tool.py | 38 ++++++++++---------
 1 file changed, 21 insertions(+), 17 deletions(-)

diff --git a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py
index 9da634e3d..3fbf5ea61 100644
--- a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py
+++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py
@@ -6,14 +6,14 @@ from typing import Type, Any
 from pydantic.v1 import BaseModel, Field
 from crewai_tools.tools.base_tool import BaseTool

-class SeperDevToolSchema(BaseModel):
+class SerperDevToolSchema(BaseModel):
     """Input for TXTSearchTool."""
     search_query: str = Field(..., description="Mandatory search query you want to use to search the internet")

-class SeperDevTool(BaseTool):
+class SerperDevTool(BaseTool):
     name: str = "Search the internet"
     description: str = "A tool that can be used to semantic search a query from a txt's content."
-    args_schema: Type[BaseModel] = SeperDevToolSchema
+    args_schema: Type[BaseModel] = SerperDevToolSchema
     search_url: str = "https://google.serper.dev/search"
     n_results: int = None

@@ -28,18 +28,22 @@ class SeperDevTool(BaseTool):
             'content-type': 'application/json'
         }
         response = requests.request("POST", self.search_url, headers=headers, data=payload)
-        results = response.json()['organic']
-        stirng = []
-        for result in results:
-            try:
-                stirng.append('\n'.join([
-                    f"Title: {result['title']}",
-                    f"Link: {result['link']}",
-                    f"Snippet: {result['snippet']}",
-                    "---"
-                ]))
-            except KeyError:
-                next
+        results = response.json()
+        if 'organic' in results:
+            results = results['organic']
+            string = []
+            for result in results:
+                try:
+                    string.append('\n'.join([
+                        f"Title: {result['title']}",
+                        f"Link: {result['link']}",
+                        f"Snippet: {result['snippet']}",
+                        "---"
+                    ]))
+                except KeyError:
+                    continue

-        content = '\n'.join(stirng)
-        return f"\nSearch results: {content}\n"
+            content = '\n'.join(string)
+            return f"\nSearch results: {content}\n"
+        else:
+            return results

From 37aa8d6b63b70a7c9a67fc4017f9dc5962e7c9b3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Moura?=
Date: Sun, 3 Mar 2024 22:14:30 -0300
Subject: [PATCH 023/391] Adding docs for all tools

---
 .../tools/code_docs_search_tool/README.md     | 28 +++++++++
 .../tools/csv_search_tool/README.md           | 31 ++++++++++++
 .../tools/directory_read_tool/README.md       | 40 +++++++++++++++++++
 .../tools/directory_search_tool/README.md     | 27 +++++++++++++
 .../tools/docx_search_tool/README.md          | 29 ++++++++++++++
 .../tools/file_read_tool/README.md            | 29 ++++++++++++++
 .../tools/github_search_tool/README.md        | 36 +++++++++++++++++
 .../tools/json_search_tool/README.md          | 27 +++++++++++++
 .../tools/mdx_seach_tool/README.md            | 29 ++++++++++++++
 .../tools/pdf_search_tool/README.md           | 29 ++++++++++++++
 .../tools/pg_seach_tool/README.md             | 28 +++++++++++++
 .../tools/scrape_website_tool/README.md       | 24 +++++++++
 .../tools/selenium_scraping_tool/README.md    | 33 +++++++++++++++
 .../tools/serper_dev_tool/README.md           | 30 ++++++++++++++
 .../tools/txt_search_tool/README.md           | 31 ++++++++++++++
 .../tools/website_search/README.md            | 29 ++++++++++++++
 .../tools/xml_search_tool/README.md           | 29 ++++++++++++++
 .../youtube_channel_search_tool/README.md     | 29 ++++++++++++++
 .../tools/youtube_video_search_tool/README.md | 32 +++++++++++++++
 19 files changed, 570 insertions(+)
 create mode 100644 src/crewai_tools/tools/code_docs_search_tool/README.md
 create mode 100644 src/crewai_tools/tools/csv_search_tool/README.md
 create mode 100644 src/crewai_tools/tools/directory_read_tool/README.md
 create mode 100644 src/crewai_tools/tools/directory_search_tool/README.md
 create mode 100644 src/crewai_tools/tools/docx_search_tool/README.md
 create mode 100644 src/crewai_tools/tools/file_read_tool/README.md
 create mode 100644 src/crewai_tools/tools/github_search_tool/README.md
 create mode 100644 src/crewai_tools/tools/json_search_tool/README.md
 create mode 100644 src/crewai_tools/tools/mdx_seach_tool/README.md
 create mode 100644 src/crewai_tools/tools/pdf_search_tool/README.md
 create mode 100644 src/crewai_tools/tools/pg_seach_tool/README.md
 create mode 100644 src/crewai_tools/tools/scrape_website_tool/README.md
 create mode 100644 src/crewai_tools/tools/selenium_scraping_tool/README.md
 create mode 100644 src/crewai_tools/tools/serper_dev_tool/README.md
 create mode 100644 src/crewai_tools/tools/txt_search_tool/README.md
 create mode 100644 src/crewai_tools/tools/website_search/README.md
 create mode 100644 src/crewai_tools/tools/xml_search_tool/README.md
 create mode 100644 src/crewai_tools/tools/youtube_channel_search_tool/README.md
 create mode 100644 src/crewai_tools/tools/youtube_video_search_tool/README.md

diff --git a/src/crewai_tools/tools/code_docs_search_tool/README.md b/src/crewai_tools/tools/code_docs_search_tool/README.md
new file mode 100644
index 000000000..6c58a49bc
--- /dev/null
+++ b/src/crewai_tools/tools/code_docs_search_tool/README.md
@@ -0,0 +1,28 @@
+# CodeDocsSearchTool
+
+## Description
+The CodeDocsSearchTool is a powerful RAG (Retrieval-Augmented Generation) tool designed for semantic searches within code documentation. It enables users to efficiently find specific information or topics inside documentation sites. By providing a `docs_url` during initialization, the tool narrows the search to that particular documentation site. Without a specific `docs_url`, it searches across any code documentation known or discovered during execution, making it versatile for various documentation search needs.
+
+## Installation
+To start using the CodeDocsSearchTool, first install the crewai_tools package via pip:
+```shell
+pip install 'crewai[tools]'
+```
+
+## Example
+Utilize the CodeDocsSearchTool as follows to conduct searches within code documentation:
+```python
+from crewai_tools import CodeDocsSearchTool

+# To search any code documentation content if the URL is known or discovered during its execution:
+tool = CodeDocsSearchTool()
+
+# OR
+
+# To specifically focus your search on a given documentation site by providing its URL:
+tool = CodeDocsSearchTool(docs_url='https://docs.example.com/reference')
+```
+Note: Substitute 'https://docs.example.com/reference' with your target documentation URL.
+
+## Arguments
+- `docs_url`: Optional. Specifies the URL of the code documentation to be searched. Providing this during the tool's initialization focuses the search on the specified documentation content.
\ No newline at end of file
diff --git a/src/crewai_tools/tools/csv_search_tool/README.md b/src/crewai_tools/tools/csv_search_tool/README.md
new file mode 100644
index 000000000..66dd46cc0
--- /dev/null
+++ b/src/crewai_tools/tools/csv_search_tool/README.md
@@ -0,0 +1,31 @@
+# CSVSearchTool
+
+## Description
+
+This tool is used to perform a RAG (Retrieval-Augmented Generation) search within a CSV file's content. It allows users to semantically search for queries in the content of a specified CSV file. This feature is particularly useful for extracting information from large CSV datasets where traditional search methods might be inefficient. All tools with "Search" in their name, including CSVSearchTool, are RAG tools designed for searching different sources of data.
+
+## Installation
+
+Install the crewai_tools package
+
+```shell
+pip install 'crewai[tools]'
+```
+
+## Example
+
+```python
+from crewai_tools import CSVSearchTool
+
+# Initialize the tool with a specific CSV file. This setup allows the agent to only search the given CSV file.
+tool = CSVSearchTool(csv='path/to/your/csvfile.csv')
+
+# OR
+
+# Initialize the tool without a specific CSV file. Agent will need to provide the CSV path at runtime.
+tool = CSVSearchTool()
+```
+
+## Arguments
+
+- `csv` : The path to the CSV file you want to search. This is a mandatory argument if the tool was initialized without a specific CSV file; otherwise, it is optional.
\ No newline at end of file
diff --git a/src/crewai_tools/tools/directory_read_tool/README.md b/src/crewai_tools/tools/directory_read_tool/README.md
new file mode 100644
index 000000000..b1bed2e6b
--- /dev/null
+++ b/src/crewai_tools/tools/directory_read_tool/README.md
@@ -0,0 +1,40 @@
+# DirectoryReadTool
+
+## Description
+The DirectoryReadTool is a highly efficient utility designed for the comprehensive listing of directory contents. It recursively navigates through the specified directory, providing users with a detailed enumeration of all files, including those nested within subdirectories. This tool is indispensable for tasks requiring a thorough inventory of directory structures or for validating the organization of files within directories.
+
+## Installation
+Install the `crewai_tools` package to use the DirectoryReadTool in your project. If you haven't added this package to your environment, you can easily install it with pip using the following command:
+
+```shell
+pip install 'crewai[tools]'
+```
+
+This installs the latest version of the `crewai_tools` package, allowing access to the DirectoryReadTool and other utilities.
+
+## Example
+The DirectoryReadTool is simple to use. The code snippet below shows how to set up and use the tool to list the contents of a specified directory:
+
+```python
+from crewai_tools import DirectoryReadTool
+
+# Initialize the tool with the directory you want to explore
+tool = DirectoryReadTool(directory='/path/to/your/directory')
+
+# Use the tool to list the contents of the specified directory
+directory_contents = tool.run()
+print(directory_contents)
+```
+
+This example demonstrates the essential steps to utilize the DirectoryReadTool effectively, highlighting its simplicity and user-friendly design.
+
+## Arguments
+The DirectoryReadTool requires minimal configuration for use. The essential argument for this tool is as follows:
+
+- `directory`: A mandatory argument that specifies the path to the directory whose contents you wish to list. It accepts both absolute and relative paths, guiding the tool to the desired directory for content listing.
+
+The DirectoryReadTool provides a user-friendly and efficient way to list directory contents, making it an invaluable tool for managing and inspecting directory structures.
\ No newline at end of file
diff --git a/src/crewai_tools/tools/directory_search_tool/README.md b/src/crewai_tools/tools/directory_search_tool/README.md
new file mode 100644
index 000000000..acc971807
--- /dev/null
+++ b/src/crewai_tools/tools/directory_search_tool/README.md
@@ -0,0 +1,27 @@
+# DirectorySearchTool
+
+## Description
+This tool is designed to perform a semantic search for queries within the content of a specified directory. Utilizing the RAG (Retrieval-Augmented Generation) methodology, it offers a powerful means to semantically navigate through the files of a given directory. The tool can be dynamically set to search any directory specified at runtime or can be pre-configured to search within a specific directory upon initialization.
+
+## Installation
+To start using the DirectorySearchTool, you need to install the crewai_tools package. Execute the following command in your terminal:
+
+```shell
+pip install 'crewai[tools]'
+```
+
+## Example
+The following examples demonstrate how to initialize the DirectorySearchTool for different use cases and how to perform a search:
+
+```python
+from crewai_tools import DirectorySearchTool
+
+# To enable searching within any specified directory at runtime
+tool = DirectorySearchTool()
+
+# Alternatively, to restrict searches to a specific directory
+tool = DirectorySearchTool(directory='/path/to/directory')
+```
+
+## Arguments
+- `directory` : This string argument specifies the directory within which to search. It is mandatory if the tool has not been initialized with a directory; otherwise, the tool will only search within the initialized directory.
\ No newline at end of file
diff --git a/src/crewai_tools/tools/docx_search_tool/README.md b/src/crewai_tools/tools/docx_search_tool/README.md
new file mode 100644
index 000000000..033c72226
--- /dev/null
+++ b/src/crewai_tools/tools/docx_search_tool/README.md
@@ -0,0 +1,29 @@
+# DOCXSearchTool
+
+## Description
+The DOCXSearchTool is a RAG tool designed for semantic searching within DOCX documents. It enables users to effectively search and extract relevant information from DOCX files using query-based searches. This tool is invaluable for data analysis, information management, and research tasks, streamlining the process of finding specific information within large document collections.
+
+## Installation
+Install the crewai_tools package by running the following command in your terminal:
+
+```shell
+pip install 'crewai[tools]'
+```
+
+## Example
+The following example demonstrates initializing the DOCXSearchTool to search within any DOCX file's content or with a specific DOCX file path.
+
+```python
+from crewai_tools import DOCXSearchTool
+
+# Initialize the tool to search within any DOCX file's content
+tool = DOCXSearchTool()
+
+# OR
+
+# Initialize the tool with a specific DOCX file, so the agent can only search the content of the specified DOCX file
+tool = DOCXSearchTool(docx='path/to/your/document.docx')
+```
+
+## Arguments
+- `docx`: An optional file path to a specific DOCX document you wish to search. If not provided during initialization, the path to a DOCX file can be supplied at run time instead.
diff --git a/src/crewai_tools/tools/file_read_tool/README.md b/src/crewai_tools/tools/file_read_tool/README.md
new file mode 100644
index 000000000..d877d13f4
--- /dev/null
+++ b/src/crewai_tools/tools/file_read_tool/README.md
@@ -0,0 +1,29 @@
+# FileReadTool
+
+## Description
+The FileReadTool is a versatile component of the crewai_tools package, designed to streamline the process of reading and retrieving content from files. It is particularly useful in scenarios such as batch text file processing, runtime configuration file reading, and data importation for analytics. This tool supports various text-based file formats including `.txt`, `.csv`, `.json`, and adapts its functionality based on the file type, for instance, converting JSON content into a Python dictionary for easy use.
+
+## Installation
+Install the crewai_tools package to use the FileReadTool in your projects:
+
+```shell
+pip install 'crewai[tools]'
+```
+
+## Example
+To get started with the FileReadTool:
+
+```python
+from crewai_tools import FileReadTool
+
+# Initialize the tool so the agent can read any file whose path it knows or learns during execution
+file_read_tool = FileReadTool()
+
+# OR
+
+# Initialize the tool with a specific file path, so the agent can only read the content of the specified file
+file_read_tool = FileReadTool(file_path='path/to/your/file.txt')
+```
+
+## Arguments
+- `file_path`: The path to the file you want to read. It accepts both absolute and relative paths. Ensure the file exists and you have the necessary permissions to access it.
\ No newline at end of file
diff --git a/src/crewai_tools/tools/github_search_tool/README.md b/src/crewai_tools/tools/github_search_tool/README.md
new file mode 100644
index 000000000..0ed6faeef
--- /dev/null
+++ b/src/crewai_tools/tools/github_search_tool/README.md
@@ -0,0 +1,36 @@
+# GithubSearchTool
+
+## Description
+The GithubSearchTool is a RAG (Retrieval-Augmented Generation) tool specifically designed for conducting semantic searches within GitHub repositories. Utilizing advanced semantic search capabilities, it sifts through code, pull requests, issues, and repositories, making it an essential tool for developers, researchers, or anyone in need of precise information from GitHub.
+
+## Installation
+To use the GithubSearchTool, first ensure the crewai_tools package is installed in your Python environment:
+
+```shell
+pip install 'crewai[tools]'
+```
+
+This command installs the necessary package to run the GithubSearchTool along with any other tools included in the crewai_tools package.
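+
+As a quick check after installation, the tool can also be invoked directly, outside of an agent. The sketch below is illustrative only: it assumes the `search_query` argument exposed by the tool's schema and a `gh_token` field for authentication, and it uses placeholder values throughout:
+
+```python
+from crewai_tools import GithubSearchTool
+
+# Placeholder token; use your own GitHub personal access token.
+tool = GithubSearchTool(
+    gh_token='<YOUR_GITHUB_TOKEN>',
+    github_repo='https://github.com/example/repo',
+    content_types=['code', 'issue'],
+)
+
+# Runs a semantic search over the indexed repository content.
+print(tool.run(search_query='How does the project handle authentication?'))
+```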
+
+## Example
+Here's how you can use the GithubSearchTool to perform semantic searches within a GitHub repository:
+```python
+from crewai_tools import GithubSearchTool
+
+# Initialize the tool for semantic searches within a specific GitHub repository
+tool = GithubSearchTool(
+	github_repo='https://github.com/example/repo',
+	content_types=['code', 'issue'] # Options: code, repo, pr, issue
+)
+
+# OR
+
+# Initialize the tool without a repository, so the agent can search any repository it learns about during its execution
+tool = GithubSearchTool(
+	content_types=['code', 'issue'] # Options: code, repo, pr, issue
+)
+```
+
+## Arguments
+- `github_repo` : The URL of the GitHub repository where the search will be conducted. This specifies the target repository and is required unless the tool is initialized without one, in which case the repository is supplied at run time.
+- `content_types` : Specifies the types of content to include in your search. You must provide a list of content types from the following options: `code` for searching within the code, `repo` for searching within the repository's general information, `pr` for searching within pull requests, and `issue` for searching within issues. This field is mandatory and allows tailoring the search to specific content types within the GitHub repository.
diff --git a/src/crewai_tools/tools/json_search_tool/README.md b/src/crewai_tools/tools/json_search_tool/README.md
new file mode 100644
index 000000000..df43cc4a6
--- /dev/null
+++ b/src/crewai_tools/tools/json_search_tool/README.md
@@ -0,0 +1,27 @@
+# JSONSearchTool
+
+## Description
+This tool is used to perform a RAG search within a JSON file's content. It allows users to initiate a search with a specific JSON path, focusing the search operation within that particular JSON file. If the path is provided at initialization, the tool restricts its search scope to the specified JSON file, thereby enhancing the precision of search results.
+
+## Installation
+Install the crewai_tools package by executing the following command in your terminal:
+
+```shell
+pip install 'crewai[tools]'
+```
+
+## Example
+Below are examples demonstrating how to use the JSONSearchTool for searching within JSON files. You can either search any JSON content or restrict the search to a specific JSON file.
+
+```python
+from crewai_tools import JSONSearchTool
+
+# Example 1: Initialize the tool for a general search across any JSON content. This is useful when the path is known or can be discovered during execution.
+tool = JSONSearchTool()
+
+# Example 2: Initialize the tool with a specific JSON path, limiting the search to a particular JSON file.
+tool = JSONSearchTool(json_path='./path/to/your/file.json')
+```
+
+## Arguments
+- `json_path` (str): An optional argument that defines the path to the JSON file to be searched. This parameter is only necessary if the tool is initialized without a specific JSON path. Providing this argument restricts the search to the specified JSON file.
\ No newline at end of file
diff --git a/src/crewai_tools/tools/mdx_seach_tool/README.md b/src/crewai_tools/tools/mdx_seach_tool/README.md
new file mode 100644
index 000000000..751988c9b
--- /dev/null
+++ b/src/crewai_tools/tools/mdx_seach_tool/README.md
@@ -0,0 +1,29 @@
+# MDXSearchTool
+
+## Description
+The MDX Search Tool, a key component of the `crewai_tools` package, is a RAG tool designed for semantic searches within MDX documentation files. It helps researchers and writers locate and extract relevant information from Markdown-based documentation quickly and efficiently.
+
+## Installation
+To utilize the MDX Search Tool, ensure the `crewai_tools` package is installed. If not already present, install it using the following command:
+
+```shell
+pip install 'crewai[tools]'
+```
+
+## Example
+Here is a simple example of configuring and using the tool within a crewAI project:
+
+```python
+from crewai_tools import MDXSearchTool
+
+# Initialize the tool so the agent can search any MDX content it learns about during its execution
+tool = MDXSearchTool()
+
+# OR
+
+# Initialize the tool with a specific MDX file path for exclusive search within that document
+tool = MDXSearchTool(mdx='path/to/your/document.mdx')
+```
+
+## Arguments
+- `mdx`: **Optional**. The path to the MDX file to search. It can be provided at initialization.
\ No newline at end of file
diff --git a/src/crewai_tools/tools/pdf_search_tool/README.md b/src/crewai_tools/tools/pdf_search_tool/README.md
new file mode 100644
index 000000000..c927d1bcb
--- /dev/null
+++ b/src/crewai_tools/tools/pdf_search_tool/README.md
@@ -0,0 +1,29 @@
+# PDFSearchTool
+
+## Description
+The PDFSearchTool is a RAG tool designed for semantic searches within PDF content. It allows for inputting a search query and a PDF document, leveraging advanced search techniques to find relevant content efficiently. This capability makes it especially useful for extracting specific information from large PDF files quickly.
+
+## Installation
+To get started with the PDFSearchTool, first ensure the crewai_tools package is installed with the following command:
+
+```shell
+pip install 'crewai[tools]'
+```
+
+## Example
+Here's how to use the PDFSearchTool to search within a PDF document:
+
+```python
+from crewai_tools import PDFSearchTool
+
+# Initialize the tool allowing for any PDF content search if the path is provided during execution
+tool = PDFSearchTool()
+
+# OR
+
+# Initialize the tool with a specific PDF path for exclusive search within that document
+tool = PDFSearchTool(pdf='path/to/your/document.pdf')
+```
+
+## Arguments
+- `pdf`: **Optional**. The PDF path for the search. Can be provided at initialization or within the `run` method's arguments. If provided at initialization, the tool confines its search to the specified document.
diff --git a/src/crewai_tools/tools/pg_seach_tool/README.md b/src/crewai_tools/tools/pg_seach_tool/README.md
new file mode 100644
index 000000000..a8bae491f
--- /dev/null
+++ b/src/crewai_tools/tools/pg_seach_tool/README.md
@@ -0,0 +1,28 @@
+# PGSearchTool
+
+## Description
+This tool is designed to facilitate semantic searches within PostgreSQL database tables. Leveraging RAG (Retrieval-Augmented Generation) technology, the PGSearchTool provides users with an efficient means of querying database table content, specifically tailored for PostgreSQL databases. It simplifies the process of finding relevant data through semantic search queries, making it an invaluable resource for users needing to perform advanced queries on extensive datasets within a PostgreSQL database.
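+
+Once initialized, a query is a single call. Below is a minimal sketch; the `search_query` argument name mirrors the package's other RAG tools and is an assumption, and the connection details are placeholders:
+
+```python
+from crewai_tools import PGSearchTool
+
+# Placeholder URI; point this at your own PostgreSQL instance.
+tool = PGSearchTool(
+    db_uri='postgresql://user:password@localhost:5432/mydatabase',
+    table_name='employees',
+)
+
+# Semantic search over the table's content.
+print(tool.run(search_query='Which employees work in engineering?'))
+```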
+
+## Installation
+To install the `crewai_tools` package and utilize the PGSearchTool, execute the following command in your terminal:
+
+```shell
+pip install 'crewai[tools]'
+```
+
+## Example
+Below is an example showcasing how to use the PGSearchTool to conduct a semantic search on a table within a PostgreSQL database:
+
+```python
+from crewai_tools import PGSearchTool
+
+# Initialize the tool with the database URI and the target table name
+tool = PGSearchTool(db_uri='postgresql://user:password@localhost:5432/mydatabase', table_name='employees')
+
+```
+
+## Arguments
+The PGSearchTool requires the following arguments for its operation:
+
+- `db_uri`: A string representing the URI of the PostgreSQL database to be queried. This argument is mandatory and must include the necessary authentication details and the location of the database.
+- `table_name`: A string specifying the name of the table within the database on which the semantic search will be performed. This argument is mandatory.
\ No newline at end of file
diff --git a/src/crewai_tools/tools/scrape_website_tool/README.md b/src/crewai_tools/tools/scrape_website_tool/README.md
new file mode 100644
index 000000000..6a933c355
--- /dev/null
+++ b/src/crewai_tools/tools/scrape_website_tool/README.md
@@ -0,0 +1,24 @@
+# ScrapeWebsiteTool
+
+## Description
+A tool designed to extract and read the content of a specified website. It is capable of handling various types of web pages by making HTTP requests and parsing the received HTML content. This tool can be particularly useful for web scraping tasks, data collection, or extracting specific information from websites.
+
+## Installation
+Install the crewai_tools package
+```shell
+pip install 'crewai[tools]'
+```
+
+## Example
+```python
+from crewai_tools import ScrapeWebsiteTool
+
+# To enable scraping any website the agent finds during its execution
+tool = ScrapeWebsiteTool()
+
+# Initialize the tool with the website URL, so the agent can only scrape the content of the specified website
+tool = ScrapeWebsiteTool(website_url='https://www.example.com')
+```
+
+## Arguments
+- `website_url` : Mandatory website URL to read. This is the primary input for the tool, specifying which website's content should be scraped and read.
\ No newline at end of file
diff --git a/src/crewai_tools/tools/selenium_scraping_tool/README.md b/src/crewai_tools/tools/selenium_scraping_tool/README.md
new file mode 100644
index 000000000..631fcfe0e
--- /dev/null
+++ b/src/crewai_tools/tools/selenium_scraping_tool/README.md
@@ -0,0 +1,33 @@
+# SeleniumScrapingTool
+
+## Description
+This tool is designed for efficient web scraping, enabling users to extract content from web pages. It supports targeted scraping by allowing the specification of a CSS selector for desired elements. The flexibility of the tool enables it to be used on any website URL provided by the user, making it a versatile tool for various web scraping needs.
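+
+Because `website_url` and `css_element` can be supplied per call, a single instance can serve several pages. A minimal sketch, using placeholder values:
+
+```python
+from crewai_tools import SeleniumScrapingTool
+
+# One reusable instance; the wait gives dynamic content time to load.
+tool = SeleniumScrapingTool(wait_time=5)
+
+# Target a specific element on one page now, another page later.
+text = tool.run(website_url='https://example.com', css_element='.main-content')
+print(text)
+```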
+
+## Installation
+Install the crewai_tools package
+```shell
+pip install 'crewai[tools]'
+```
+
+## Example
+```python
+from crewai_tools import SeleniumScrapingTool
+
+# Example 1: Scrape any website it finds during its execution
+tool = SeleniumScrapingTool()
+
+# Example 2: Scrape the entire webpage
+tool = SeleniumScrapingTool(website_url='https://example.com')
+
+# Example 3: Scrape a specific CSS element from the webpage
+tool = SeleniumScrapingTool(website_url='https://example.com', css_element='.main-content')
+
+# Example 4: Scrape using optional parameters for customized scraping
+tool = SeleniumScrapingTool(website_url='https://example.com', css_element='.main-content', cookie={'name': 'user', 'value': 'John Doe'})
+```
+
+## Arguments
+- `website_url`: The URL of the website to scrape. Mandatory at run time unless provided during initialization.
+- `css_element`: Optional. The CSS selector for a specific element to scrape from the website; when omitted, the tool returns the text of the page's body instead.
+- `cookie`: Optional. A dictionary containing cookie information. This parameter allows the tool to simulate a session with cookie information, providing access to content that may be restricted to logged-in users.
+- `wait_time`: Optional. The number of seconds the tool waits after loading the website and after setting a cookie, before scraping the content. This allows for dynamic content to load properly.
diff --git a/src/crewai_tools/tools/serper_dev_tool/README.md b/src/crewai_tools/tools/serper_dev_tool/README.md
new file mode 100644
index 000000000..ae900a3bc
--- /dev/null
+++ b/src/crewai_tools/tools/serper_dev_tool/README.md
@@ -0,0 +1,30 @@
+# SerperDevTool Documentation
+
+## Description
+This tool is designed to perform semantic searches for a specified query across the internet. It utilizes the `serper.dev` API to fetch and display the most relevant search results based on the query provided by the user.
+
+## Installation
+To incorporate this tool into your project, follow the installation instructions below:
+```shell
+pip install 'crewai[tools]'
+```
+
+## Example
+The following example demonstrates how to initialize the tool and execute a search with a given query:
+
+```python
+from crewai_tools import SerperDevTool
+
+# Initialize the tool for internet searching capabilities
+tool = SerperDevTool()
+```
+
+## Steps to Get Started
+To effectively use the `SerperDevTool`, follow these steps:
+
+1. **Package Installation**: Confirm that the `crewai[tools]` package is installed in your Python environment.
+2. **API Key Acquisition**: Acquire a `serper.dev` API key by registering for a free account at `serper.dev`.
+3. **Environment Configuration**: Store your obtained API key in an environment variable named `SERPER_API_KEY` to facilitate its use by the tool.
+
+## Conclusion
+By integrating the `SerperDevTool` into Python projects, users gain the ability to conduct real-time, relevant searches across the internet directly from their applications. By adhering to the setup and usage guidelines provided, incorporating this tool into projects is streamlined and straightforward.
diff --git a/src/crewai_tools/tools/txt_search_tool/README.md b/src/crewai_tools/tools/txt_search_tool/README.md
new file mode 100644
index 000000000..720a47d35
--- /dev/null
+++ b/src/crewai_tools/tools/txt_search_tool/README.md
@@ -0,0 +1,31 @@
+# TXTSearchTool
+
+## Description
+This tool is used to perform a RAG (Retrieval-Augmented Generation) search within the content of a text file. It allows for semantic searching of a query within a specified text file's content, making it an invaluable resource for quickly extracting information or finding specific sections of text based on the query provided.
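+
+After initialization, the search itself is one call. The sketch below assumes the `search_query` argument exposed by the tool's schema, with a placeholder file path:
+
+```python
+from crewai_tools import TXTSearchTool
+
+# Confine the search to a single text file (placeholder path).
+tool = TXTSearchTool(txt='path/to/text/file.txt')
+
+# Semantic search within the file's content.
+print(tool.run(search_query='What does the document say about requirements?'))
+```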
+
+## Installation
+To use the TXTSearchTool, you first need to install the crewai_tools package. This can be done using pip, a package manager for Python. Open your terminal or command prompt and enter the following command:
+
+```shell
+pip install 'crewai[tools]'
+```
+
+This command will download and install the TXTSearchTool along with any necessary dependencies.
+
+## Example
+The following example demonstrates how to use the TXTSearchTool to search within a text file. This example shows both the initialization of the tool with a specific text file and the subsequent search within that file's content.
+
+```python
+from crewai_tools import TXTSearchTool
+
+# Initialize the tool to search within any text file's content the agent learns about during its execution
+tool = TXTSearchTool()
+
+# OR
+
+# Initialize the tool with a specific text file, so the agent can search within the given text file's content
+tool = TXTSearchTool(txt='path/to/text/file.txt')
+```
+
+## Arguments
+- `txt` (str): **Optional**. The path to the text file you want to search. This argument is only required if the tool was not initialized with a specific text file; otherwise, the search will be conducted within the initially provided text file.
diff --git a/src/crewai_tools/tools/website_search/README.md b/src/crewai_tools/tools/website_search/README.md
new file mode 100644
index 000000000..306caae85
--- /dev/null
+++ b/src/crewai_tools/tools/website_search/README.md
@@ -0,0 +1,29 @@
+# WebsiteSearchTool
+
+## Description
+This tool is specifically crafted for conducting semantic searches within the content of a particular website. Leveraging a Retrieval-Augmented Generation (RAG) model, it navigates through the information provided on a given URL. Users have the flexibility to either initiate a search across any website known or discovered during its usage or to concentrate the search on a predefined, specific website.
+
+## Installation
+Install the crewai_tools package by executing the following command in your terminal:
+
+```shell
+pip install 'crewai[tools]'
+```
+
+## Example
+To utilize the WebsiteSearchTool for different use cases, follow these examples:
+
+```python
+from crewai_tools import WebsiteSearchTool
+
+# To enable the tool to search any website the agent comes across or learns about during its operation
+tool = WebsiteSearchTool()
+
+# OR
+
+# To restrict the tool to only search within the content of a specific website.
+tool = WebsiteSearchTool(website='https://example.com')
+```
+
+## Arguments
+- `website` : An optional argument that specifies the valid website URL to perform the search on. This becomes necessary if the tool is initialized without a specific website. In the `WebsiteSearchToolSchema` this argument is mandatory; in the `FixedWebsiteSearchToolSchema` it becomes optional when a website is provided during the tool's initialization, since the tool will then only search within the predefined website's content.
\ No newline at end of file
diff --git a/src/crewai_tools/tools/xml_search_tool/README.md b/src/crewai_tools/tools/xml_search_tool/README.md
new file mode 100644
index 000000000..416cb2c67
--- /dev/null
+++ b/src/crewai_tools/tools/xml_search_tool/README.md
@@ -0,0 +1,29 @@
+# XMLSearchTool
+
+## Description
+The XMLSearchTool is a RAG tool engineered for conducting semantic searches within XML files. Ideal for users needing to parse and extract information from XML content efficiently, this tool supports inputting a search query and an optional XML file path. By specifying an XML path, users can target their search more precisely to the content of that file, thereby obtaining more relevant search outcomes.
+
+## Installation
+To start using the XMLSearchTool, you must first install the crewai_tools package. This can be easily done with the following command:
+
+```shell
+pip install 'crewai[tools]'
+```
+
+## Example
+Here are two examples demonstrating how to use the XMLSearchTool. The first example shows initiating a search without predefining an XML path, providing flexibility in search scope, while the second example targets the search at a specific XML file.
+
+```python
+from crewai_tools.tools.xml_search_tool import XMLSearchTool
+
+# Allow the agent to search within any XML file's content as it learns about file paths during execution
+tool = XMLSearchTool()
+
+# OR
+
+# Initialize the tool with a specific XML file path for exclusive search within that document
+tool = XMLSearchTool(xml='path/to/your/xmlfile.xml')
+```
+
+## Arguments
+- `xml`: This is the path to the XML file you wish to search. It is an optional parameter during the tool's initialization but must be provided either at initialization or as part of the `run` method's arguments to execute a search.
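+
+For instance, a complete search might look like the following sketch (the `search_query` argument name comes from `XMLSearchToolSchema`; the file path and query are placeholders):
+
+```python
+from crewai_tools.tools.xml_search_tool import XMLSearchTool
+
+# Index a specific XML file, then run a semantic query against its content
+tool = XMLSearchTool(xml='path/to/your/xmlfile.xml')
+result = tool.run(search_query='Which elements describe the configuration?')
+print(result)
+```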
diff --git a/src/crewai_tools/tools/youtube_channel_search_tool/README.md b/src/crewai_tools/tools/youtube_channel_search_tool/README.md
new file mode 100644
index 000000000..95f34721e
--- /dev/null
+++ b/src/crewai_tools/tools/youtube_channel_search_tool/README.md
@@ -0,0 +1,29 @@
+# YoutubeChannelSearchTool
+
+## Description
+This tool is designed to perform semantic searches within a specific Youtube channel's content. Leveraging the RAG (Retrieval-Augmented Generation) methodology, it provides relevant search results, making it invaluable for extracting information or finding specific content without the need to manually sift through videos. It streamlines the search process within Youtube channels, catering to researchers, content creators, and viewers seeking specific information or topics.
+
+## Installation
+To utilize the YoutubeChannelSearchTool, the `crewai_tools` package must be installed. Execute the following command in your shell to install:
+
+```shell
+pip install 'crewai[tools]'
+```
+
+## Example
+To begin using the YoutubeChannelSearchTool, follow the example below. It demonstrates initializing the tool either for any channel the agent learns about during its execution or with a specific Youtube channel handle to target the search.
+
+```python
+from crewai_tools import YoutubeChannelSearchTool
+
+# Initialize the tool to search within any Youtube channel's content the agent learns about during its execution
+tool = YoutubeChannelSearchTool()
+
+# OR
+
+# Initialize the tool with a specific Youtube channel handle to target your search
+tool = YoutubeChannelSearchTool(youtube_channel_handle='@exampleChannel')
+```
+
+## Arguments
+- `youtube_channel_handle`: A string representing the Youtube channel handle. It is mandatory at run time if the tool was not initialized with a handle; when a handle is provided during initialization, the tool only searches within the content of that channel.
diff --git a/src/crewai_tools/tools/youtube_video_search_tool/README.md b/src/crewai_tools/tools/youtube_video_search_tool/README.md
new file mode 100644
index 000000000..55faa0002
--- /dev/null
+++ b/src/crewai_tools/tools/youtube_video_search_tool/README.md
@@ -0,0 +1,32 @@
+# YoutubeVideoSearchTool
+
+## Description
+
+This tool is part of the `crewai_tools` package and is designed to perform semantic searches within Youtube video content, utilizing Retrieval-Augmented Generation (RAG) techniques. It is one of several "Search" tools in the package that leverage RAG for different sources. The YoutubeVideoSearchTool allows for flexibility in searches; users can search across any Youtube video content without specifying a video URL, or they can target their search to a specific Youtube video by providing its URL.
+
+## Installation
+
+To utilize the YoutubeVideoSearchTool, you must first install the `crewai_tools` package. This package contains the YoutubeVideoSearchTool among other utilities designed to enhance your data analysis and processing tasks. Install the package by executing the following command in your terminal:
+
+```
+pip install 'crewai[tools]'
+```
+
+## Example
+
+To integrate the YoutubeVideoSearchTool into your Python projects, follow the example below. This demonstrates how to use the tool both for general Youtube content searches and for targeted searches within a specific video's content.
+
+```python
+from crewai_tools import YoutubeVideoSearchTool
+
+# General search across Youtube content without specifying a video URL, so the agent can search within any Youtube video content whose URL it learns about during its operation
+tool = YoutubeVideoSearchTool()
+
+# Targeted search within a specific Youtube video's content
+tool = YoutubeVideoSearchTool(youtube_video_url='https://youtube.com/watch?v=example')
+```
+
+## Arguments
+
+The YoutubeVideoSearchTool accepts the following initialization arguments:
+
+- `youtube_video_url`: An optional argument at initialization, but required if targeting a specific Youtube video. It specifies the URL of the Youtube video you want to search within.
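+
+Once initialized, a search can be run against the indexed video, as in this sketch (the `search_query` argument name comes from `YoutubeVideoSearchToolSchema`; the URL and query are placeholders):
+
+```python
+from crewai_tools import YoutubeVideoSearchTool
+
+# Index a specific video's content, then query it semantically
+tool = YoutubeVideoSearchTool(youtube_video_url='https://youtube.com/watch?v=example')
+result = tool.run(search_query='What are the main points covered in the video?')
+print(result)
+```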
\ No newline at end of file From cf4f49c6e9617f5927092495cd634e89b5f74dd4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Thu, 7 Mar 2024 20:32:18 -0300 Subject: [PATCH 024/391] cutting new version --- src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py index bb85673ba..e75cb8610 100644 --- a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py +++ b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py @@ -9,7 +9,7 @@ from ..rag.rag_tool import RagTool class FixedPDFSearchToolSchema(BaseModel): """Input for PDFSearchTool.""" - search_query: str = Field(..., description="Mandatory search query you want to use to search the PDF's content") + query: str = Field(..., description="Mandatory query you want to use to search the PDF's content") class PDFSearchToolSchema(FixedPDFSearchToolSchema): """Input for PDFSearchTool.""" @@ -32,10 +32,10 @@ class PDFSearchTool(RagTool): def _run( self, - search_query: str, + query: str, **kwargs: Any, ) -> Any: pdf = kwargs.get('pdf', self.pdf) self.app = App() self.app.add(pdf, data_type=DataType.PDF_FILE) - return super()._run(query=search_query) \ No newline at end of file + return super()._run(query=query) \ No newline at end of file From c09f62cf47f4e7c5387b7704a641a265e668b197 Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Sun, 17 Mar 2024 12:32:08 +0900 Subject: [PATCH 025/391] Update serper_dev_tool.py stirng -> string --- src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py index 3fbf5ea61..6789035aa 100644 --- a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py +++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py @@ -31,10 +31,10 @@ class SerperDevTool(BaseTool): results = response.json() if 'organic' in results: results = results['organic'] - stirng = [] + string = [] for result in results: try: - stirng.append('\n'.join([ + string.append('\n'.join([ f"Title: {result['title']}", f"Link: {result['link']}", f"Snippet: {result['snippet']}", @@ -43,7 +43,7 @@ class SerperDevTool(BaseTool): except KeyError: next - content = '\n'.join(stirng) + content = '\n'.join(string) return f"\nSearch results: {content}\n" else: return results From b8e86b0fb46701c3a3d6a8c222fe2b794e6deabc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Tue, 19 Mar 2024 13:33:20 -0300 Subject: [PATCH 026/391] adding cache_function to base_tool --- src/crewai_tools/tools/base_tool.py | 5 +++- tests/base_tool_test.py | 36 +++++++++++++++++++++++------ 2 files changed, 33 insertions(+), 8 deletions(-) diff --git a/src/crewai_tools/tools/base_tool.py b/src/crewai_tools/tools/base_tool.py index 2f19184ea..545529bdd 100644 --- a/src/crewai_tools/tools/base_tool.py +++ b/src/crewai_tools/tools/base_tool.py @@ -1,5 +1,5 @@ from abc import ABC, abstractmethod -from typing import Any, Callable, cast, Optional, Type +from typing import Any, Callable, Optional, Type from pydantic import BaseModel, model_validator from pydantic.v1 import BaseModel as V1BaseModel @@ -14,6 +14,9 @@ class BaseTool(BaseModel, ABC): args_schema: Optional[Type[V1BaseModel]] = None """The schema for the arguments that the tool accepts.""" 
description_updated: bool = False + """Flag to check if the description has been updated.""" + cache_function: Optional[Callable] = lambda: True + """Function that will be used to determine if the tool should be cached, should return a boolean. If None, the tool will be cached.""" @model_validator(mode="after") def _check_args_schema(self): diff --git a/tests/base_tool_test.py b/tests/base_tool_test.py index e7ecbf8d9..949a445c2 100644 --- a/tests/base_tool_test.py +++ b/tests/base_tool_test.py @@ -1,6 +1,4 @@ -import json -import pydantic_core -import pytest +from typing import Callable from crewai_tools import BaseTool, tool def test_creating_a_tool_using_annotation(): @@ -11,14 +9,14 @@ def test_creating_a_tool_using_annotation(): # Assert all the right attributes were defined assert my_tool.name == "Name of my tool" - assert my_tool.description == "Clear description for what this tool is useful for, you agent will need this information to use it." + assert my_tool.description == "Name of my tool(question: 'string') - Clear description for what this tool is useful for, you agent will need this information to use it." assert my_tool.args_schema.schema()["properties"] == {'question': {'title': 'Question', 'type': 'string'}} assert my_tool.func("What is the meaning of life?") == "What is the meaning of life?" # Assert the langchain tool conversion worked as expected converted_tool = my_tool.to_langchain() assert converted_tool.name == "Name of my tool" - assert converted_tool.description == "Clear description for what this tool is useful for, you agent will need this information to use it." + assert converted_tool.description == "Name of my tool(question: 'string') - Clear description for what this tool is useful for, you agent will need this information to use it." assert converted_tool.args_schema.schema()["properties"] == {'question': {'title': 'Question', 'type': 'string'}} assert converted_tool.func("What is the meaning of life?") == "What is the meaning of life?" @@ -33,14 +31,38 @@ def test_creating_a_tool_using_baseclass(): my_tool = MyCustomTool() # Assert all the right attributes were defined assert my_tool.name == "Name of my tool" - assert my_tool.description == "Clear description for what this tool is useful for, you agent will need this information to use it." + assert my_tool.description == "Name of my tool(question: 'string') - Clear description for what this tool is useful for, you agent will need this information to use it." assert my_tool.args_schema.schema()["properties"] == {'question': {'title': 'Question', 'type': 'string'}} assert my_tool.run("What is the meaning of life?") == "What is the meaning of life?" # Assert the langchain tool conversion worked as expected converted_tool = my_tool.to_langchain() assert converted_tool.name == "Name of my tool" - assert converted_tool.description == "Clear description for what this tool is useful for, you agent will need this information to use it." + assert converted_tool.description == "Name of my tool(question: 'string') - Clear description for what this tool is useful for, you agent will need this information to use it." assert converted_tool.args_schema.schema()["properties"] == {'question': {'title': 'Question', 'type': 'string'}} assert converted_tool.run("What is the meaning of life?") == "What is the meaning of life?" 
+def test_setting_cache_function(): + class MyCustomTool(BaseTool): + name: str = "Name of my tool" + description: str = "Clear description for what this tool is useful for, you agent will need this information to use it." + cache_function: Callable = lambda: False + + def _run(self, question: str) -> str: + return question + + my_tool = MyCustomTool() + # Assert all the right attributes were defined + assert my_tool.cache_function() == False + +def test_default_cache_function_is_true(): + class MyCustomTool(BaseTool): + name: str = "Name of my tool" + description: str = "Clear description for what this tool is useful for, you agent will need this information to use it." + + def _run(self, question: str) -> str: + return question + + my_tool = MyCustomTool() + # Assert all the right attributes were defined + assert my_tool.cache_function() == True \ No newline at end of file From 1c8d010601512cd2410515cdf78f95fceb2c0c33 Mon Sep 17 00:00:00 2001 From: Gui Vieira Date: Tue, 19 Mar 2024 18:47:13 -0300 Subject: [PATCH 027/391] Custom model config for RAG tools --- .../adapters/embedchain_adapter.py | 17 +++- src/crewai_tools/adapters/lancedb_adapter.py | 9 ++- src/crewai_tools/tools/base_tool.py | 69 ++++++++++------ .../code_docs_search_tool.py | 67 +++++++++------- .../tools/csv_search_tool/csv_search_tool.py | 67 +++++++++------- .../directory_search_tool.py | 68 +++++++++------- .../docx_search_tool/docx_search_tool.py | 67 +++++++++------- .../github_search_tool/github_search_tool.py | 78 +++++++++++-------- .../json_search_tool/json_search_tool.py | 67 +++++++++------- .../tools/mdx_seach_tool/mdx_search_tool.py | 67 +++++++++------- .../tools/pdf_search_tool/pdf_search_tool.py | 66 +++++++++------- .../tools/pg_seach_tool/pg_search_tool.py | 62 +++++++-------- src/crewai_tools/tools/rag/README.md | 5 +- src/crewai_tools/tools/rag/rag_tool.py | 63 +++++++++++---- .../tools/txt_search_tool/txt_search_tool.py | 68 +++++++++------- .../website_search/website_search_tool.py | 67 +++++++++------- .../tools/xml_search_tool/xml_search_tool.py | 67 +++++++++------- .../youtube_channel_search_tool.py | 72 ++++++++++------- .../youtube_video_search_tool.py | 67 +++++++++------- tests/tools/rag/rag_tool_test.py | 43 ++++++++++ 20 files changed, 704 insertions(+), 452 deletions(-) create mode 100644 tests/tools/rag/rag_tool_test.py diff --git a/src/crewai_tools/adapters/embedchain_adapter.py b/src/crewai_tools/adapters/embedchain_adapter.py index 16491fb25..446aab96c 100644 --- a/src/crewai_tools/adapters/embedchain_adapter.py +++ b/src/crewai_tools/adapters/embedchain_adapter.py @@ -1,12 +1,25 @@ from typing import Any + +from embedchain import App + from crewai_tools.tools.rag.rag_tool import Adapter + class EmbedchainAdapter(Adapter): - embedchain_app: Any + embedchain_app: App summarize: bool = False def query(self, question: str) -> str: - result, sources = self.embedchain_app.query(question, citations=True, dry_run=(not self.summarize)) + result, sources = self.embedchain_app.query( + question, citations=True, dry_run=(not self.summarize) + ) if self.summarize: return result return "\n\n".join([source[0] for source in sources]) + + def add( + self, + *args: Any, + **kwargs: Any, + ) -> None: + self.embedchain_app.add(*args, **kwargs) diff --git a/src/crewai_tools/adapters/lancedb_adapter.py b/src/crewai_tools/adapters/lancedb_adapter.py index c612d475c..c91423048 100644 --- a/src/crewai_tools/adapters/lancedb_adapter.py +++ b/src/crewai_tools/adapters/lancedb_adapter.py @@ -35,7 +35,7 @@ class 
LanceDBAdapter(Adapter): self._db = lancedb_connect(self.uri) self._table = self._db.open_table(self.table_name) - return super().model_post_init(__context) + super().model_post_init(__context) def query(self, question: str) -> str: query = self.embedding_function([question])[0] @@ -47,3 +47,10 @@ class LanceDBAdapter(Adapter): ) values = [result[self.text_column_name] for result in results] return "\n".join(values) + + def add( + self, + *args: Any, + **kwargs: Any, + ) -> None: + self._table.add(*args, **kwargs) diff --git a/src/crewai_tools/tools/base_tool.py b/src/crewai_tools/tools/base_tool.py index 545529bdd..961688629 100644 --- a/src/crewai_tools/tools/base_tool.py +++ b/src/crewai_tools/tools/base_tool.py @@ -1,28 +1,47 @@ from abc import ABC, abstractmethod from typing import Any, Callable, Optional, Type -from pydantic import BaseModel, model_validator +from langchain_core.tools import StructuredTool +from pydantic import BaseModel, ConfigDict, Field, validator from pydantic.v1 import BaseModel as V1BaseModel -from langchain_core.tools import StructuredTool class BaseTool(BaseModel, ABC): + class _ArgsSchemaPlaceholder(V1BaseModel): + pass + + model_config = ConfigDict() + name: str """The unique name of the tool that clearly communicates its purpose.""" description: str """Used to tell the model how/when/why to use the tool.""" - args_schema: Optional[Type[V1BaseModel]] = None + args_schema: Type[V1BaseModel] = Field(default_factory=_ArgsSchemaPlaceholder) """The schema for the arguments that the tool accepts.""" description_updated: bool = False """Flag to check if the description has been updated.""" cache_function: Optional[Callable] = lambda: True """Function that will be used to determine if the tool should be cached, should return a boolean. 
If None, the tool will be cached.""" - @model_validator(mode="after") - def _check_args_schema(self): - self._set_args_schema() + @validator("args_schema", always=True, pre=True) + def _default_args_schema(cls, v: Type[V1BaseModel]) -> Type[V1BaseModel]: + if not isinstance(v, cls._ArgsSchemaPlaceholder): + return v + + return type( + f"{cls.__name__}Schema", + (V1BaseModel,), + { + "__annotations__": { + k: v for k, v in cls._run.__annotations__.items() if k != "return" + }, + }, + ) + + def model_post_init(self, __context: Any) -> None: self._generate_description() - return self + + super().model_post_init(__context) def run( self, @@ -57,16 +76,20 @@ class BaseTool(BaseModel, ABC): (V1BaseModel,), { "__annotations__": { - k: v for k, v in self._run.__annotations__.items() if k != 'return' + k: v + for k, v in self._run.__annotations__.items() + if k != "return" }, }, ) + def _generate_description(self): args = [] - for arg, attribute in self.args_schema.schema()['properties'].items(): - args.append(f"{arg}: '{attribute['type']}'") + for arg, attribute in self.args_schema.schema()["properties"].items(): + if "type" in attribute: + args.append(f"{arg}: '{attribute['type']}'") - description = self.description.replace('\n', ' ') + description = self.description.replace("\n", " ") self.description = f"{self.name}({', '.join(args)}) - {description}" @@ -93,19 +116,19 @@ def tool(*args): def _make_tool(f: Callable) -> BaseTool: if f.__doc__ is None: raise ValueError("Function must have a docstring") + if f.__annotations__ is None: + raise ValueError("Function must have type annotations") - args_schema = None - if f.__annotations__: - class_name = "".join(tool_name.split()).title() - args_schema = type( - class_name, - (V1BaseModel,), - { - "__annotations__": { - k: v for k, v in f.__annotations__.items() if k != 'return' - }, + class_name = "".join(tool_name.split()).title() + args_schema = type( + class_name, + (V1BaseModel,), + { + "__annotations__": { + k: v for k, v in f.__annotations__.items() if k != "return" }, - ) + }, + ) return Tool( name=tool_name, @@ -120,4 +143,4 @@ def tool(*args): return _make_with_name(args[0].__name__)(args[0]) if len(args) == 1 and isinstance(args[0], str): return _make_with_name(args[0]) - raise ValueError("Invalid arguments") \ No newline at end of file + raise ValueError("Invalid arguments") diff --git a/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py b/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py index 54ba69d01..195cc8a05 100644 --- a/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py +++ b/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py @@ -1,41 +1,52 @@ -from typing import Optional, Type, Any -from pydantic.v1 import BaseModel, Field +from typing import Any, Optional, Type -from embedchain import App from embedchain.models.data_type import DataType +from pydantic.v1 import BaseModel, Field from ..rag.rag_tool import RagTool class FixedCodeDocsSearchToolSchema(BaseModel): - """Input for CodeDocsSearchTool.""" - search_query: str = Field(..., description="Mandatory search query you want to use to search the Code Docs content") + """Input for CodeDocsSearchTool.""" + + search_query: str = Field( + ..., + description="Mandatory search query you want to use to search the Code Docs content", + ) + class CodeDocsSearchToolSchema(FixedCodeDocsSearchToolSchema): - """Input for CodeDocsSearchTool.""" - docs_url: str = Field(..., description="Mandatory docs_url path you want 
to search") + """Input for CodeDocsSearchTool.""" + + docs_url: str = Field(..., description="Mandatory docs_url path you want to search") + class CodeDocsSearchTool(RagTool): - name: str = "Search a Code Docs content" - description: str = "A tool that can be used to semantic search a query from a Code Docs content." - summarize: bool = False - args_schema: Type[BaseModel] = CodeDocsSearchToolSchema - docs_url: Optional[str] = None + name: str = "Search a Code Docs content" + description: str = ( + "A tool that can be used to semantic search a query from a Code Docs content." + ) + args_schema: Type[BaseModel] = CodeDocsSearchToolSchema - def __init__(self, docs_url: Optional[str] = None, **kwargs): - super().__init__(**kwargs) - if docs_url is not None: - self.docs_url = docs_url - self.description = f"A tool that can be used to semantic search a query the {docs_url} Code Docs content." - self.args_schema = FixedCodeDocsSearchToolSchema - self._generate_description() + def __init__(self, docs_url: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if docs_url is not None: + self.add(docs_url) + self.description = f"A tool that can be used to semantic search a query the {docs_url} Code Docs content." + self.args_schema = FixedCodeDocsSearchToolSchema - def _run( - self, - search_query: str, - **kwargs: Any, - ) -> Any: - docs_url = kwargs.get('docs_url', self.docs_url) - self.app = App() - self.app.add(docs_url, data_type=DataType.DOCS_SITE) - return super()._run(query=search_query) \ No newline at end of file + def add( + self, + *args: Any, + **kwargs: Any, + ) -> None: + kwargs["data_type"] = DataType.DOCS_SITE + super().add(*args, **kwargs) + + def _before_run( + self, + query: str, + **kwargs: Any, + ) -> Any: + if "docs_url" in kwargs: + self.add(kwargs["docs_url"]) diff --git a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py index cd99ebfd2..6b8e79f88 100644 --- a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py +++ b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py @@ -1,41 +1,52 @@ -from typing import Optional, Type, Any -from pydantic.v1 import BaseModel, Field +from typing import Any, Optional, Type -from embedchain import App from embedchain.models.data_type import DataType +from pydantic.v1 import BaseModel, Field from ..rag.rag_tool import RagTool class FixedCSVSearchToolSchema(BaseModel): - """Input for CSVSearchTool.""" - search_query: str = Field(..., description="Mandatory search query you want to use to search the CSV's content") + """Input for CSVSearchTool.""" + + search_query: str = Field( + ..., + description="Mandatory search query you want to use to search the CSV's content", + ) + class CSVSearchToolSchema(FixedCSVSearchToolSchema): - """Input for CSVSearchTool.""" - csv: str = Field(..., description="Mandatory csv path you want to search") + """Input for CSVSearchTool.""" + + csv: str = Field(..., description="Mandatory csv path you want to search") + class CSVSearchTool(RagTool): - name: str = "Search a CSV's content" - description: str = "A tool that can be used to semantic search a query from a CSV's content." - summarize: bool = False - args_schema: Type[BaseModel] = CSVSearchToolSchema - csv: Optional[str] = None + name: str = "Search a CSV's content" + description: str = ( + "A tool that can be used to semantic search a query from a CSV's content." 
+ ) + args_schema: Type[BaseModel] = CSVSearchToolSchema - def __init__(self, csv: Optional[str] = None, **kwargs): - super().__init__(**kwargs) - if csv is not None: - self.csv = csv - self.description = f"A tool that can be used to semantic search a query the {csv} CSV's content." - self.args_schema = FixedCSVSearchToolSchema - self._generate_description() + def __init__(self, csv: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if csv is not None: + self.add(csv) + self.description = f"A tool that can be used to semantic search a query the {csv} CSV's content." + self.args_schema = FixedCSVSearchToolSchema - def _run( - self, - search_query: str, - **kwargs: Any, - ) -> Any: - csv = kwargs.get('csv', self.csv) - self.app = App() - self.app.add(csv, data_type=DataType.CSV) - return super()._run(query=search_query) \ No newline at end of file + def add( + self, + *args: Any, + **kwargs: Any, + ) -> None: + kwargs["data_type"] = DataType.CSV + super().add(*args, **kwargs) + + def _before_run( + self, + query: str, + **kwargs: Any, + ) -> Any: + if "csv" in kwargs: + self.add(kwargs["csv"]) diff --git a/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py b/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py index 2cd888a8b..7f20f5979 100644 --- a/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py +++ b/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py @@ -1,42 +1,52 @@ -from typing import Optional, Type, Any -from pydantic.v1 import BaseModel, Field +from typing import Any, Optional, Type -from embedchain import App from embedchain.loaders.directory_loader import DirectoryLoader +from pydantic.v1 import BaseModel, Field from ..rag.rag_tool import RagTool class FixedDirectorySearchToolSchema(BaseModel): - """Input for DirectorySearchTool.""" - search_query: str = Field(..., description="Mandatory search query you want to use to search the directory's content") + """Input for DirectorySearchTool.""" + + search_query: str = Field( + ..., + description="Mandatory search query you want to use to search the directory's content", + ) + class DirectorySearchToolSchema(FixedDirectorySearchToolSchema): - """Input for DirectorySearchTool.""" - directory: str = Field(..., description="Mandatory directory you want to search") + """Input for DirectorySearchTool.""" + + directory: str = Field(..., description="Mandatory directory you want to search") + class DirectorySearchTool(RagTool): - name: str = "Search a directory's content" - description: str = "A tool that can be used to semantic search a query from a directory's content." - summarize: bool = False - args_schema: Type[BaseModel] = DirectorySearchToolSchema - directory: Optional[str] = None + name: str = "Search a directory's content" + description: str = ( + "A tool that can be used to semantic search a query from a directory's content." + ) + args_schema: Type[BaseModel] = DirectorySearchToolSchema - def __init__(self, directory: Optional[str] = None, **kwargs): - super().__init__(**kwargs) - if directory is not None: - self.directory = directory - self.description = f"A tool that can be used to semantic search a query the {directory} directory's content." 
- self.args_schema = FixedDirectorySearchToolSchema - self._generate_description() + def __init__(self, directory: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if directory is not None: + self.add(directory) + self.description = f"A tool that can be used to semantic search a query the {directory} directory's content." + self.args_schema = FixedDirectorySearchToolSchema - def _run( - self, - search_query: str, - **kwargs: Any, - ) -> Any: - directory = kwargs.get('directory', self.directory) - loader = DirectoryLoader(config=dict(recursive=True)) - self.app = App() - self.app.add(directory, loader=loader) - return super()._run(query=search_query) \ No newline at end of file + def add( + self, + *args: Any, + **kwargs: Any, + ) -> None: + kwargs["loader"] = DirectoryLoader(config=dict(recursive=True)) + super().add(*args, **kwargs) + + def _before_run( + self, + query: str, + **kwargs: Any, + ) -> Any: + if "directory" in kwargs: + self.add(kwargs["directory"]) diff --git a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py index 135837a6b..5c64f9824 100644 --- a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py +++ b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py @@ -1,41 +1,52 @@ -from typing import Optional, Type, Any -from pydantic.v1 import BaseModel, Field +from typing import Any, Optional, Type -from embedchain import App from embedchain.models.data_type import DataType +from pydantic.v1 import BaseModel, Field from ..rag.rag_tool import RagTool class FixedDOCXSearchToolSchema(BaseModel): - """Input for DOCXSearchTool.""" - search_query: str = Field(..., description="Mandatory search query you want to use to search the DOCX's content") + """Input for DOCXSearchTool.""" + + search_query: str = Field( + ..., + description="Mandatory search query you want to use to search the DOCX's content", + ) + class DOCXSearchToolSchema(FixedDOCXSearchToolSchema): - """Input for DOCXSearchTool.""" - docx: str = Field(..., description="Mandatory docx path you want to search") + """Input for DOCXSearchTool.""" + + docx: str = Field(..., description="Mandatory docx path you want to search") + class DOCXSearchTool(RagTool): - name: str = "Search a DOCX's content" - description: str = "A tool that can be used to semantic search a query from a DOCX's content." - summarize: bool = False - args_schema: Type[BaseModel] = DOCXSearchToolSchema - docx: Optional[str] = None + name: str = "Search a DOCX's content" + description: str = ( + "A tool that can be used to semantic search a query from a DOCX's content." + ) + args_schema: Type[BaseModel] = DOCXSearchToolSchema - def __init__(self, docx: Optional[str] = None, **kwargs): - super().__init__(**kwargs) - if docx is not None: - self.docx = docx - self.description = f"A tool that can be used to semantic search a query the {docx} DOCX's content." - self.args_schema = FixedDOCXSearchToolSchema - self._generate_description() + def __init__(self, docx: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if docx is not None: + self.add(docx) + self.description = f"A tool that can be used to semantic search a query the {docx} DOCX's content." 
+ self.args_schema = FixedDOCXSearchToolSchema - def _run( - self, - search_query: str, - **kwargs: Any, - ) -> Any: - docx = kwargs.get('docx', self.docx) - self.app = App() - self.app.add(docx, data_type=DataType.DOCX) - return super()._run(query=search_query) \ No newline at end of file + def add( + self, + *args: Any, + **kwargs: Any, + ) -> None: + kwargs["data_type"] = DataType.DOCX + super().add(*args, **kwargs) + + def _before_run( + self, + query: str, + **kwargs: Any, + ) -> Any: + if "docx" in kwargs: + self.add(kwargs["docx"]) diff --git a/src/crewai_tools/tools/github_search_tool/github_search_tool.py b/src/crewai_tools/tools/github_search_tool/github_search_tool.py index cb2815aad..4a84b166c 100644 --- a/src/crewai_tools/tools/github_search_tool/github_search_tool.py +++ b/src/crewai_tools/tools/github_search_tool/github_search_tool.py @@ -1,46 +1,58 @@ -from typing import Optional, Type, List, Any -from pydantic.v1 import BaseModel, Field +from typing import Any, List, Optional, Type -from embedchain import App from embedchain.loaders.github import GithubLoader +from pydantic.v1 import BaseModel, Field from ..rag.rag_tool import RagTool class FixedGithubSearchToolSchema(BaseModel): - """Input for GithubSearchTool.""" - search_query: str = Field(..., description="Mandatory search query you want to use to search the github repo's content") + """Input for GithubSearchTool.""" + + search_query: str = Field( + ..., + description="Mandatory search query you want to use to search the github repo's content", + ) + class GithubSearchToolSchema(FixedGithubSearchToolSchema): - """Input for GithubSearchTool.""" - github_repo: str = Field(..., description="Mandatory github you want to search") - content_types: List[str] = Field(..., description="Mandatory content types you want to be inlcuded search, options: [code, repo, pr, issue]") + """Input for GithubSearchTool.""" + + github_repo: str = Field(..., description="Mandatory github you want to search") + content_types: List[str] = Field( + ..., + description="Mandatory content types you want to be inlcuded search, options: [code, repo, pr, issue]", + ) + class GithubSearchTool(RagTool): - name: str = "Search a github repo's content" - description: str = "A tool that can be used to semantic search a query from a github repo's content." - summarize: bool = False - gh_token: str = None - args_schema: Type[BaseModel] = GithubSearchToolSchema - github_repo: Optional[str] = None - content_types: List[str] + name: str = "Search a github repo's content" + description: str = "A tool that can be used to semantic search a query from a github repo's content." + summarize: bool = False + gh_token: str + args_schema: Type[BaseModel] = GithubSearchToolSchema + content_types: List[str] - def __init__(self, github_repo: Optional[str] = None, **kwargs): - super().__init__(**kwargs) - if github_repo is not None: - self.github_repo = github_repo - self.description = f"A tool that can be used to semantic search a query the {github_repo} github repo's content." - self.args_schema = FixedGithubSearchToolSchema - self._generate_description() + def __init__(self, github_repo: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if github_repo is not None: + self.add(github_repo) + self.description = f"A tool that can be used to semantic search a query the {github_repo} github repo's content." 
+ self.args_schema = FixedGithubSearchToolSchema - def _run( - self, - search_query: str, - **kwargs: Any, - ) -> Any: - github_repo = kwargs.get('github_repo', self.github_repo) - loader = GithubLoader(config={"token": self.gh_token}) - app = App() - app.add(f"repo:{github_repo} type:{','.join(self.content_types)}", data_type="github", loader=loader) - self.app = app - return super()._run(query=search_query) \ No newline at end of file + def add( + self, + *args: Any, + **kwargs: Any, + ) -> None: + kwargs["data_type"] = "github" + kwargs["loader"] = GithubLoader(config={"token": self.gh_token}) + super().add(*args, **kwargs) + + def _before_run( + self, + query: str, + **kwargs: Any, + ) -> Any: + if "github_repo" in kwargs: + self.add(kwargs["github_repo"]) diff --git a/src/crewai_tools/tools/json_search_tool/json_search_tool.py b/src/crewai_tools/tools/json_search_tool/json_search_tool.py index 578f06bc9..308dca726 100644 --- a/src/crewai_tools/tools/json_search_tool/json_search_tool.py +++ b/src/crewai_tools/tools/json_search_tool/json_search_tool.py @@ -1,41 +1,52 @@ -from typing import Optional, Type, Any -from pydantic.v1 import BaseModel, Field +from typing import Any, Optional, Type -from embedchain import App from embedchain.models.data_type import DataType +from pydantic.v1 import BaseModel, Field from ..rag.rag_tool import RagTool class FixedJSONSearchToolSchema(BaseModel): - """Input for JSONSearchTool.""" - search_query: str = Field(..., description="Mandatory search query you want to use to search the JSON's content") + """Input for JSONSearchTool.""" + + search_query: str = Field( + ..., + description="Mandatory search query you want to use to search the JSON's content", + ) + class JSONSearchToolSchema(FixedJSONSearchToolSchema): - """Input for JSONSearchTool.""" - json_path: str = Field(..., description="Mandatory json path you want to search") + """Input for JSONSearchTool.""" + + json_path: str = Field(..., description="Mandatory json path you want to search") + class JSONSearchTool(RagTool): - name: str = "Search a JSON's content" - description: str = "A tool that can be used to semantic search a query from a JSON's content." - summarize: bool = False - args_schema: Type[BaseModel] = JSONSearchToolSchema - json_path: Optional[str] = None + name: str = "Search a JSON's content" + description: str = ( + "A tool that can be used to semantic search a query from a JSON's content." + ) + args_schema: Type[BaseModel] = JSONSearchToolSchema - def __init__(self, json_path: Optional[str] = None, **kwargs): - super().__init__(**kwargs) - if json_path is not None: - self.json_path = json_path - self.description = f"A tool that can be used to semantic search a query the {json} JSON's content." - self.args_schema = FixedJSONSearchToolSchema - self._generate_description() + def __init__(self, json_path: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if json_path is not None: + self.add(json_path) + self.description = f"A tool that can be used to semantic search a query the {json_path} JSON's content." 
+ self.args_schema = FixedJSONSearchToolSchema - def _run( - self, - search_query: str, - **kwargs: Any, - ) -> Any: - json_path = kwargs.get('json_path', self.json_path) - self.app = App() - self.app.add(json_path, data_type=DataType.JSON) - return super()._run(query=search_query) \ No newline at end of file + def add( + self, + *args: Any, + **kwargs: Any, + ) -> None: + kwargs["data_type"] = DataType.JSON + super().add(*args, **kwargs) + + def _before_run( + self, + query: str, + **kwargs: Any, + ) -> Any: + if "json_path" in kwargs: + self.add(kwargs["json_path"]) diff --git a/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py b/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py index e34c0fa08..33a58e142 100644 --- a/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py +++ b/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py @@ -1,41 +1,52 @@ -from typing import Optional, Type, Any -from pydantic.v1 import BaseModel, Field +from typing import Any, Optional, Type -from embedchain import App from embedchain.models.data_type import DataType +from pydantic.v1 import BaseModel, Field from ..rag.rag_tool import RagTool class FixedMDXSearchToolSchema(BaseModel): - """Input for MDXSearchTool.""" - search_query: str = Field(..., description="Mandatory search query you want to use to search the MDX's content") + """Input for MDXSearchTool.""" + + search_query: str = Field( + ..., + description="Mandatory search query you want to use to search the MDX's content", + ) + class MDXSearchToolSchema(FixedMDXSearchToolSchema): - """Input for MDXSearchTool.""" - mdx: str = Field(..., description="Mandatory mdx path you want to search") + """Input for MDXSearchTool.""" + + mdx: str = Field(..., description="Mandatory mdx path you want to search") + class MDXSearchTool(RagTool): - name: str = "Search a MDX's content" - description: str = "A tool that can be used to semantic search a query from a MDX's content." - summarize: bool = False - args_schema: Type[BaseModel] = MDXSearchToolSchema - mdx: Optional[str] = None + name: str = "Search a MDX's content" + description: str = ( + "A tool that can be used to semantic search a query from a MDX's content." + ) + args_schema: Type[BaseModel] = MDXSearchToolSchema - def __init__(self, mdx: Optional[str] = None, **kwargs): - super().__init__(**kwargs) - if mdx is not None: - self.mdx = mdx - self.description = f"A tool that can be used to semantic search a query the {mdx} MDX's content." - self.args_schema = FixedMDXSearchToolSchema - self._generate_description() + def __init__(self, mdx: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if mdx is not None: + self.add(mdx) + self.description = f"A tool that can be used to semantic search a query the {mdx} MDX's content." 
+ self.args_schema = FixedMDXSearchToolSchema - def _run( - self, - search_query: str, - **kwargs: Any, - ) -> Any: - mdx = kwargs.get('mdx', self.mdx) - self.app = App() - self.app.add(mdx, data_type=DataType.MDX) - return super()._run(query=search_query) \ No newline at end of file + def add( + self, + *args: Any, + **kwargs: Any, + ) -> None: + kwargs["data_type"] = DataType.MDX + super().add(*args, **kwargs) + + def _before_run( + self, + query: str, + **kwargs: Any, + ) -> Any: + if "mdx" in kwargs: + self.add(kwargs["mdx"]) diff --git a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py index e75cb8610..47e425a45 100644 --- a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py +++ b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py @@ -1,41 +1,51 @@ -from typing import Optional, Type, Any -from pydantic.v1 import BaseModel, Field +from typing import Any, Optional, Type -from embedchain import App from embedchain.models.data_type import DataType +from pydantic.v1 import BaseModel, Field from ..rag.rag_tool import RagTool class FixedPDFSearchToolSchema(BaseModel): - """Input for PDFSearchTool.""" - query: str = Field(..., description="Mandatory query you want to use to search the PDF's content") + """Input for PDFSearchTool.""" + + query: str = Field( + ..., description="Mandatory query you want to use to search the PDF's content" + ) + class PDFSearchToolSchema(FixedPDFSearchToolSchema): - """Input for PDFSearchTool.""" - pdf: str = Field(..., description="Mandatory pdf path you want to search") + """Input for PDFSearchTool.""" + + pdf: str = Field(..., description="Mandatory pdf path you want to search") + class PDFSearchTool(RagTool): - name: str = "Search a PDF's content" - description: str = "A tool that can be used to semantic search a query from a PDF's content." - summarize: bool = False - args_schema: Type[BaseModel] = PDFSearchToolSchema - pdf: Optional[str] = None + name: str = "Search a PDF's content" + description: str = ( + "A tool that can be used to semantic search a query from a PDF's content." + ) + args_schema: Type[BaseModel] = PDFSearchToolSchema - def __init__(self, pdf: Optional[str] = None, **kwargs): - super().__init__(**kwargs) - if pdf is not None: - self.pdf = pdf - self.description = f"A tool that can be used to semantic search a query the {pdf} PDF's content." - self.args_schema = FixedPDFSearchToolSchema - self._generate_description() + def __init__(self, pdf: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if pdf is not None: + self.add(pdf) + self.description = f"A tool that can be used to semantic search a query the {pdf} PDF's content." 
+ self.args_schema = FixedPDFSearchToolSchema - def _run( - self, - query: str, - **kwargs: Any, - ) -> Any: - pdf = kwargs.get('pdf', self.pdf) - self.app = App() - self.app.add(pdf, data_type=DataType.PDF_FILE) - return super()._run(query=query) \ No newline at end of file + def add( + self, + *args: Any, + **kwargs: Any, + ) -> None: + kwargs["data_type"] = DataType.PDF_FILE + super().add(*args, **kwargs) + + def _before_run( + self, + query: str, + **kwargs: Any, + ) -> Any: + if "pdf" in kwargs: + self.add(kwargs["pdf"]) diff --git a/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py b/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py index 8b9707185..f22cac123 100644 --- a/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py +++ b/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py @@ -1,45 +1,37 @@ -from typing import Optional, Type, Any -from pydantic.v1 import BaseModel, Field +from typing import Any, Type -from embedchain import App from embedchain.loaders.postgres import PostgresLoader +from pydantic.v1 import BaseModel, Field from ..rag.rag_tool import RagTool + class PGSearchToolSchema(BaseModel): - """Input for PGSearchTool.""" - search_query: str = Field(..., description="Mandatory semantic search query you want to use to search the database's content") + """Input for PGSearchTool.""" + + search_query: str = Field( + ..., + description="Mandatory semantic search query you want to use to search the database's content", + ) + class PGSearchTool(RagTool): - name: str = "Search a database's table content" - description: str = "A tool that can be used to semantic search a query from a database table's content." - summarize: bool = False - args_schema: Type[BaseModel] = PGSearchToolSchema - db_uri: str = Field(..., description="Mandatory database URI") - table_name: str = Field(..., description="Mandatory table name") - search_query: str = Field(..., description="Mandatory semantic search query you want to use to search the database's content") + name: str = "Search a database's table content" + description: str = "A tool that can be used to semantic search a query from a database table's content." + args_schema: Type[BaseModel] = PGSearchToolSchema + db_uri: str = Field(..., description="Mandatory database URI") - def __init__(self, table_name: Optional[str] = None, **kwargs): - super().__init__(**kwargs) - if table_name is not None: - self.table_name = table_name - self.description = f"A tool that can be used to semantic search a query the {table_name} database table's content." - self._generate_description() - else: - raise('To use PGSearchTool, you must provide a `table_name` argument') + def __init__(self, table_name: str, **kwargs): + super().__init__(**kwargs) + self.add(table_name) + self.description = f"A tool that can be used to semantic search a query the {table_name} database table's content." 
+ self._generate_description() - def _run( - self, - search_query: str, - **kwargs: Any, - ) -> Any: - - config = { "url": self.db_uri } - postgres_loader = PostgresLoader(config=config) - app = App() - app.add( - f"SELECT * FROM {self.table_name};", - data_type='postgres', - loader=postgres_loader - ) - return super()._run(query=search_query) \ No newline at end of file + def add( + self, + table_name: str, + **kwargs: Any, + ) -> None: + kwargs["data_type"] = "postgres" + kwargs["loader"] = PostgresLoader(config=dict(url=self.db_uri)) + super().add(f"SELECT * FROM {table_name};", **kwargs) diff --git a/src/crewai_tools/tools/rag/README.md b/src/crewai_tools/tools/rag/README.md index c65daca16..b432a1a69 100644 --- a/src/crewai_tools/tools/rag/README.md +++ b/src/crewai_tools/tools/rag/README.md @@ -48,9 +48,6 @@ rag_tool = RagTool().from_directory('path/to/your/directory') # Example: Loading from a web page rag_tool = RagTool().from_web_page('https://example.com') - -# Example: Loading from an Embedchain configuration -rag_tool = RagTool().from_embedchain('path/to/your/config.json') ``` ## **Contribution** @@ -61,4 +58,4 @@ Contributions to RagTool and the broader CrewAI tools ecosystem are welcome. To RagTool is open-source and available under the MIT license. -Thank you for considering RagTool for your knowledge base needs. Your contributions and feedback are invaluable to making RagTool even better. \ No newline at end of file +Thank you for considering RagTool for your knowledge base needs. Your contributions and feedback are invaluable to making RagTool even better. diff --git a/src/crewai_tools/tools/rag/rag_tool.py b/src/crewai_tools/tools/rag/rag_tool.py index 3901129ff..97291cd81 100644 --- a/src/crewai_tools/tools/rag/rag_tool.py +++ b/src/crewai_tools/tools/rag/rag_tool.py @@ -1,38 +1,71 @@ from abc import ABC, abstractmethod -from typing import Any, List, Optional +from typing import Any -from pydantic.v1 import BaseModel, ConfigDict +from pydantic import BaseModel, Field, model_validator from crewai_tools.tools.base_tool import BaseTool class Adapter(BaseModel, ABC): - model_config = ConfigDict(arbitrary_types_allowed=True) + class Config: + arbitrary_types_allowed = True @abstractmethod def query(self, question: str) -> str: """Query the knowledge base with a question and return the answer.""" + @abstractmethod + def add( + self, + *args: Any, + **kwargs: Any, + ) -> None: + """Add content to the knowledge base.""" + + class RagTool(BaseTool): - model_config = ConfigDict(arbitrary_types_allowed=True) + class _AdapterPlaceholder(Adapter): + def query(self, question: str) -> str: + raise NotImplementedError + + def add(self, *args: Any, **kwargs: Any) -> None: + raise NotImplementedError + name: str = "Knowledge base" description: str = "A knowledge base that can be used to answer questions." 
summarize: bool = False - adapter: Optional[Adapter] = None - app: Optional[Any] = None + adapter: Adapter = Field(default_factory=_AdapterPlaceholder) + config: dict[str, Any] | None = None + + @model_validator(mode="after") + def _set_default_adapter(self): + if isinstance(self.adapter, RagTool._AdapterPlaceholder): + from embedchain import App + + from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter + + app = App.from_config(config=self.config) if self.config else App() + self.adapter = EmbedchainAdapter( + embedchain_app=app, summarize=self.summarize + ) + + return self + + def add( + self, + *args: Any, + **kwargs: Any, + ) -> None: + self.adapter.add(*args, **kwargs) def _run( self, query: str, + **kwargs: Any, ) -> Any: - from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter - self.adapter = EmbedchainAdapter(embedchain_app=self.app, summarize=self.summarize) + self._before_run(query, **kwargs) + return f"Relevant Content:\n{self.adapter.query(query)}" - def from_embedchain(self, config_path: str): - from embedchain import App - from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter - - app = App.from_config(config_path=config_path) - adapter = EmbedchainAdapter(embedchain_app=app) - return RagTool(name=self.name, description=self.description, adapter=adapter) \ No newline at end of file + def _before_run(self, query, **kwargs): + pass diff --git a/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py b/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py index 0a61eae53..375ba960a 100644 --- a/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py +++ b/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py @@ -1,40 +1,52 @@ -from typing import Optional, Type, Any -from pydantic.v1 import BaseModel, Field +from typing import Any, Optional, Type -from embedchain import App from embedchain.models.data_type import DataType +from pydantic.v1 import BaseModel, Field from ..rag.rag_tool import RagTool + class FixedTXTSearchToolSchema(BaseModel): - """Input for TXTSearchTool.""" - search_query: str = Field(..., description="Mandatory search query you want to use to search the txt's content") + """Input for TXTSearchTool.""" + + search_query: str = Field( + ..., + description="Mandatory search query you want to use to search the txt's content", + ) + class TXTSearchToolSchema(FixedTXTSearchToolSchema): - """Input for TXTSearchTool.""" - txt: str = Field(..., description="Mandatory txt path you want to search") + """Input for TXTSearchTool.""" + + txt: str = Field(..., description="Mandatory txt path you want to search") + class TXTSearchTool(RagTool): - name: str = "Search a txt's content" - description: str = "A tool that can be used to semantic search a query from a txt's content." - summarize: bool = False - args_schema: Type[BaseModel] = TXTSearchToolSchema - txt: Optional[str] = None + name: str = "Search a txt's content" + description: str = ( + "A tool that can be used to semantic search a query from a txt's content." + ) + args_schema: Type[BaseModel] = TXTSearchToolSchema - def __init__(self, txt: Optional[str] = None, **kwargs): - super().__init__(**kwargs) - if txt is not None: - self.txt = txt - self.description = f"A tool that can be used to semantic search a query the {txt} txt's content." 
- self.args_schema = FixedTXTSearchToolSchema - self._generate_description() + def __init__(self, txt: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if txt is not None: + self.add(txt) + self.description = f"A tool that can be used to semantic search a query the {txt} txt's content." + self.args_schema = FixedTXTSearchToolSchema - def _run( - self, - search_query: str, - **kwargs: Any, - ) -> Any: - txt = kwargs.get('txt', self.txt) - self.app = App() - self.app.add(txt, data_type=DataType.TEXT_FILE) - return super()._run(query=search_query) \ No newline at end of file + def add( + self, + *args: Any, + **kwargs: Any, + ) -> None: + kwargs["data_type"] = DataType.TEXT_FILE + super().add(*args, **kwargs) + + def _before_run( + self, + query: str, + **kwargs: Any, + ) -> Any: + if "txt" in kwargs: + self.add(kwargs["txt"]) diff --git a/src/crewai_tools/tools/website_search/website_search_tool.py b/src/crewai_tools/tools/website_search/website_search_tool.py index 37744f2b6..5768a6ccd 100644 --- a/src/crewai_tools/tools/website_search/website_search_tool.py +++ b/src/crewai_tools/tools/website_search/website_search_tool.py @@ -1,41 +1,52 @@ -from typing import Optional, Type, Any -from pydantic.v1 import BaseModel, Field +from typing import Any, Optional, Type -from embedchain import App from embedchain.models.data_type import DataType +from pydantic.v1 import BaseModel, Field from ..rag.rag_tool import RagTool class FixedWebsiteSearchToolSchema(BaseModel): - """Input for WebsiteSearchTool.""" - search_query: str = Field(..., description="Mandatory search query you want to use to search a specific website") + """Input for WebsiteSearchTool.""" + + search_query: str = Field( + ..., + description="Mandatory search query you want to use to search a specific website", + ) + class WebsiteSearchToolSchema(FixedWebsiteSearchToolSchema): - """Input for WebsiteSearchTool.""" - website: str = Field(..., description="Mandatory valid website URL you want to search on") + """Input for WebsiteSearchTool.""" + + website: str = Field( + ..., description="Mandatory valid website URL you want to search on" + ) + class WebsiteSearchTool(RagTool): - name: str = "Search in a specific website" - description: str = "A tool that can be used to semantic search a query from a specific URL content." - summarize: bool = False - args_schema: Type[BaseModel] = WebsiteSearchToolSchema - website: Optional[str] = None + name: str = "Search in a specific website" + description: str = "A tool that can be used to semantic search a query from a specific URL content." + args_schema: Type[BaseModel] = WebsiteSearchToolSchema - def __init__(self, website: Optional[str] = None, **kwargs): - super().__init__(**kwargs) - if website is not None: - self.website = website - self.description = f"A tool that can be used to semantic search a query from {website} website content." - self.args_schema = FixedWebsiteSearchToolSchema - self._generate_description() + def __init__(self, website: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if website is not None: + self.add(website) + self.description = f"A tool that can be used to semantic search a query from {website} website content." 
+ self.args_schema = FixedWebsiteSearchToolSchema - def _run( - self, - search_query: str, - **kwargs: Any, - ) -> Any: - website = kwargs.get('website', self.website) - self.app = App() - self.app.add(website, data_type=DataType.WEB_PAGE) - return super()._run(query=search_query) \ No newline at end of file + def add( + self, + *args: Any, + **kwargs: Any, + ) -> None: + kwargs["data_type"] = DataType.WEB_PAGE + super().add(*args, **kwargs) + + def _before_run( + self, + query: str, + **kwargs: Any, + ) -> Any: + if "website" in kwargs: + self.add(kwargs["website"]) diff --git a/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py b/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py index 90cedfa56..4b3e445ea 100644 --- a/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py +++ b/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py @@ -1,41 +1,52 @@ -from typing import Optional, Type, Any -from pydantic.v1 import BaseModel, Field +from typing import Any, Optional, Type -from embedchain import App from embedchain.models.data_type import DataType +from pydantic.v1 import BaseModel, Field from ..rag.rag_tool import RagTool class FixedXMLSearchToolSchema(BaseModel): - """Input for XMLSearchTool.""" - search_query: str = Field(..., description="Mandatory search query you want to use to search the XML's content") + """Input for XMLSearchTool.""" + + search_query: str = Field( + ..., + description="Mandatory search query you want to use to search the XML's content", + ) + class XMLSearchToolSchema(FixedXMLSearchToolSchema): - """Input for XMLSearchTool.""" - xml: str = Field(..., description="Mandatory xml path you want to search") + """Input for XMLSearchTool.""" + + xml: str = Field(..., description="Mandatory xml path you want to search") + class XMLSearchTool(RagTool): - name: str = "Search a XML's content" - description: str = "A tool that can be used to semantic search a query from a XML's content." - summarize: bool = False - args_schema: Type[BaseModel] = XMLSearchToolSchema - xml: Optional[str] = None + name: str = "Search a XML's content" + description: str = ( + "A tool that can be used to semantic search a query from a XML's content." + ) + args_schema: Type[BaseModel] = XMLSearchToolSchema - def __init__(self, xml: Optional[str] = None, **kwargs): - super().__init__(**kwargs) - if xml is not None: - self.xml = xml - self.description = f"A tool that can be used to semantic search a query the {xml} XML's content." - self.args_schema = FixedXMLSearchToolSchema - self._generate_description() + def __init__(self, xml: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if xml is not None: + self.add(xml) + self.description = f"A tool that can be used to semantic search a query the {xml} XML's content." 
+ self.args_schema = FixedXMLSearchToolSchema - def _run( - self, - search_query: str, - **kwargs: Any, - ) -> Any: - xml = kwargs.get('xml', self.xml) - self.app = App() - self.app.add(xml, data_type=DataType.XML) - return super()._run(query=search_query) \ No newline at end of file + def add( + self, + *args: Any, + **kwargs: Any, + ) -> None: + kwargs["data_type"] = DataType.XML + super().add(*args, **kwargs) + + def _before_run( + self, + query: str, + **kwargs: Any, + ) -> Any: + if "xml" in kwargs: + self.add(kwargs["xml"]) diff --git a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py index fcdfe78c9..d3e4698c9 100644 --- a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py +++ b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py @@ -1,43 +1,55 @@ -from typing import Optional, Type, Any -from pydantic.v1 import BaseModel, Field +from typing import Any, Optional, Type -from embedchain import App from embedchain.models.data_type import DataType +from pydantic.v1 import BaseModel, Field from ..rag.rag_tool import RagTool class FixedYoutubeChannelSearchToolSchema(BaseModel): - """Input for YoutubeChannelSearchTool.""" - search_query: str = Field(..., description="Mandatory search query you want to use to search the Youtube Channels content") + """Input for YoutubeChannelSearchTool.""" + + search_query: str = Field( + ..., + description="Mandatory search query you want to use to search the Youtube Channels content", + ) + class YoutubeChannelSearchToolSchema(FixedYoutubeChannelSearchToolSchema): - """Input for YoutubeChannelSearchTool.""" - youtube_channel_handle: str = Field(..., description="Mandatory youtube_channel_handle path you want to search") + """Input for YoutubeChannelSearchTool.""" + + youtube_channel_handle: str = Field( + ..., description="Mandatory youtube_channel_handle path you want to search" + ) + class YoutubeChannelSearchTool(RagTool): - name: str = "Search a Youtube Channels content" - description: str = "A tool that can be used to semantic search a query from a Youtube Channels content." - summarize: bool = False - args_schema: Type[BaseModel] = YoutubeChannelSearchToolSchema - youtube_channel_handle: Optional[str] = None + name: str = "Search a Youtube Channels content" + description: str = "A tool that can be used to semantic search a query from a Youtube Channels content." + args_schema: Type[BaseModel] = YoutubeChannelSearchToolSchema - def __init__(self, youtube_channel_handle: Optional[str] = None, **kwargs): - super().__init__(**kwargs) - if youtube_channel_handle is not None: - self.youtube_channel_handle = youtube_channel_handle - self.description = f"A tool that can be used to semantic search a query the {youtube_channel_handle} Youtube Channels content." - self.args_schema = FixedYoutubeChannelSearchToolSchema - self._generate_description() + def __init__(self, youtube_channel_handle: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if youtube_channel_handle is not None: + self.add(youtube_channel_handle) + self.description = f"A tool that can be used to semantic search a query the {youtube_channel_handle} Youtube Channels content." 
+ self.args_schema = FixedYoutubeChannelSearchToolSchema - def _run( - self, - search_query: str, - **kwargs: Any, - ) -> Any: - youtube_channel_handle = kwargs.get('youtube_channel_handle', self.youtube_channel_handle) - if not youtube_channel_handle.startswith("@"): - youtube_channel_handle = f"@{youtube_channel_handle}" - self.app = App() - self.app.add(youtube_channel_handle, data_type=DataType.YOUTUBE_CHANNEL) - return super()._run(query=search_query) \ No newline at end of file + def add( + self, + youtube_channel_handle: str, + **kwargs: Any, + ) -> None: + if not youtube_channel_handle.startswith("@"): + youtube_channel_handle = f"@{youtube_channel_handle}" + + kwargs["data_type"] = DataType.YOUTUBE_CHANNEL + super().add(youtube_channel_handle, **kwargs) + + def _before_run( + self, + query: str, + **kwargs: Any, + ) -> Any: + if "youtube_channel_handle" in kwargs: + self.add(kwargs["youtube_channel_handle"]) diff --git a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py index 20aa9691d..f85457988 100644 --- a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py +++ b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py @@ -1,41 +1,52 @@ -from typing import Optional, Type, Any -from pydantic.v1 import BaseModel, Field +from typing import Any, Optional, Type -from embedchain import App from embedchain.models.data_type import DataType +from pydantic.v1 import BaseModel, Field from ..rag.rag_tool import RagTool class FixedYoutubeVideoSearchToolSchema(BaseModel): - """Input for YoutubeVideoSearchTool.""" - search_query: str = Field(..., description="Mandatory search query you want to use to search the Youtube Video content") + """Input for YoutubeVideoSearchTool.""" + + search_query: str = Field( + ..., + description="Mandatory search query you want to use to search the Youtube Video content", + ) + class YoutubeVideoSearchToolSchema(FixedYoutubeVideoSearchToolSchema): - """Input for YoutubeVideoSearchTool.""" - youtube_video_url: str = Field(..., description="Mandatory youtube_video_url path you want to search") + """Input for YoutubeVideoSearchTool.""" + + youtube_video_url: str = Field( + ..., description="Mandatory youtube_video_url path you want to search" + ) + class YoutubeVideoSearchTool(RagTool): - name: str = "Search a Youtube Video content" - description: str = "A tool that can be used to semantic search a query from a Youtube Video content." - summarize: bool = False - args_schema: Type[BaseModel] = YoutubeVideoSearchToolSchema - youtube_video_url: Optional[str] = None + name: str = "Search a Youtube Video content" + description: str = "A tool that can be used to semantic search a query from a Youtube Video content." + args_schema: Type[BaseModel] = YoutubeVideoSearchToolSchema - def __init__(self, youtube_video_url: Optional[str] = None, **kwargs): - super().__init__(**kwargs) - if youtube_video_url is not None: - self.youtube_video_url = youtube_video_url - self.description = f"A tool that can be used to semantic search a query the {youtube_video_url} Youtube Video content." 
- self.args_schema = FixedYoutubeVideoSearchToolSchema - self._generate_description() + def __init__(self, youtube_video_url: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if youtube_video_url is not None: + self.add(youtube_video_url) + self.description = f"A tool that can be used to semantic search a query the {youtube_video_url} Youtube Video content." + self.args_schema = FixedYoutubeVideoSearchToolSchema - def _run( - self, - search_query: str, - **kwargs: Any, - ) -> Any: - youtube_video_url = kwargs.get('youtube_video_url', self.youtube_video_url) - self.app = App() - self.app.add(youtube_video_url, data_type=DataType.YOUTUBE_VIDEO) - return super()._run(query=search_query) \ No newline at end of file + def add( + self, + *args: Any, + **kwargs: Any, + ) -> None: + kwargs["data_type"] = DataType.YOUTUBE_VIDEO + super().add(*args, **kwargs) + + def _before_run( + self, + query: str, + **kwargs: Any, + ) -> Any: + if "youtube_video_url" in kwargs: + self.add(kwargs["youtube_video_url"]) diff --git a/tests/tools/rag/rag_tool_test.py b/tests/tools/rag/rag_tool_test.py new file mode 100644 index 000000000..42baccc2c --- /dev/null +++ b/tests/tools/rag/rag_tool_test.py @@ -0,0 +1,43 @@ +import os +from tempfile import NamedTemporaryFile +from typing import cast +from unittest import mock + +from pytest import fixture + +from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter +from crewai_tools.tools.rag.rag_tool import RagTool + + +@fixture(autouse=True) +def mock_embedchain_db_uri(): + with NamedTemporaryFile() as tmp: + uri = f"sqlite:///{tmp.name}" + with mock.patch.dict(os.environ, {"EMBEDCHAIN_DB_URI": uri}): + yield + + +def test_custom_llm_and_embedder(): + class MyTool(RagTool): + pass + + tool = MyTool( + config=dict( + llm=dict( + provider="openai", + config=dict(model="gpt-3.5-custom"), + ), + embedder=dict( + provider="openai", + config=dict(model="text-embedding-3-custom"), + ), + ) + ) + assert tool.adapter is not None + assert isinstance(tool.adapter, EmbedchainAdapter) + + adapter = cast(EmbedchainAdapter, tool.adapter) + assert adapter.embedchain_app.llm.config.model == "gpt-3.5-custom" + assert ( + adapter.embedchain_app.embedding_model.config.model == "text-embedding-3-custom" + ) From 95fb44be8808f0eb04fd3edc5376c3acaf78869c Mon Sep 17 00:00:00 2001 From: Gui Vieira Date: Thu, 21 Mar 2024 21:13:36 -0300 Subject: [PATCH 028/391] Custom model docs --- .../tools/code_docs_search_tool/README.md | 30 ++++++++++++++++++- .../tools/csv_search_tool/README.md | 30 ++++++++++++++++++- .../tools/directory_read_tool/README.md | 2 +- .../tools/directory_search_tool/README.md | 30 ++++++++++++++++++- .../tools/docx_search_tool/README.md | 28 +++++++++++++++++ .../tools/github_search_tool/README.md | 28 +++++++++++++++++ .../tools/json_search_tool/README.md | 30 ++++++++++++++++++- .../tools/mdx_seach_tool/README.md | 30 ++++++++++++++++++- .../tools/pdf_search_tool/README.md | 28 +++++++++++++++++ .../tools/pg_seach_tool/README.md | 30 ++++++++++++++++++- .../tools/txt_search_tool/README.md | 28 +++++++++++++++++ .../tools/website_search/README.md | 30 ++++++++++++++++++- .../tools/xml_search_tool/README.md | 28 +++++++++++++++++ .../youtube_channel_search_tool/README.md | 28 +++++++++++++++++ .../tools/youtube_video_search_tool/README.md | 30 ++++++++++++++++++- 15 files changed, 401 insertions(+), 9 deletions(-) diff --git a/src/crewai_tools/tools/code_docs_search_tool/README.md b/src/crewai_tools/tools/code_docs_search_tool/README.md index 
6c58a49bc..879461427 100644
--- a/src/crewai_tools/tools/code_docs_search_tool/README.md
+++ b/src/crewai_tools/tools/code_docs_search_tool/README.md
@@ -25,4 +25,32 @@ tool = CodeDocsSearchTool(docs_url='https://docs.example.com/reference')
 Note: Substitute 'https://docs.example.com/reference' with your target documentation URL and 'How to use search tool' with the search query relevant to your needs.
 
 ## Arguments
-- `docs_url`: Optional. Specifies the URL of the code documentation to be searched. Providing this during the tool's initialization focuses the search on the specified documentation content.
\ No newline at end of file
+- `docs_url`: Optional. Specifies the URL of the code documentation to be searched. Providing this during the tool's initialization focuses the search on the specified documentation content.
+
+## Custom model and embeddings
+
+By default, the tool uses OpenAI for both embeddings and summarization. To customize the model, you can use a config dictionary as follows:
+
+```python
+tool = CodeDocsSearchTool(
+    config=dict(
+        llm=dict(
+            provider="ollama", # or google, openai, anthropic, llama2, ...
+            config=dict(
+                model="llama2",
+                # temperature=0.5,
+                # top_p=1,
+                # stream=true,
+            ),
+        ),
+        embedder=dict(
+            provider="google",
+            config=dict(
+                model="models/embedding-001",
+                task_type="retrieval_document",
+                # title="Embeddings",
+            ),
+        ),
+    )
+)
+```
diff --git a/src/crewai_tools/tools/csv_search_tool/README.md b/src/crewai_tools/tools/csv_search_tool/README.md
index 66dd46cc0..c0bcbae3d 100644
--- a/src/crewai_tools/tools/csv_search_tool/README.md
+++ b/src/crewai_tools/tools/csv_search_tool/README.md
@@ -28,4 +28,32 @@ tool = CSVSearchTool()
 
 ## Arguments
 
-- `csv` : The path to the CSV file you want to search. This is a mandatory argument if the tool was initialized without a specific CSV file; otherwise, it is optional.
\ No newline at end of file
+- `csv` : The path to the CSV file you want to search. This is a mandatory argument if the tool was initialized without a specific CSV file; otherwise, it is optional.
+
+## Custom model and embeddings
+
+By default, the tool uses OpenAI for both embeddings and summarization. To customize the model, you can use a config dictionary as follows:
+
+```python
+tool = CSVSearchTool(
+    config=dict(
+        llm=dict(
+            provider="ollama", # or google, openai, anthropic, llama2, ...
+            config=dict(
+                model="llama2",
+                # temperature=0.5,
+                # top_p=1,
+                # stream=true,
+            ),
+        ),
+        embedder=dict(
+            provider="google",
+            config=dict(
+                model="models/embedding-001",
+                task_type="retrieval_document",
+                # title="Embeddings",
+            ),
+        ),
+    )
+)
+```
diff --git a/src/crewai_tools/tools/directory_read_tool/README.md b/src/crewai_tools/tools/directory_read_tool/README.md
index b1bed2e6b..9305fd1a3 100644
--- a/src/crewai_tools/tools/directory_read_tool/README.md
+++ b/src/crewai_tools/tools/directory_read_tool/README.md
@@ -37,4 +37,4 @@ The DirectoryReadTool requires minimal configuration for use. The essential argu
 The DirectoryReadTool provides a user-friendly and efficient way to list directory contents, making it an invaluable tool for managing and inspecting directory structures.
 ```
 
-This revised documentation for the DirectoryReadTool maintains the structure and content requirements as outlined, with adjustments made for clarity, consistency, and adherence to the high-quality standards exemplified in the provided documentation example.
\ No newline at end of file +This revised documentation for the DirectoryReadTool maintains the structure and content requirements as outlined, with adjustments made for clarity, consistency, and adherence to the high-quality standards exemplified in the provided documentation example. diff --git a/src/crewai_tools/tools/directory_search_tool/README.md b/src/crewai_tools/tools/directory_search_tool/README.md index acc971807..b39e9fe96 100644 --- a/src/crewai_tools/tools/directory_search_tool/README.md +++ b/src/crewai_tools/tools/directory_search_tool/README.md @@ -24,4 +24,32 @@ tool = DirectorySearchTool(directory='/path/to/directory') ``` ## Arguments -- `directory` : This string argument specifies the directory within which to search. It is mandatory if the tool has not been initialized with a directory; otherwise, the tool will only search within the initialized directory. \ No newline at end of file +- `directory` : This string argument specifies the directory within which to search. It is mandatory if the tool has not been initialized with a directory; otherwise, the tool will only search within the initialized directory. + +## Custom model and embeddings + +By default, the tool uses OpenAI for both embeddings and summarization. To customize the model, you can use a config dictionary as follows: + +```python +tool = DirectorySearchTool( + config=dict( + llm=dict( + provider="ollama", # or google, openai, anthropic, llama2, ... + config=dict( + model="llama2", + # temperature=0.5, + # top_p=1, + # stream=true, + ), + ), + embedder=dict( + provider="google", + config=dict( + model="models/embedding-001", + task_type="retrieval_document", + # title="Embeddings", + ), + ), + ) +) +``` diff --git a/src/crewai_tools/tools/docx_search_tool/README.md b/src/crewai_tools/tools/docx_search_tool/README.md index 033c72226..c99a4984e 100644 --- a/src/crewai_tools/tools/docx_search_tool/README.md +++ b/src/crewai_tools/tools/docx_search_tool/README.md @@ -27,3 +27,31 @@ tool = DOCXSearchTool(docx='path/to/your/document.docx') ## Arguments - `docx`: An optional file path to a specific DOCX document you wish to search. If not provided during initialization, the tool allows for later specification of any DOCX file's content path for searching. + +## Custom model and embeddings + +By default, the tool uses OpenAI for both embeddings and summarization. To customize the model, you can use a config dictionary as follows: + +```python +tool = DOCXSearchTool( + config=dict( + llm=dict( + provider="ollama", # or google, openai, anthropic, llama2, ... + config=dict( + model="llama2", + # temperature=0.5, + # top_p=1, + # stream=true, + ), + ), + embedder=dict( + provider="google", + config=dict( + model="models/embedding-001", + task_type="retrieval_document", + # title="Embeddings", + ), + ), + ) +) +``` diff --git a/src/crewai_tools/tools/github_search_tool/README.md b/src/crewai_tools/tools/github_search_tool/README.md index 0ed6faeef..220e0aeb8 100644 --- a/src/crewai_tools/tools/github_search_tool/README.md +++ b/src/crewai_tools/tools/github_search_tool/README.md @@ -34,3 +34,31 @@ tool = GitHubSearchTool( ## Arguments - `github_repo` : The URL of the GitHub repository where the search will be conducted. This is a mandatory field and specifies the target repository for your search. - `content_types` : Specifies the types of content to include in your search. 
You must provide a list of content types from the following options: `code` for searching within the code, `repo` for searching within the repository's general information, `pr` for searching within pull requests, and `issue` for searching within issues. This field is mandatory and allows tailoring the search to specific content types within the GitHub repository. + +## Custom model and embeddings + +By default, the tool uses OpenAI for both embeddings and summarization. To customize the model, you can use a config dictionary as follows: + +```python +tool = GitHubSearchTool( + config=dict( + llm=dict( + provider="ollama", # or google, openai, anthropic, llama2, ... + config=dict( + model="llama2", + # temperature=0.5, + # top_p=1, + # stream=true, + ), + ), + embedder=dict( + provider="google", + config=dict( + model="models/embedding-001", + task_type="retrieval_document", + # title="Embeddings", + ), + ), + ) +) +``` diff --git a/src/crewai_tools/tools/json_search_tool/README.md b/src/crewai_tools/tools/json_search_tool/README.md index df43cc4a6..51510932e 100644 --- a/src/crewai_tools/tools/json_search_tool/README.md +++ b/src/crewai_tools/tools/json_search_tool/README.md @@ -24,4 +24,32 @@ tool = JSONSearchTool(json_path='./path/to/your/file.json') ``` ## Arguments -- `json_path` (str): An optional argument that defines the path to the JSON file to be searched. This parameter is only necessary if the tool is initialized without a specific JSON path. Providing this argument restricts the search to the specified JSON file. \ No newline at end of file +- `json_path` (str): An optional argument that defines the path to the JSON file to be searched. This parameter is only necessary if the tool is initialized without a specific JSON path. Providing this argument restricts the search to the specified JSON file. + +## Custom model and embeddings + +By default, the tool uses OpenAI for both embeddings and summarization. To customize the model, you can use a config dictionary as follows: + +```python +tool = JSONSearchTool( + config=dict( + llm=dict( + provider="ollama", # or google, openai, anthropic, llama2, ... + config=dict( + model="llama2", + # temperature=0.5, + # top_p=1, + # stream=true, + ), + ), + embedder=dict( + provider="google", + config=dict( + model="models/embedding-001", + task_type="retrieval_document", + # title="Embeddings", + ), + ), + ) +) +``` diff --git a/src/crewai_tools/tools/mdx_seach_tool/README.md b/src/crewai_tools/tools/mdx_seach_tool/README.md index 751988c9b..71b58131a 100644 --- a/src/crewai_tools/tools/mdx_seach_tool/README.md +++ b/src/crewai_tools/tools/mdx_seach_tool/README.md @@ -26,4 +26,32 @@ tool = MDXSearchTool(mdx='path/to/your/document.mdx') ``` ## Arguments -- mdx: **Optional** The MDX path for the search. Can be provided at initialization \ No newline at end of file +- mdx: **Optional** The MDX path for the search. Can be provided at initialization + +## Custom model and embeddings + +By default, the tool uses OpenAI for both embeddings and summarization. To customize the model, you can use a config dictionary as follows: + +```python +tool = MDXSearchTool( + config=dict( + llm=dict( + provider="ollama", # or google, openai, anthropic, llama2, ... 
+ config=dict( + model="llama2", + # temperature=0.5, + # top_p=1, + # stream=true, + ), + ), + embedder=dict( + provider="google", + config=dict( + model="models/embedding-001", + task_type="retrieval_document", + # title="Embeddings", + ), + ), + ) +) +``` diff --git a/src/crewai_tools/tools/pdf_search_tool/README.md b/src/crewai_tools/tools/pdf_search_tool/README.md index c927d1bcb..a4bf5d8ed 100644 --- a/src/crewai_tools/tools/pdf_search_tool/README.md +++ b/src/crewai_tools/tools/pdf_search_tool/README.md @@ -27,3 +27,31 @@ tool = PDFSearchTool(pdf='path/to/your/document.pdf') ## Arguments - `pdf`: **Optinal** The PDF path for the search. Can be provided at initialization or within the `run` method's arguments. If provided at initialization, the tool confines its search to the specified document. + +## Custom model and embeddings + +By default, the tool uses OpenAI for both embeddings and summarization. To customize the model, you can use a config dictionary as follows: + +```python +tool = PDFSearchTool( + config=dict( + llm=dict( + provider="ollama", # or google, openai, anthropic, llama2, ... + config=dict( + model="llama2", + # temperature=0.5, + # top_p=1, + # stream=true, + ), + ), + embedder=dict( + provider="google", + config=dict( + model="models/embedding-001", + task_type="retrieval_document", + # title="Embeddings", + ), + ), + ) +) +``` diff --git a/src/crewai_tools/tools/pg_seach_tool/README.md b/src/crewai_tools/tools/pg_seach_tool/README.md index a8bae491f..e462be803 100644 --- a/src/crewai_tools/tools/pg_seach_tool/README.md +++ b/src/crewai_tools/tools/pg_seach_tool/README.md @@ -25,4 +25,32 @@ tool = PGSearchTool(db_uri='postgresql://user:password@localhost:5432/mydatabase The PGSearchTool requires the following arguments for its operation: - `db_uri`: A string representing the URI of the PostgreSQL database to be queried. This argument is mandatory and must include the necessary authentication details and the location of the database. -- `table_name`: A string specifying the name of the table within the database on which the semantic search will be performed. This argument is mandatory. \ No newline at end of file +- `table_name`: A string specifying the name of the table within the database on which the semantic search will be performed. This argument is mandatory. + +## Custom model and embeddings + +By default, the tool uses OpenAI for both embeddings and summarization. To customize the model, you can use a config dictionary as follows: + +```python +tool = PGSearchTool( + config=dict( + llm=dict( + provider="ollama", # or google, openai, anthropic, llama2, ... + config=dict( + model="llama2", + # temperature=0.5, + # top_p=1, + # stream=true, + ), + ), + embedder=dict( + provider="google", + config=dict( + model="models/embedding-001", + task_type="retrieval_document", + # title="Embeddings", + ), + ), + ) +) +``` diff --git a/src/crewai_tools/tools/txt_search_tool/README.md b/src/crewai_tools/tools/txt_search_tool/README.md index 720a47d35..aaf68c291 100644 --- a/src/crewai_tools/tools/txt_search_tool/README.md +++ b/src/crewai_tools/tools/txt_search_tool/README.md @@ -29,3 +29,31 @@ tool = TXTSearchTool(txt='path/to/text/file.txt') ## Arguments - `txt` (str): **Optinal**. The path to the text file you want to search. This argument is only required if the tool was not initialized with a specific text file; otherwise, the search will be conducted within the initially provided text file. 
+ +## Custom model and embeddings + +By default, the tool uses OpenAI for both embeddings and summarization. To customize the model, you can use a config dictionary as follows: + +```python +tool = TXTSearchTool( + config=dict( + llm=dict( + provider="ollama", # or google, openai, anthropic, llama2, ... + config=dict( + model="llama2", + # temperature=0.5, + # top_p=1, + # stream=true, + ), + ), + embedder=dict( + provider="google", + config=dict( + model="models/embedding-001", + task_type="retrieval_document", + # title="Embeddings", + ), + ), + ) +) +``` diff --git a/src/crewai_tools/tools/website_search/README.md b/src/crewai_tools/tools/website_search/README.md index 306caae85..a86c75b45 100644 --- a/src/crewai_tools/tools/website_search/README.md +++ b/src/crewai_tools/tools/website_search/README.md @@ -26,4 +26,32 @@ tool = WebsiteSearchTool(website='https://example.com') ``` ## Arguments -- `website` : An optional argument that specifies the valid website URL to perform the search on. This becomes necessary if the tool is initialized without a specific website. In the `WebsiteSearchToolSchema`, this argument is mandatory. However, in the `FixedWebsiteSearchToolSchema`, it becomes optional if a website is provided during the tool's initialization, as it will then only search within the predefined website's content. \ No newline at end of file +- `website` : An optional argument that specifies the valid website URL to perform the search on. This becomes necessary if the tool is initialized without a specific website. In the `WebsiteSearchToolSchema`, this argument is mandatory. However, in the `FixedWebsiteSearchToolSchema`, it becomes optional if a website is provided during the tool's initialization, as it will then only search within the predefined website's content. + +## Custom model and embeddings + +By default, the tool uses OpenAI for both embeddings and summarization. To customize the model, you can use a config dictionary as follows: + +```python +tool = WebsiteSearchTool( + config=dict( + llm=dict( + provider="ollama", # or google, openai, anthropic, llama2, ... + config=dict( + model="llama2", + # temperature=0.5, + # top_p=1, + # stream=true, + ), + ), + embedder=dict( + provider="google", + config=dict( + model="models/embedding-001", + task_type="retrieval_document", + # title="Embeddings", + ), + ), + ) +) +``` diff --git a/src/crewai_tools/tools/xml_search_tool/README.md b/src/crewai_tools/tools/xml_search_tool/README.md index 416cb2c67..a019d9e15 100644 --- a/src/crewai_tools/tools/xml_search_tool/README.md +++ b/src/crewai_tools/tools/xml_search_tool/README.md @@ -27,3 +27,31 @@ tool = XMLSearchTool(xml='path/to/your/xmlfile.xml') ## Arguments - `xml`: This is the path to the XML file you wish to search. It is an optional parameter during the tool's initialization but must be provided either at initialization or as part of the `run` method's arguments to execute a search. + +## Custom model and embeddings + +By default, the tool uses OpenAI for both embeddings and summarization. To customize the model, you can use a config dictionary as follows: + +```python +tool = XMLSearchTool( + config=dict( + llm=dict( + provider="ollama", # or google, openai, anthropic, llama2, ... 
+ config=dict( + model="llama2", + # temperature=0.5, + # top_p=1, + # stream=true, + ), + ), + embedder=dict( + provider="google", + config=dict( + model="models/embedding-001", + task_type="retrieval_document", + # title="Embeddings", + ), + ), + ) +) +``` diff --git a/src/crewai_tools/tools/youtube_channel_search_tool/README.md b/src/crewai_tools/tools/youtube_channel_search_tool/README.md index 95f34721e..090684f48 100644 --- a/src/crewai_tools/tools/youtube_channel_search_tool/README.md +++ b/src/crewai_tools/tools/youtube_channel_search_tool/README.md @@ -27,3 +27,31 @@ tool = YoutubeChannelSearchTool(youtube_channel_handle='@exampleChannel') ## Arguments - `youtube_channel_handle` : A mandatory string representing the Youtube channel handle. This parameter is crucial for initializing the tool to specify the channel you want to search within. The tool is designed to only search within the content of the provided channel handle. + +## Custom model and embeddings + +By default, the tool uses OpenAI for both embeddings and summarization. To customize the model, you can use a config dictionary as follows: + +```python +tool = YoutubeChannelSearchTool( + config=dict( + llm=dict( + provider="ollama", # or google, openai, anthropic, llama2, ... + config=dict( + model="llama2", + # temperature=0.5, + # top_p=1, + # stream=true, + ), + ), + embedder=dict( + provider="google", + config=dict( + model="models/embedding-001", + task_type="retrieval_document", + # title="Embeddings", + ), + ), + ) +) +``` diff --git a/src/crewai_tools/tools/youtube_video_search_tool/README.md b/src/crewai_tools/tools/youtube_video_search_tool/README.md index 55faa0002..8b84613b4 100644 --- a/src/crewai_tools/tools/youtube_video_search_tool/README.md +++ b/src/crewai_tools/tools/youtube_video_search_tool/README.md @@ -29,4 +29,32 @@ tool = YoutubeVideoSearchTool(youtube_video_url='https://youtube.com/watch?v=exa The YoutubeVideoSearchTool accepts the following initialization arguments: -- `youtube_video_url`: An optional argument at initialization but required if targeting a specific Youtube video. It specifies the Youtube video URL path you want to search within. \ No newline at end of file +- `youtube_video_url`: An optional argument at initialization but required if targeting a specific Youtube video. It specifies the Youtube video URL path you want to search within. + +## Custom model and embeddings + +By default, the tool uses OpenAI for both embeddings and summarization. To customize the model, you can use a config dictionary as follows: + +```python +tool = YoutubeVideoSearchTool( + config=dict( + llm=dict( + provider="ollama", # or google, openai, anthropic, llama2, ... + config=dict( + model="llama2", + # temperature=0.5, + # top_p=1, + # stream=true, + ), + ), + embedder=dict( + provider="google", + config=dict( + model="models/embedding-001", + task_type="retrieval_document", + # title="Embeddings", + ), + ), + ) +) +``` From 3b77de7b21365672c8704191372d7bf87e134be5 Mon Sep 17 00:00:00 2001 From: Jeroen Vet Date: Sat, 23 Mar 2024 18:36:08 +0100 Subject: [PATCH 029/391] Update serper_dev_tool.py consider n_results parameter In original code n_results is always None so you always get only 10 results from Serper. With this change, when you explicitly set the n_results parameter when creating a SerperDevTool object it is taken into account. 
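To make the change concrete, here is a minimal usage sketch; it assumes `crewai_tools` is installed, `SERPER_API_KEY` is exported, and that `BaseTool` exposes the public `run()` wrapper around `_run()`:

```python
# Minimal sketch of the patched behavior, assuming SERPER_API_KEY is set
# in the environment.
from crewai_tools import SerperDevTool

# With this change, n_results is forwarded to Serper as the "num" field
# of the request payload; previously the payload only carried "q", so
# Serper always fell back to its default of 10 results.
tool = SerperDevTool(n_results=25)
results = tool.run(search_query="crewai agent tools")
print(results)
```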
--- src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py index 3fbf5ea61..a3fe871f1 100644 --- a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py +++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py @@ -15,14 +15,14 @@ class SerperDevTool(BaseTool): description: str = "A tool that can be used to semantic search a query from a txt's content." args_schema: Type[BaseModel] = SerperDevToolSchema search_url: str = "https://google.serper.dev/search" - n_results: int = None + n_results: int = 10 def _run( self, search_query: str, **kwargs: Any, ) -> Any: - payload = json.dumps({"q": search_query}) + payload = json.dumps({"q": search_query, "num": self.n_results}) headers = { 'X-API-KEY': os.environ['SERPER_API_KEY'], 'content-type': 'application/json' From 80f9613959f17bf867866fb6b4ecdce218fce633 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Wed, 27 Mar 2024 15:04:35 -0300 Subject: [PATCH 030/391] Adding two default arguments to cache function --- src/crewai_tools/tools/base_tool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/base_tool.py b/src/crewai_tools/tools/base_tool.py index 961688629..e8e497859 100644 --- a/src/crewai_tools/tools/base_tool.py +++ b/src/crewai_tools/tools/base_tool.py @@ -20,7 +20,7 @@ class BaseTool(BaseModel, ABC): """The schema for the arguments that the tool accepts.""" description_updated: bool = False """Flag to check if the description has been updated.""" - cache_function: Optional[Callable] = lambda: True + cache_function: Optional[Callable] = lambda _args, _result: True """Function that will be used to determine if the tool should be cached, should return a boolean. If None, the tool will be cached.""" @validator("args_schema", always=True, pre=True) From 92abe0b726845f4b7aacdd0b844f0dacb745265e Mon Sep 17 00:00:00 2001 From: Victor C Tavernari Date: Sun, 31 Mar 2024 23:32:40 +0100 Subject: [PATCH 031/391] Enhance file reading with error handling - Wrapped the file reading functionality inside a `_run` method. - Added error handling to return a descriptive error message if an exception occurs during file reading. --- .../tools/file_read_tool/file_read_tool.py | 57 ++++++++++++------- 1 file changed, 35 insertions(+), 22 deletions(-) diff --git a/src/crewai_tools/tools/file_read_tool/file_read_tool.py b/src/crewai_tools/tools/file_read_tool/file_read_tool.py index 8c7643852..38aeeeb2e 100644 --- a/src/crewai_tools/tools/file_read_tool/file_read_tool.py +++ b/src/crewai_tools/tools/file_read_tool/file_read_tool.py @@ -2,32 +2,45 @@ from typing import Optional, Type, Any from pydantic.v1 import BaseModel, Field from ..base_tool import BaseTool + class FixedFileReadToolSchema(BaseModel): - """Input for FileReadTool.""" - pass + """Input for FileReadTool.""" + pass + class FileReadToolSchema(FixedFileReadToolSchema): - """Input for FileReadTool.""" - file_path: str = Field(..., description="Mandatory file full path to read the file") + """Input for FileReadTool.""" + file_path: str = Field( + ..., + description="Mandatory file full path to read the file" + ) + class FileReadTool(BaseTool): - name: str = "Read a file's content" - description: str = "A tool that can be used to read a file's content." 
- args_schema: Type[BaseModel] = FileReadToolSchema - file_path: Optional[str] = None + name: str = "Read a file's content" + description: str = "A tool that can be used to read a file's content." + args_schema: Type[BaseModel] = FileReadToolSchema + file_path: Optional[str] = None - def __init__(self, file_path: Optional[str] = None, **kwargs): - super().__init__(**kwargs) - if file_path is not None: - self.file_path = file_path - self.description = f"A tool that can be used to read {file_path}'s content." - self.args_schema = FixedFileReadToolSchema - self._generate_description() + def __init__( + self, + file_path: Optional[str] = None, + **kwargs + ): + super().__init__(**kwargs) + if file_path is not None: + self.file_path = file_path + self.description = f"A tool that can be used to read {file_path}'s content." + self.args_schema = FixedFileReadToolSchema + self._generate_description() - def _run( - self, - **kwargs: Any, - ) -> Any: - file_path = kwargs.get('file_path', self.file_path) - with open(file_path, 'r') as file: - return file.read() \ No newline at end of file + def _run( + self, + **kwargs: Any, + ) -> Any: + try: + file_path = kwargs.get('file_path', self.file_path) + with open(file_path, 'r') as file: + return file.read() + except Exception as e: + return f"Fail to read the file {file_path}. Error: {e}" From 4e9709b8fb4a1f88eaaaaf9f7bddd9c3a54a5153 Mon Sep 17 00:00:00 2001 From: Gui Vieira Date: Thu, 4 Apr 2024 13:42:30 -0300 Subject: [PATCH 032/391] Fix RAG tools --- .../tools/code_docs_search_tool/code_docs_search_tool.py | 7 +++++++ .../tools/csv_search_tool/csv_search_tool.py | 7 +++++++ .../tools/directory_search_tool/directory_search_tool.py | 7 +++++++ .../tools/docx_search_tool/docx_search_tool.py | 7 +++++++ .../tools/github_search_tool/github_search_tool.py | 9 ++++++++- .../tools/json_search_tool/json_search_tool.py | 7 +++++++ src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py | 7 +++++++ src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py | 7 +++++++ .../tools/txt_search_tool/txt_search_tool.py | 7 +++++++ .../tools/website_search/website_search_tool.py | 7 +++++++ .../tools/xml_search_tool/xml_search_tool.py | 7 +++++++ .../youtube_channel_search_tool.py | 7 +++++++ .../youtube_video_search_tool.py | 7 +++++++ 13 files changed, 92 insertions(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py b/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py index 195cc8a05..a5d6e5a21 100644 --- a/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py +++ b/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py @@ -50,3 +50,10 @@ class CodeDocsSearchTool(RagTool): ) -> Any: if "docs_url" in kwargs: self.add(kwargs["docs_url"]) + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py index 6b8e79f88..fe9617f40 100644 --- a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py +++ b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py @@ -50,3 +50,10 @@ class CSVSearchTool(RagTool): ) -> Any: if "csv" in kwargs: self.add(kwargs["csv"]) + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py 
b/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py index 7f20f5979..bb07d44ed 100644 --- a/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py +++ b/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py @@ -50,3 +50,10 @@ class DirectorySearchTool(RagTool): ) -> Any: if "directory" in kwargs: self.add(kwargs["directory"]) + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py index 5c64f9824..d79efd82c 100644 --- a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py +++ b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py @@ -50,3 +50,10 @@ class DOCXSearchTool(RagTool): ) -> Any: if "docx" in kwargs: self.add(kwargs["docx"]) + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/github_search_tool/github_search_tool.py b/src/crewai_tools/tools/github_search_tool/github_search_tool.py index 4a84b166c..eead8e49a 100644 --- a/src/crewai_tools/tools/github_search_tool/github_search_tool.py +++ b/src/crewai_tools/tools/github_search_tool/github_search_tool.py @@ -21,7 +21,7 @@ class GithubSearchToolSchema(FixedGithubSearchToolSchema): github_repo: str = Field(..., description="Mandatory github you want to search") content_types: List[str] = Field( ..., - description="Mandatory content types you want to be inlcuded search, options: [code, repo, pr, issue]", + description="Mandatory content types you want to be included search, options: [code, repo, pr, issue]", ) @@ -56,3 +56,10 @@ class GithubSearchTool(RagTool): ) -> Any: if "github_repo" in kwargs: self.add(kwargs["github_repo"]) + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/json_search_tool/json_search_tool.py b/src/crewai_tools/tools/json_search_tool/json_search_tool.py index 308dca726..71bda18fb 100644 --- a/src/crewai_tools/tools/json_search_tool/json_search_tool.py +++ b/src/crewai_tools/tools/json_search_tool/json_search_tool.py @@ -50,3 +50,10 @@ class JSONSearchTool(RagTool): ) -> Any: if "json_path" in kwargs: self.add(kwargs["json_path"]) + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py b/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py index 33a58e142..a3a768e99 100644 --- a/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py +++ b/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py @@ -50,3 +50,10 @@ class MDXSearchTool(RagTool): ) -> Any: if "mdx" in kwargs: self.add(kwargs["mdx"]) + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py b/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py index f22cac123..226fb1ddd 100644 --- a/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py +++ b/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py @@ -35,3 +35,10 @@ class PGSearchTool(RagTool): kwargs["data_type"] = "postgres" kwargs["loader"] = PostgresLoader(config=dict(url=self.db_uri)) super().add(f"SELECT * FROM {table_name};", **kwargs) + + def _run( + self, + search_query: str, + **kwargs: Any, 
+ ) -> Any: + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py b/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py index 375ba960a..e8e653061 100644 --- a/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py +++ b/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py @@ -50,3 +50,10 @@ class TXTSearchTool(RagTool): ) -> Any: if "txt" in kwargs: self.add(kwargs["txt"]) + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/website_search/website_search_tool.py b/src/crewai_tools/tools/website_search/website_search_tool.py index 5768a6ccd..9233e7766 100644 --- a/src/crewai_tools/tools/website_search/website_search_tool.py +++ b/src/crewai_tools/tools/website_search/website_search_tool.py @@ -50,3 +50,10 @@ class WebsiteSearchTool(RagTool): ) -> Any: if "website" in kwargs: self.add(kwargs["website"]) + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py b/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py index 4b3e445ea..6caf09971 100644 --- a/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py +++ b/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py @@ -50,3 +50,10 @@ class XMLSearchTool(RagTool): ) -> Any: if "xml" in kwargs: self.add(kwargs["xml"]) + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py index d3e4698c9..1eb89fe56 100644 --- a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py +++ b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py @@ -53,3 +53,10 @@ class YoutubeChannelSearchTool(RagTool): ) -> Any: if "youtube_channel_handle" in kwargs: self.add(kwargs["youtube_channel_handle"]) + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py index f85457988..a2dd45661 100644 --- a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py +++ b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py @@ -50,3 +50,10 @@ class YoutubeVideoSearchTool(RagTool): ) -> Any: if "youtube_video_url" in kwargs: self.add(kwargs["youtube_video_url"]) + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + return super()._run(query=search_query) From 776826ec992b567718862b8eaf772b8eb7693dd4 Mon Sep 17 00:00:00 2001 From: Gui Vieira Date: Fri, 5 Apr 2024 18:04:45 -0300 Subject: [PATCH 033/391] Fix GithubSearchTool --- .../tools/github_search_tool/github_search_tool.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/crewai_tools/tools/github_search_tool/github_search_tool.py b/src/crewai_tools/tools/github_search_tool/github_search_tool.py index eead8e49a..1d64bae34 100644 --- a/src/crewai_tools/tools/github_search_tool/github_search_tool.py +++ b/src/crewai_tools/tools/github_search_tool/github_search_tool.py @@ -36,18 +36,21 @@ class GithubSearchTool(RagTool): 
     def __init__(self, github_repo: Optional[str] = None, **kwargs):
         super().__init__(**kwargs)
         if github_repo is not None:
-            self.add(github_repo)
+            self.add(repo=github_repo)
             self.description = f"A tool that can be used to semantic search a query the {github_repo} github repo's content."
             self.args_schema = FixedGithubSearchToolSchema
 
     def add(
         self,
-        *args: Any,
+        repo: str,
+        content_types: List[str] | None = None,
         **kwargs: Any,
     ) -> None:
+        content_types = content_types or self.content_types
+
         kwargs["data_type"] = "github"
         kwargs["loader"] = GithubLoader(config={"token": self.gh_token})
-        super().add(*args, **kwargs)
+        super().add(f"repo:{repo} type:{','.join(content_types)}", **kwargs)
 
     def _before_run(
         self,
@@ -55,7 +58,9 @@ class GithubSearchTool(RagTool):
         **kwargs: Any,
     ) -> Any:
         if "github_repo" in kwargs:
-            self.add(kwargs["github_repo"])
+            self.add(
+                repo=kwargs["github_repo"], content_types=kwargs.get("content_types")
+            )
 
     def _run(
         self,

From 9f41fb405732b79ff7897019069c4e1cd7b28589 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Moura?=
Date: Sun, 7 Apr 2024 14:18:41 -0300
Subject: [PATCH 034/391] Adding timeout to scraping website tool

---
 .../tools/scrape_website_tool/scrape_website_tool.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py
index cd8fd50d3..148a0b320 100644
--- a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py
+++ b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py
@@ -44,7 +44,12 @@ class ScrapeWebsiteTool(BaseTool):
         **kwargs: Any,
     ) -> Any:
         website_url = kwargs.get('website_url', self.website_url)
-        page = requests.get(website_url, headers=self.headers, cookies=self.cookies if self.cookies else {})
+        page = requests.get(
+            website_url,
+            timeout=15,
+            headers=self.headers,
+            cookies=self.cookies if self.cookies else {}
+        )
         parsed = BeautifulSoup(page.content, "html.parser")
         text = parsed.get_text()
         text = '\n'.join([i for i in text.split('\n') if i.strip() != ''])

From 873112d696ab8acedc2c921493a6d8939fea7339 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Moura?=
Date: Sun, 7 Apr 2024 18:12:16 -0300
Subject: [PATCH 035/391] fixing docs

---
 .../tools/github_search_tool/README.md | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/crewai_tools/tools/github_search_tool/README.md b/src/crewai_tools/tools/github_search_tool/README.md
index 220e0aeb8..e6f47e082 100644
--- a/src/crewai_tools/tools/github_search_tool/README.md
+++ b/src/crewai_tools/tools/github_search_tool/README.md
@@ -1,24 +1,24 @@
-# GitHubSearchTool
+# GithubSearchTool
 
 ## Description
-The GitHubSearchTool is a Read, Append, and Generate (RAG) tool specifically designed for conducting semantic searches within GitHub repositories. Utilizing advanced semantic search capabilities, it sifts through code, pull requests, issues, and repositories, making it an essential tool for developers, researchers, or anyone in need of precise information from GitHub.
+The GithubSearchTool is a Read, Append, and Generate (RAG) tool specifically designed for conducting semantic searches within GitHub repositories. Utilizing advanced semantic search capabilities, it sifts through code, pull requests, issues, and repositories, making it an essential tool for developers, researchers, or anyone in need of precise information from GitHub.
## Installation -To use the GitHubSearchTool, first ensure the crewai_tools package is installed in your Python environment: +To use the GithubSearchTool, first ensure the crewai_tools package is installed in your Python environment: ```shell pip install 'crewai[tools]' ``` -This command installs the necessary package to run the GitHubSearchTool along with any other tools included in the crewai_tools package. +This command installs the necessary package to run the GithubSearchTool along with any other tools included in the crewai_tools package. ## Example -Here’s how you can use the GitHubSearchTool to perform semantic searches within a GitHub repository: +Here’s how you can use the GithubSearchTool to perform semantic searches within a GitHub repository: ```python -from crewai_tools import GitHubSearchTool +from crewai_tools import GithubSearchTool # Initialize the tool for semantic searches within a specific GitHub repository -tool = GitHubSearchTool( +tool = GithubSearchTool( github_repo='https://github.com/example/repo', content_types=['code', 'issue'] # Options: code, repo, pr, issue ) @@ -26,7 +26,7 @@ tool = GitHubSearchTool( # OR # Initialize the tool for semantic searches within a specific GitHub repository, so the agent can search any repository if it learns about during its execution -tool = GitHubSearchTool( +tool = GithubSearchTool( content_types=['code', 'issue'] # Options: code, repo, pr, issue ) ``` From be1a60554f206ebfe3e44770659819e2fcc0e18f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Sun, 7 Apr 2024 18:22:09 -0300 Subject: [PATCH 036/391] TYPO --- src/crewai_tools/tools/github_search_tool/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/github_search_tool/README.md b/src/crewai_tools/tools/github_search_tool/README.md index e6f47e082..4550e4224 100644 --- a/src/crewai_tools/tools/github_search_tool/README.md +++ b/src/crewai_tools/tools/github_search_tool/README.md @@ -40,7 +40,7 @@ tool = GithubSearchTool( By default, the tool uses OpenAI for both embeddings and summarization. To customize the model, you can use a config dictionary as follows: ```python -tool = GitHubSearchTool( +tool = GithubSearchTool( config=dict( llm=dict( provider="ollama", # or google, openai, anthropic, llama2, ... 
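Since the package exports the class as `GithubSearchTool`, the old `GitHubSearchTool` spelling in the docs would fail at import time; a quick sketch of the corrected usage (the token value below is a placeholder):

```python
# The corrected name: importing GitHubSearchTool (capital H) would raise
# an ImportError, since the package exports GithubSearchTool.
from crewai_tools import GithubSearchTool

tool = GithubSearchTool(
    gh_token="<your-github-token>",  # placeholder, not a real token
    content_types=["code", "issue"],
)
```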
From c5fd5196e21d49af9f42ad4f6d5460bb34071652 Mon Sep 17 00:00:00 2001 From: Gui Vieira Date: Wed, 10 Apr 2024 11:48:23 -0300 Subject: [PATCH 037/391] Fix tool descriptions --- .../tools/code_docs_search_tool/code_docs_search_tool.py | 1 + src/crewai_tools/tools/csv_search_tool/csv_search_tool.py | 1 + .../tools/directory_search_tool/directory_search_tool.py | 1 + src/crewai_tools/tools/docx_search_tool/docx_search_tool.py | 1 + src/crewai_tools/tools/github_search_tool/README.md | 3 +++ .../tools/github_search_tool/github_search_tool.py | 5 +++-- src/crewai_tools/tools/json_search_tool/json_search_tool.py | 1 + src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py | 1 + src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py | 1 + src/crewai_tools/tools/txt_search_tool/txt_search_tool.py | 1 + src/crewai_tools/tools/website_search/website_search_tool.py | 1 + src/crewai_tools/tools/xml_search_tool/xml_search_tool.py | 1 + .../youtube_channel_search_tool.py | 1 + .../youtube_video_search_tool/youtube_video_search_tool.py | 1 + 14 files changed, 18 insertions(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py b/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py index a5d6e5a21..a69ea7eb4 100644 --- a/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py +++ b/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py @@ -34,6 +34,7 @@ class CodeDocsSearchTool(RagTool): self.add(docs_url) self.description = f"A tool that can be used to semantic search a query the {docs_url} Code Docs content." self.args_schema = FixedCodeDocsSearchToolSchema + self._generate_description() def add( self, diff --git a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py index fe9617f40..a04f227ca 100644 --- a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py +++ b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py @@ -34,6 +34,7 @@ class CSVSearchTool(RagTool): self.add(csv) self.description = f"A tool that can be used to semantic search a query the {csv} CSV's content." self.args_schema = FixedCSVSearchToolSchema + self._generate_description() def add( self, diff --git a/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py b/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py index bb07d44ed..9a988a7fa 100644 --- a/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py +++ b/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py @@ -34,6 +34,7 @@ class DirectorySearchTool(RagTool): self.add(directory) self.description = f"A tool that can be used to semantic search a query the {directory} directory's content." self.args_schema = FixedDirectorySearchToolSchema + self._generate_description() def add( self, diff --git a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py index d79efd82c..e6f5b2d55 100644 --- a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py +++ b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py @@ -34,6 +34,7 @@ class DOCXSearchTool(RagTool): self.add(docx) self.description = f"A tool that can be used to semantic search a query the {docx} DOCX's content." 
self.args_schema = FixedDOCXSearchToolSchema + self._generate_description() def add( self, diff --git a/src/crewai_tools/tools/github_search_tool/README.md b/src/crewai_tools/tools/github_search_tool/README.md index 4550e4224..55e01dd50 100644 --- a/src/crewai_tools/tools/github_search_tool/README.md +++ b/src/crewai_tools/tools/github_search_tool/README.md @@ -19,6 +19,7 @@ from crewai_tools import GithubSearchTool # Initialize the tool for semantic searches within a specific GitHub repository tool = GithubSearchTool( + gh_token='...', github_repo='https://github.com/example/repo', content_types=['code', 'issue'] # Options: code, repo, pr, issue ) @@ -27,11 +28,13 @@ tool = GithubSearchTool( # Initialize the tool for semantic searches within a specific GitHub repository, so the agent can search any repository if it learns about during its execution tool = GithubSearchTool( + gh_token='...', content_types=['code', 'issue'] # Options: code, repo, pr, issue ) ``` ## Arguments +- `gh_token` : The GitHub token used to authenticate the search. This is a mandatory field and allows the tool to access the GitHub API for conducting searches. - `github_repo` : The URL of the GitHub repository where the search will be conducted. This is a mandatory field and specifies the target repository for your search. - `content_types` : Specifies the types of content to include in your search. You must provide a list of content types from the following options: `code` for searching within the code, `repo` for searching within the repository's general information, `pr` for searching within pull requests, and `issue` for searching within issues. This field is mandatory and allows tailoring the search to specific content types within the GitHub repository. diff --git a/src/crewai_tools/tools/github_search_tool/github_search_tool.py b/src/crewai_tools/tools/github_search_tool/github_search_tool.py index 1d64bae34..5bfa65542 100644 --- a/src/crewai_tools/tools/github_search_tool/github_search_tool.py +++ b/src/crewai_tools/tools/github_search_tool/github_search_tool.py @@ -27,7 +27,7 @@ class GithubSearchToolSchema(FixedGithubSearchToolSchema): class GithubSearchTool(RagTool): name: str = "Search a github repo's content" - description: str = "A tool that can be used to semantic search a query from a github repo's content." + description: str = "A tool that can be used to semantic search a query from a github repo's content. This is not the GitHub API, but instead a tool that can provide semantic search capabilities." summarize: bool = False gh_token: str args_schema: Type[BaseModel] = GithubSearchToolSchema @@ -37,8 +37,9 @@ class GithubSearchTool(RagTool): super().__init__(**kwargs) if github_repo is not None: self.add(repo=github_repo) - self.description = f"A tool that can be used to semantic search a query the {github_repo} github repo's content." + self.description = f"A tool that can be used to semantic search a query the {github_repo} github repo's content. This is not the GitHub API, but instead a tool that can provide semantic search capabilities." 
self.args_schema = FixedGithubSearchToolSchema + self._generate_description() def add( self, diff --git a/src/crewai_tools/tools/json_search_tool/json_search_tool.py b/src/crewai_tools/tools/json_search_tool/json_search_tool.py index 71bda18fb..102cd89ad 100644 --- a/src/crewai_tools/tools/json_search_tool/json_search_tool.py +++ b/src/crewai_tools/tools/json_search_tool/json_search_tool.py @@ -34,6 +34,7 @@ class JSONSearchTool(RagTool): self.add(json_path) self.description = f"A tool that can be used to semantic search a query the {json_path} JSON's content." self.args_schema = FixedJSONSearchToolSchema + self._generate_description() def add( self, diff --git a/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py b/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py index a3a768e99..99bd37348 100644 --- a/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py +++ b/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py @@ -34,6 +34,7 @@ class MDXSearchTool(RagTool): self.add(mdx) self.description = f"A tool that can be used to semantic search a query the {mdx} MDX's content." self.args_schema = FixedMDXSearchToolSchema + self._generate_description() def add( self, diff --git a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py index 47e425a45..af95ae0bf 100644 --- a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py +++ b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py @@ -33,6 +33,7 @@ class PDFSearchTool(RagTool): self.add(pdf) self.description = f"A tool that can be used to semantic search a query the {pdf} PDF's content." self.args_schema = FixedPDFSearchToolSchema + self._generate_description() def add( self, diff --git a/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py b/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py index e8e653061..921e633e8 100644 --- a/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py +++ b/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py @@ -34,6 +34,7 @@ class TXTSearchTool(RagTool): self.add(txt) self.description = f"A tool that can be used to semantic search a query the {txt} txt's content." self.args_schema = FixedTXTSearchToolSchema + self._generate_description() def add( self, diff --git a/src/crewai_tools/tools/website_search/website_search_tool.py b/src/crewai_tools/tools/website_search/website_search_tool.py index 9233e7766..cfe163ae8 100644 --- a/src/crewai_tools/tools/website_search/website_search_tool.py +++ b/src/crewai_tools/tools/website_search/website_search_tool.py @@ -34,6 +34,7 @@ class WebsiteSearchTool(RagTool): self.add(website) self.description = f"A tool that can be used to semantic search a query from {website} website content." self.args_schema = FixedWebsiteSearchToolSchema + self._generate_description() def add( self, diff --git a/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py b/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py index 6caf09971..53fd73248 100644 --- a/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py +++ b/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py @@ -34,6 +34,7 @@ class XMLSearchTool(RagTool): self.add(xml) self.description = f"A tool that can be used to semantic search a query the {xml} XML's content." 
self.args_schema = FixedXMLSearchToolSchema + self._generate_description() def add( self, diff --git a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py index 1eb89fe56..8e9591be8 100644 --- a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py +++ b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py @@ -34,6 +34,7 @@ class YoutubeChannelSearchTool(RagTool): self.add(youtube_channel_handle) self.description = f"A tool that can be used to semantic search a query the {youtube_channel_handle} Youtube Channels content." self.args_schema = FixedYoutubeChannelSearchToolSchema + self._generate_description() def add( self, diff --git a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py index a2dd45661..f1caa1b9c 100644 --- a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py +++ b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py @@ -34,6 +34,7 @@ class YoutubeVideoSearchTool(RagTool): self.add(youtube_video_url) self.description = f"A tool that can be used to semantic search a query the {youtube_video_url} Youtube Video content." self.args_schema = FixedYoutubeVideoSearchToolSchema + self._generate_description() def add( self, From aab3acbaa66a7ffefac316fd78a4bfc3869a874c Mon Sep 17 00:00:00 2001 From: Christian24 Date: Mon, 15 Apr 2024 22:14:16 +0200 Subject: [PATCH 038/391] Fix wrong comments / descriptions for SerperDevTool --- src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py index 3fbf5ea61..7328be83c 100644 --- a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py +++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py @@ -7,12 +7,12 @@ from pydantic.v1 import BaseModel, Field from crewai_tools.tools.base_tool import BaseTool class SerperDevToolSchema(BaseModel): - """Input for TXTSearchTool.""" + """Input for SerperDevTool.""" search_query: str = Field(..., description="Mandatory search query you want to use to search the internet") class SerperDevTool(BaseTool): name: str = "Search the internet" - description: str = "A tool that can be used to semantic search a query from a txt's content." + description: str = "A tool that can be used to search the internet." 
args_schema: Type[BaseModel] = SerperDevToolSchema search_url: str = "https://google.serper.dev/search" n_results: int = None From b80dd1ca8b83f5aa9b9cb30e6b5af4c54f482376 Mon Sep 17 00:00:00 2001 From: Mish Ushakov <10400064+mishushakov@users.noreply.github.com> Date: Wed, 17 Apr 2024 13:36:21 +0000 Subject: [PATCH 039/391] added BrowserbaseLoadTool --- src/crewai_tools/__init__.py | 1 + src/crewai_tools/tools/__init__.py | 3 +- .../tools/browserbase_load_tool/README.md | 29 +++++++++++++++++++ .../browserbase_load_tool.py | 21 ++++++++++++++ 4 files changed, 53 insertions(+), 1 deletion(-) create mode 100644 src/crewai_tools/tools/browserbase_load_tool/README.md create mode 100644 src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 642ad703c..34e15b876 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -1,5 +1,6 @@ from .tools.base_tool import BaseTool, Tool, tool from .tools import ( + BrowserbaseLoadTool, CodeDocsSearchTool, CSVSearchTool, DirectorySearchTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 99860a14f..efbe0588e 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -1,3 +1,4 @@ +from .browserbase_load_tool.browserbase_load_tool import BrowserbaseLoadTool from .code_docs_search_tool.code_docs_search_tool import CodeDocsSearchTool from .csv_search_tool.csv_search_tool import CSVSearchTool from .directory_search_tool.directory_search_tool import DirectorySearchTool @@ -18,4 +19,4 @@ from .selenium_scraping_tool.selenium_scraping_tool import SeleniumScrapingTool from .website_search.website_search_tool import WebsiteSearchTool from .xml_search_tool.xml_search_tool import XMLSearchTool from .youtube_channel_search_tool.youtube_channel_search_tool import YoutubeChannelSearchTool -from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool \ No newline at end of file +from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool diff --git a/src/crewai_tools/tools/browserbase_load_tool/README.md b/src/crewai_tools/tools/browserbase_load_tool/README.md new file mode 100644 index 000000000..0007feb91 --- /dev/null +++ b/src/crewai_tools/tools/browserbase_load_tool/README.md @@ -0,0 +1,29 @@ +# BrowserbaseLoadTool + +## Description + +[Browserbase](https://browserbase.com) is a serverless platform for running headless browsers, it offers advanced debugging, session recordings, stealth mode, integrated proxies and captcha solving. + +## Installation + +- Get an API key from [browserbase.com](https://browserbase.com) and set it in environment variables (`BROWSERBASE_KEY`). +- Install the [Browserbase SDK](http://github.com/browserbase/python-sdk) along with `crewai[tools]` package: + +``` +pip install browserbase 'crewai[tools]' +``` + +## Example + +Utilize the BrowserbaseLoadTool as follows to allow your agent to load websites: + +```python +from crewai_tools import BrowserbaseLoadTool + +tool = BrowserbaseLoadTool() +``` + +## Arguments + +- `api_key`: Optional. Specifies Browserbase API key. Defaults is the `BROWSERBASE_KEY` environment variable. +- `text_content`: Optional. Load pages as readable text. Default is `False`. 
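To make the arguments above concrete, here is a minimal, hypothetical invocation sketch — it assumes `BROWSERBASE_KEY` is exported and that `BaseTool.run` forwards keyword arguments to the tool's `_run`, the invocation pattern the rest of this series relies on:

```python
from crewai_tools import BrowserbaseLoadTool

# Assumes BROWSERBASE_KEY is set in the environment, per the README above
tool = BrowserbaseLoadTool(text_content=True)

# BaseTool.run(...) is assumed to dispatch to _run(url) under the hood
contents = tool.run(url="https://example.com")
print(contents)
```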
diff --git a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py new file mode 100644 index 000000000..03ee53f99 --- /dev/null +++ b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py @@ -0,0 +1,21 @@ +import os +from crewai_tools import BaseTool +from typing import Union + +class BrowserbaseLoadTool(BaseTool): + name: str = "Browserbase web load tool" + description: str = "Load webpages in a headless browser using Browserbase and return the contents" + + def __init__(self, api_key: str = os.environ["BROWSERBASE_KEY"], text_content: bool = False): + try: + from browserbase import Browserbase + except ImportError: + raise ImportError( + "`browserbase` package not found, please run `pip install browserbase`" + ) + + self.browserbase = Browserbase(api_key=api_key) + self.text_content = text_content + + def _run(self, url: str): + return self.browserbase.load_url(url, text_content=self.text_content) From e0d799c075972414d328ee4d9e362e286c90f0ef Mon Sep 17 00:00:00 2001 From: Mish Ushakov <10400064+mishushakov@users.noreply.github.com> Date: Fri, 19 Apr 2024 11:40:03 +0000 Subject: [PATCH 040/391] updated browserbase integration --- src/crewai_tools/tools/browserbase_load_tool/README.md | 4 ++-- .../tools/browserbase_load_tool/browserbase_load_tool.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/crewai_tools/tools/browserbase_load_tool/README.md b/src/crewai_tools/tools/browserbase_load_tool/README.md index 0007feb91..a2866f9a8 100644 --- a/src/crewai_tools/tools/browserbase_load_tool/README.md +++ b/src/crewai_tools/tools/browserbase_load_tool/README.md @@ -6,7 +6,7 @@ ## Installation -- Get an API key from [browserbase.com](https://browserbase.com) and set it in environment variables (`BROWSERBASE_KEY`). +- Get an API key from [browserbase.com](https://browserbase.com) and set it in environment variables (`BROWSERBASE_API_KEY`). - Install the [Browserbase SDK](http://github.com/browserbase/python-sdk) along with `crewai[tools]` package: ``` @@ -25,5 +25,5 @@ tool = BrowserbaseLoadTool() ## Arguments -- `api_key`: Optional. Specifies Browserbase API key. Defaults is the `BROWSERBASE_KEY` environment variable. +- `api_key`: Optional. Specifies Browserbase API key. Defaults is the `BROWSERBASE_API_KEY` environment variable. - `text_content`: Optional. Load pages as readable text. Default is `False`. 
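The README change above renames the variable to `BROWSERBASE_API_KEY` and makes `api_key` optional; the exact fallback lives inside the Browserbase SDK, so the following is only an assumed sketch of that resolution pattern:

```python
import os
from typing import Optional

def resolve_api_key(explicit_key: Optional[str] = None) -> Optional[str]:
    # Prefer an explicitly passed key; otherwise fall back to the env var
    # named in the README. Returns None when neither is available.
    return explicit_key or os.environ.get("BROWSERBASE_API_KEY")
```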
diff --git a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py index 03ee53f99..126219bd3 100644 --- a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py +++ b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py @@ -1,12 +1,12 @@ import os from crewai_tools import BaseTool -from typing import Union +from typing import Union, Optional class BrowserbaseLoadTool(BaseTool): name: str = "Browserbase web load tool" description: str = "Load webpages in a headless browser using Browserbase and return the contents" - def __init__(self, api_key: str = os.environ["BROWSERBASE_KEY"], text_content: bool = False): + def __init__(self, api_key: Optional[str] = None, text_content: bool = False): try: from browserbase import Browserbase except ImportError: From 39aba4cb48b0fdbfc2dff63ee01ec0d6c6c22078 Mon Sep 17 00:00:00 2001 From: Mish Ushakov <10400064+mishushakov@users.noreply.github.com> Date: Thu, 25 Apr 2024 12:14:56 +0000 Subject: [PATCH 041/391] updated browserbase load tool --- .../tools/browserbase_load_tool/browserbase_load_tool.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py index 126219bd3..7bf066287 100644 --- a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py +++ b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py @@ -1,12 +1,15 @@ -import os from crewai_tools import BaseTool -from typing import Union, Optional +from typing import Optional, Any class BrowserbaseLoadTool(BaseTool): name: str = "Browserbase web load tool" description: str = "Load webpages in a headless browser using Browserbase and return the contents" + api_key: Optional[str] = None + text_content: Optional[bool] = False + browserbase: Optional[Any] = None - def __init__(self, api_key: Optional[str] = None, text_content: bool = False): + def __init__(self, api_key: Optional[str] = None, text_content: Optional[bool] = False, **kwargs): + super().__init__(**kwargs) try: from browserbase import Browserbase except ImportError: From f78011e68ca911010b81a4097b153d57eca3162b Mon Sep 17 00:00:00 2001 From: Mish Ushakov <10400064+mishushakov@users.noreply.github.com> Date: Mon, 29 Apr 2024 11:53:10 +0200 Subject: [PATCH 042/391] added args_schema to browserbase tool --- .../tools/browserbase_load_tool/browserbase_load_tool.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py index 7bf066287..d29656188 100644 --- a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py +++ b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py @@ -1,9 +1,14 @@ from crewai_tools import BaseTool from typing import Optional, Any +from pydantic.v1 import BaseModel, Field + +class BrowserbaseLoadToolSchema(BaseModel): + url: str = Field(description="Website URL") class BrowserbaseLoadTool(BaseTool): name: str = "Browserbase web load tool" description: str = "Load webpages in a headless browser using Browserbase and return the contents" + args_schema: Type[BaseModel] = BrowserbaseLoadToolSchema api_key: Optional[str] = None text_content: Optional[bool] = False browserbase: Optional[Any] = None From d4fc993f1e74811550ea64622312a1bb98f7d129 Mon Sep 
17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Thu, 2 May 2024 02:25:55 -0300 Subject: [PATCH 043/391] Update serper_dev_tool.py --- src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py index a3fe871f1..d8471bef7 100644 --- a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py +++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py @@ -12,7 +12,7 @@ class SerperDevToolSchema(BaseModel): class SerperDevTool(BaseTool): name: str = "Search the internet" - description: str = "A tool that can be used to semantic search a query from a txt's content." + description: str = "A tool that can be used to search the internet with a search_query." args_schema: Type[BaseModel] = SerperDevToolSchema search_url: str = "https://google.serper.dev/search" n_results: int = 10 From 5fb9ddfa2a604b0a55c3281b10d30eef30608d40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Thu, 2 May 2024 02:37:47 -0300 Subject: [PATCH 044/391] updating serperdev to allow for query and search_query --- src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py index e08c1e983..927c0e3b3 100644 --- a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py +++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py @@ -19,10 +19,13 @@ class SerperDevTool(BaseTool): def _run( self, - search_query: str, **kwargs: Any, ) -> Any: - payload = json.dumps({"q": search_query, "num": self.n_results}) + search_query = kwargs.get('search_query') + if search_query is None: + search_query = kwargs.get('query') + + payload = json.dumps({"q": search_query}) headers = { 'X-API-KEY': os.environ['SERPER_API_KEY'], 'content-type': 'application/json' From ffd5942b31b253d58f987d105192e42b419123cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Thu, 2 May 2024 02:48:21 -0300 Subject: [PATCH 045/391] pushing initial exa search tool --- .../tools/exa_tools/exa_base_tool.py | 37 +++++++++++++++++++ .../tools/exa_tools/exa_url_search_tool.py | 24 ++++++++++++ 2 files changed, 61 insertions(+) create mode 100644 src/crewai_tools/tools/exa_tools/exa_base_tool.py create mode 100644 src/crewai_tools/tools/exa_tools/exa_url_search_tool.py diff --git a/src/crewai_tools/tools/exa_tools/exa_base_tool.py b/src/crewai_tools/tools/exa_tools/exa_base_tool.py new file mode 100644 index 000000000..6235ab57b --- /dev/null +++ b/src/crewai_tools/tools/exa_tools/exa_base_tool.py @@ -0,0 +1,37 @@ +import os +from typing import Type +from pydantic.v1 import BaseModel, Field +from crewai_tools.tools.base_tool import BaseTool + +class EXABaseToolToolSchema(BaseModel): + """Input for EXABaseTool.""" + search_query: str = Field(..., description="Mandatory search query you want to use to search the internet") + +class EXABaseTool(BaseTool): + name: str = "Search the internet" + description: str = "A tool that can be used to search the internet from a search_query" + args_schema: Type[BaseModel] = EXABaseToolToolSchema + search_url: str = "https://api.exa.ai/search" + n_results: int = None + headers: dict = { + "accept": "application/json", + "content-type": "application/json", + "x-api-key": os.environ['EXA_API_KEY'], + } + + def 
_parse_results(self, results): + string = [] + for result in results: + try: + string.append('\n'.join([ + f"Title: {result['title']}", + f"Score: {result['score']}", + f"Url: {result['url']}", + f"ID: {result['id']}", + "---" + ])) + except KeyError: + continue + + content = '\n'.join(string) + return f"\nSearch results: {content}\n" diff --git a/src/crewai_tools/tools/exa_tools/exa_url_search_tool.py b/src/crewai_tools/tools/exa_tools/exa_url_search_tool.py new file mode 100644 index 000000000..6e5f389b7 --- /dev/null +++ b/src/crewai_tools/tools/exa_tools/exa_url_search_tool.py @@ -0,0 +1,24 @@ +import requests +from typing import Any + +from .exa_base_tool import EXABaseTool + +class EXAURLTool(EXABaseTool): + def _run( + self, + **kwargs: Any, + ) -> Any: + search_query = kwargs.get('search_query') + if search_query is None: + search_query = kwargs.get('query') + + payload = { + "query": search_query, + } + + response = requests.post(self.search_url, json=payload, headers=self.headers) + results = response.json() + if 'results' in results: + results = super()._parse_results(results['results']) + else: + return results From 768bb74a2c17e46863556e1dae47091e2a77155f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Thu, 2 May 2024 02:50:54 -0300 Subject: [PATCH 046/391] renaming exa tool --- src/crewai_tools/__init__.py | 1 + src/crewai_tools/tools/__init__.py | 1 + src/crewai_tools/tools/exa_tools/README.md | 30 +++++++++++++++++++ ..._url_search_tool.py => exa_search_tool.py} | 2 +- 4 files changed, 33 insertions(+), 1 deletion(-) create mode 100644 src/crewai_tools/tools/exa_tools/README.md rename src/crewai_tools/tools/exa_tools/{exa_url_search_tool.py => exa_search_tool.py} (93%) diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 34e15b876..faac5d37d 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -6,6 +6,7 @@ from .tools import ( DirectorySearchTool, DOCXSearchTool, DirectoryReadTool, + EXASearchTool, FileReadTool, GithubSearchTool, SerperDevTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index efbe0588e..648671d97 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -4,6 +4,7 @@ from .csv_search_tool.csv_search_tool import CSVSearchTool from .directory_search_tool.directory_search_tool import DirectorySearchTool from .directory_read_tool.directory_read_tool import DirectoryReadTool from .docx_search_tool.docx_search_tool import DOCXSearchTool +from .exa_tools.exa_search_tool import EXASearchTool from .file_read_tool.file_read_tool import FileReadTool from .github_search_tool.github_search_tool import GithubSearchTool from .serper_dev_tool.serper_dev_tool import SerperDevTool diff --git a/src/crewai_tools/tools/exa_tools/README.md b/src/crewai_tools/tools/exa_tools/README.md new file mode 100644 index 000000000..8d556dab3 --- /dev/null +++ b/src/crewai_tools/tools/exa_tools/README.md @@ -0,0 +1,30 @@ +# EXASearchTool Documentation + +## Description +This tool is designed to perform a semantic search for a specified query across the internet. It utilizes the `https://exa.ai/` API to fetch and display the most relevant search results based on the query provided by the user.
+ +## Installation +To incorporate this tool into your project, follow the installation instructions below: +```shell +pip install 'crewai[tools]' +``` + +## Example +The following example demonstrates how to initialize the tool and execute a search with a given query: + +```python +from crewai_tools import EXASearchTool + +# Initialize the tool for internet searching capabilities +tool = EXASearchTool() +``` + +## Steps to Get Started +To effectively use the `EXASearchTool`, follow these steps: + +1. **Package Installation**: Confirm that the `crewai[tools]` package is installed in your Python environment. +2. **API Key Acquisition**: Acquire a `https://exa.ai/` API key by registering for a free account at `https://exa.ai/`. +3. **Environment Configuration**: Store your obtained API key in an environment variable named `EXA_API_KEY` to facilitate its use by the tool. + +## Conclusion +By integrating the `EXASearchTool` into Python projects, users gain the ability to conduct real-time, relevant searches across the internet directly from their applications. By adhering to the setup and usage guidelines provided, incorporating this tool into projects is streamlined and straightforward. diff --git a/src/crewai_tools/tools/exa_tools/exa_url_search_tool.py b/src/crewai_tools/tools/exa_tools/exa_search_tool.py similarity index 93% rename from src/crewai_tools/tools/exa_tools/exa_url_search_tool.py rename to src/crewai_tools/tools/exa_tools/exa_search_tool.py index 6e5f389b7..fe67c4fa2 100644 --- a/src/crewai_tools/tools/exa_tools/exa_url_search_tool.py +++ b/src/crewai_tools/tools/exa_tools/exa_search_tool.py @@ -3,7 +3,7 @@ from typing import Any from .exa_base_tool import EXABaseTool -class EXAURLTool(EXABaseTool): +class EXASearchTool(EXABaseTool): def _run( self, **kwargs: Any, From cf96d5579ff78cc07605ec357ad7e4098a4533e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Thu, 2 May 2024 02:54:18 -0300 Subject: [PATCH 047/391] Update README.md --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 0287c6c3b..6af85df02 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,6 @@ Tools are always expect to return strings, as they are meant to be used by the a There are three ways to create tools for crewAI agents: - [Subclassing `BaseTool`](#subclassing-basetool) -- [Creating a tool from a function or lambda](#functional-tool-creation) - [Using the `tool` decorator](#utilizing-the-tool-decorator) ### Subclassing `BaseTool` @@ -122,4 +121,4 @@ poetry build pip install dist/*.tar.gz ``` -Thank you for your interest in enhancing the capabilities of AI agents through advanced tooling. Your contributions make a significant impact. \ No newline at end of file +Thank you for your interest in enhancing the capabilities of AI agents through advanced tooling. Your contributions make a significant impact. 
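A hedged usage sketch for the renamed `EXASearchTool` (it assumes `EXA_API_KEY` is set before the tool is used; note that `_run` above only returns parsed results from its `else` branch, a quirk tidied up later in this series, so the sketch shows the intended behavior):

```python
from crewai_tools import EXASearchTool

# Assumes EXA_API_KEY is exported before this point, since at this stage of
# the series the base tool reads it while building its request headers
tool = EXASearchTool()

results = tool.run(search_query="open source web crawlers")
print(results)  # a "Search results:" block listing Title/Score/Url/ID per hit
```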
From a95cbfdc6ad76fc3ecc5f64e0d0ed3377d10ef30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Thu, 2 May 2024 03:04:16 -0300 Subject: [PATCH 048/391] TYPO --- .../tools/selenium_scraping_tool/selenium_scraping_tool.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py index 6af3e18cb..d0c420fc9 100644 --- a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py +++ b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py @@ -55,7 +55,6 @@ class SeleniumScrapingTool(BaseTool): body_text = driver.find_element(By.TAG_NAME, "body").text content.append(body_text) else: - driver.find_elements(By.CSS_SELECTOR, css_element) for element in driver.find_elements(By.CSS_SELECTOR, css_element): content.append(element.text) driver.close() From cf6231384c447780d07446fff6f1c83d7338c91c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Thu, 2 May 2024 03:56:14 -0300 Subject: [PATCH 049/391] quick fix --- .../tools/browserbase_load_tool/browserbase_load_tool.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py index d29656188..48f3dacef 100644 --- a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py +++ b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py @@ -1,13 +1,13 @@ -from crewai_tools import BaseTool -from typing import Optional, Any +from typing import Optional, Any, Type from pydantic.v1 import BaseModel, Field +from crewai_tools.tools.base_tool import BaseTool class BrowserbaseLoadToolSchema(BaseModel): url: str = Field(description="Website URL") class BrowserbaseLoadTool(BaseTool): name: str = "Browserbase web load tool" - description: str = "Load webpages in a headless browser using Browserbase and return the contents" + description: str = "Load webpages url in a headless browser using Browserbase and return the contents" args_schema: Type[BaseModel] = BrowserbaseLoadToolSchema api_key: Optional[str] = None text_content: Optional[bool] = False @@ -16,7 +16,7 @@ class BrowserbaseLoadTool(BaseTool): def __init__(self, api_key: Optional[str] = None, text_content: Optional[bool] = False, **kwargs): super().__init__(**kwargs) try: - from browserbase import Browserbase + from browserbase import Browserbase # type: ignore except ImportError: raise ImportError( "`browserbase` package not found, please run `pip install browserbase`" From 59d9d9eb1f43c5b93828c7c3049d63dee7958540 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Thu, 2 May 2024 04:12:45 -0300 Subject: [PATCH 050/391] moving EXA env dependency to tool execution --- src/crewai_tools/tools/exa_tools/exa_base_tool.py | 1 - src/crewai_tools/tools/exa_tools/exa_search_tool.py | 6 +++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/exa_tools/exa_base_tool.py b/src/crewai_tools/tools/exa_tools/exa_base_tool.py index 6235ab57b..237af8f84 100644 --- a/src/crewai_tools/tools/exa_tools/exa_base_tool.py +++ b/src/crewai_tools/tools/exa_tools/exa_base_tool.py @@ -16,7 +16,6 @@ class EXABaseTool(BaseTool): headers: dict = { "accept": "application/json", "content-type": "application/json", - "x-api-key": os.environ['EXA_API_KEY'], } def _parse_results(self, results): diff --git 
a/src/crewai_tools/tools/exa_tools/exa_search_tool.py b/src/crewai_tools/tools/exa_tools/exa_search_tool.py index fe67c4fa2..d26357e8c 100644 --- a/src/crewai_tools/tools/exa_tools/exa_search_tool.py +++ b/src/crewai_tools/tools/exa_tools/exa_search_tool.py @@ -1,3 +1,4 @@ +import os import requests from typing import Any @@ -16,7 +17,10 @@ class EXASearchTool(EXABaseTool): "query": search_query, } - response = requests.post(self.search_url, json=payload, headers=self.headers) + headers = self.headers.copy() + headers["x-api-key"] = os.environ['EXA_API_KEY'] + + response = requests.post(self.search_url, json=payload, headers=headers) results = response.json() if 'results' in results: results = super()._parse_results(results['results']) From 037e80c6a30878712744356d5bf42e7c2499c8ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Thu, 2 May 2024 23:15:32 -0300 Subject: [PATCH 051/391] cutting new version with exa search tool --- .../tools/exa_tools/exa_search_tool.py | 35 +++++++++---------- 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/src/crewai_tools/tools/exa_tools/exa_search_tool.py b/src/crewai_tools/tools/exa_tools/exa_search_tool.py index d26357e8c..541c673b9 100644 --- a/src/crewai_tools/tools/exa_tools/exa_search_tool.py +++ b/src/crewai_tools/tools/exa_tools/exa_search_tool.py @@ -5,24 +5,23 @@ from typing import Any from .exa_base_tool import EXABaseTool class EXASearchTool(EXABaseTool): - def _run( - self, - **kwargs: Any, - ) -> Any: - search_query = kwargs.get('search_query') - if search_query is None: - search_query = kwargs.get('query') + def _run( + self, + **kwargs: Any, + ) -> Any: + search_query = kwargs.get('search_query') + if search_query is None: + search_query = kwargs.get('query') - payload = { - "query": search_query, - } + payload = { + "query": search_query, + } - headers = self.headers.copy() - headers["x-api-key"] = os.environ['EXA_API_KEY'] + headers = self.headers.copy() + headers["x-api-key"] = os.environ['EXA_API_KEY'] - response = requests.post(self.search_url, json=payload, headers=headers) - results = response.json() - if 'results' in results: - results = super()._parse_results(results['results']) - else: - return results + response = requests.post(self.search_url, json=payload, headers=headers) + results = response.json() + if 'results' in results: + results = super()._parse_results(results['results']) + return results From 7c6b1249fb285f41fb64d3918d2ca6f7259ad98c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Mon, 6 May 2024 14:02:09 -0300 Subject: [PATCH 052/391] cutting new version with improved docs search --- .../tools/docx_search_tool/docx_search_tool.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py index e6f5b2d55..ed2d4daad 100644 --- a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py +++ b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py @@ -8,18 +8,18 @@ from ..rag.rag_tool import RagTool class FixedDOCXSearchToolSchema(BaseModel): """Input for DOCXSearchTool.""" - + docx: str = Optional[Field](..., description="Mandatory docx path you want to search") search_query: str = Field( ..., description="Mandatory search query you want to use to search the DOCX's content", ) - class DOCXSearchToolSchema(FixedDOCXSearchToolSchema): """Input for DOCXSearchTool.""" - - docx: str = Field(..., description="Mandatory 
docx path you want to search") - + search_query: str = Field( + ..., + description="Mandatory search query you want to use to search the DOCX's content", + ) class DOCXSearchTool(RagTool): name: str = "Search a DOCX's content" @@ -54,7 +54,13 @@ class DOCXSearchTool(RagTool): def _run( self, - search_query: str, **kwargs: Any, ) -> Any: + search_query = kwargs.get('search_query') + if search_query is None: + search_query = kwargs.get('query') + + docx = kwargs.get("docx") + if docx is not None: + self.add(docx) return super()._run(query=search_query) From e0840e4826d4fc9ccd9c05b5d0a915419ece6626 Mon Sep 17 00:00:00 2001 From: "Slava Kurilyak (slavakurilyak.eth)" Date: Mon, 6 May 2024 15:19:37 -0300 Subject: [PATCH 053/391] Fix TypeError in FixedDOCXSearchToolSchema by correcting Optional usage with Field --- src/crewai_tools/tools/docx_search_tool/docx_search_tool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py index ed2d4daad..96bb4721b 100644 --- a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py +++ b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py @@ -8,7 +8,7 @@ from ..rag.rag_tool import RagTool class FixedDOCXSearchToolSchema(BaseModel): """Input for DOCXSearchTool.""" - docx: str = Optional[Field](..., description="Mandatory docx path you want to search") + docx: Optional[str] = Field(..., description="Mandatory docx path you want to search") search_query: str = Field( ..., description="Mandatory search query you want to use to search the DOCX's content", From b14f2d4bf1dcc524f3a788b13d31302d1c4406e4 Mon Sep 17 00:00:00 2001 From: "Slava Kurilyak (slavakurilyak.eth)" Date: Mon, 6 May 2024 15:21:07 -0300 Subject: [PATCH 054/391] Revert "Fix TypeError in FixedDOCXSearchToolSchema by correcting Optional usage with Field" This reverts commit e0840e4826d4fc9ccd9c05b5d0a915419ece6626. 
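For context on the `TypeError` this fix/revert pair wrestles with — an illustrative snippet, not part of any patch: subscripting binds before the call, so `Optional[Field](...)` first evaluates `Optional[Field]`, and because `Field` is a function rather than a type, `typing` rejects it at class-definition time. The working form keeps `Optional` on the annotation and `Field(...)` as the default:

```python
from typing import Optional
from pydantic.v1 import BaseModel, Field

class DOCXSchemaSketch(BaseModel):
    # Correct: Optional lives in the annotation, Field(...) supplies metadata
    docx: Optional[str] = Field(..., description="Mandatory docx path you want to search")

# Broken form from the reverted commit, kept as a comment so this file runs:
#   docx: str = Optional[Field](..., description="...")
# -> raises TypeError, since typing.Optional cannot be parameterized with Field
```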
--- src/crewai_tools/tools/docx_search_tool/docx_search_tool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py index 96bb4721b..ed2d4daad 100644 --- a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py +++ b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py @@ -8,7 +8,7 @@ from ..rag.rag_tool import RagTool class FixedDOCXSearchToolSchema(BaseModel): """Input for DOCXSearchTool.""" - docx: Optional[str] = Field(..., description="Mandatory docx path you want to search") + docx: str = Optional[Field](..., description="Mandatory docx path you want to search") search_query: str = Field( ..., description="Mandatory search query you want to use to search the DOCX's content", From cecfde66084a7a2773ef6468886bca4688902751 Mon Sep 17 00:00:00 2001 From: "Slava Kurilyak (slavakurilyak.eth)" Date: Mon, 6 May 2024 15:19:37 -0300 Subject: [PATCH 055/391] Fix TypeError in FixedDOCXSearchToolSchema by correcting Optional usage with Field --- src/crewai_tools/tools/docx_search_tool/docx_search_tool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py index ed2d4daad..96bb4721b 100644 --- a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py +++ b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py @@ -8,7 +8,7 @@ from ..rag.rag_tool import RagTool class FixedDOCXSearchToolSchema(BaseModel): """Input for DOCXSearchTool.""" - docx: str = Optional[Field](..., description="Mandatory docx path you want to search") + docx: Optional[str] = Field(..., description="Mandatory docx path you want to search") search_query: str = Field( ..., description="Mandatory search query you want to use to search the DOCX's content", From 53c7d815ae1548f09f8549ba2fb271fd28a1496c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Mon, 13 May 2024 21:30:34 -0300 Subject: [PATCH 056/391] preparing new version with new version of Exa tool --- src/crewai_tools/tools/exa_tools/exa_search_tool.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/crewai_tools/tools/exa_tools/exa_search_tool.py b/src/crewai_tools/tools/exa_tools/exa_search_tool.py index 541c673b9..30f77d1ee 100644 --- a/src/crewai_tools/tools/exa_tools/exa_search_tool.py +++ b/src/crewai_tools/tools/exa_tools/exa_search_tool.py @@ -15,6 +15,7 @@ class EXASearchTool(EXABaseTool): payload = { "query": search_query, + "type": "magic", } headers = self.headers.copy() headers["x-api-key"] = os.environ['EXA_API_KEY'] From a51a7000c5e84c9612f33e5d7fc378b1efdc033a Mon Sep 17 00:00:00 2001 From: rafaelsideguide <150964962+rafaelsideguide@users.noreply.github.com> Date: Thu, 16 May 2024 11:20:36 -0300 Subject: [PATCH 057/391] added Firecrawl tools --- .../firecrawl_crawl_website_tool/README.md | 42 +++++++++++++++++++ .../firecrawl_crawl_website_tool.py | 33 +++++++++++++++ .../firecrawl_scrape_website_tool/README.md | 38 +++++++++++++++++ .../firecrawl_scrape_website_tool.py | 35 ++++++++++++++++ .../tools/firecrawl_search_tool/README.md | 35 ++++++++++++++++ .../firecrawl_search_tool.py | 33 +++++++++++++++ 6 files changed, 216 insertions(+) create mode 100644 src/crewai_tools/tools/firecrawl_crawl_website_tool/README.md create mode 100644 src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py create mode 100644
src/crewai_tools/tools/firecrawl_scrape_website_tool/README.md create mode 100644 src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py create mode 100644 src/crewai_tools/tools/firecrawl_search_tool/README.md create mode 100644 src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/README.md b/src/crewai_tools/tools/firecrawl_crawl_website_tool/README.md new file mode 100644 index 000000000..46d011602 --- /dev/null +++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/README.md @@ -0,0 +1,42 @@ +# FirecrawlCrawlWebsiteTool + +## Description + +[Firecrawl](https://firecrawl.dev) is a platform for crawling and converting any website into clean markdown or structured data. + +## Installation + +- Get an API key from [firecrawl.dev](https://firecrawl.dev) and set it in environment variables (`FIRECRAWL_API_KEY`). +- Install the [Firecrawl SDK](https://github.com/mendableai/firecrawl) along with `crewai[tools]` package: + +``` +pip install firecrawl-py 'crewai[tools]' +``` + +## Example + +Utilize the FirecrawlCrawlWebsiteTool as follows to allow your agent to load websites: + +```python +from crewai_tools import FirecrawlCrawlWebsiteTool + +tool = FirecrawlCrawlWebsiteTool(url='firecrawl.dev') +``` + +## Arguments + +- `api_key`: Optional. Specifies Firecrawl API key. Default is the `FIRECRAWL_API_KEY` environment variable. +- `url`: The base URL to start crawling from. +- `page_options`: Optional. + - `onlyMainContent`: Optional. Only return the main content of the page excluding headers, navs, footers, etc. + - `includeHtml`: Optional. Include the raw HTML content of the page. Will output a html key in the response. +- `crawler_options`: Optional. Options for controlling the crawling behavior. + - `includes`: Optional. URL patterns to include in the crawl. + - `exclude`: Optional. URL patterns to exclude from the crawl. + - `generateImgAltText`: Optional. Generate alt text for images using LLMs (requires a paid plan). + - `returnOnlyUrls`: Optional. If true, returns only the URLs as a list in the crawl status. Note: the response will be a list of URLs inside the data, not a list of documents. + - `maxDepth`: Optional. Maximum depth to crawl. Depth 1 is the base URL, depth 2 includes the base URL and its direct children, and so on. + - `mode`: Optional. The crawling mode to use. Fast mode crawls 4x faster on websites without a sitemap but may not be as accurate and shouldn't be used on heavily JavaScript-rendered websites. + - `limit`: Optional. Maximum number of pages to crawl. + - `timeout`: Optional. Timeout in milliseconds for the crawling operation.
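A short usage sketch assembled from the argument list above — parameter names and casing are taken from this README, and `run` forwarding keyword arguments to `_run` is an assumption:

```python
from crewai_tools import FirecrawlCrawlWebsiteTool

# Assumes FIRECRAWL_API_KEY is exported, per the Installation section above
tool = FirecrawlCrawlWebsiteTool()

result = tool.run(
    url="https://firecrawl.dev",
    crawler_options={"limit": 10, "maxDepth": 2},  # cap the crawl's breadth and depth
    page_options={"onlyMainContent": True},        # strip headers, navs, footers
)
print(result)
```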
+ diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py new file mode 100644 index 000000000..5c796189a --- /dev/null +++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py @@ -0,0 +1,33 @@ +from typing import Optional, Any, Type, Dict, List +from pydantic.v1 import BaseModel, Field +from crewai_tools.tools.base_tool import BaseTool + +class FirecrawlCrawlWebsiteToolSchema(BaseModel): + url: str = Field(description="Website URL") + crawler_options: Optional[Dict[str, Any]] = Field(default=None, description="Options for crawling") + page_options: Optional[Dict[str, Any]] = Field(default=None, description="Options for page") + +class FirecrawlCrawlWebsiteTool(BaseTool): + name: str = "Firecrawl web crawl tool" + description: str = "Crawl webpages using Firecrawl and return the contents" + args_schema: Type[BaseModel] = FirecrawlCrawlWebsiteToolSchema + api_key: Optional[str] = None + firecrawl: Optional[Any] = None + + def __init__(self, api_key: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + try: + from firecrawl import FirecrawlApp # type: ignore + except ImportError: + raise ImportError( + "`firecrawl` package not found, please run `pip install firecrawl-py`" + ) + + self.firecrawl = FirecrawlApp(api_key=api_key) + + def _run(self, url: str, crawler_options: Optional[Dict[str, Any]] = None, page_options: Optional[Dict[str, Any]] = None): + options = { + "crawlerOptions": crawler_options, + "pageOptions": page_options + } + return self.firecrawl.crawl_url(url, options) \ No newline at end of file diff --git a/src/crewai_tools/tools/firecrawl_scrape_website_tool/README.md b/src/crewai_tools/tools/firecrawl_scrape_website_tool/README.md new file mode 100644 index 000000000..93570f06b --- /dev/null +++ b/src/crewai_tools/tools/firecrawl_scrape_website_tool/README.md @@ -0,0 +1,38 @@ +# FirecrawlScrapeWebsiteTool + +## Description + +[Firecrawl](https://firecrawl.dev) is a platform for crawling and converting any website into clean markdown or structured data. + +## Installation + +- Get an API key from [firecrawl.dev](https://firecrawl.dev) and set it in environment variables (`FIRECRAWL_API_KEY`). +- Install the [Firecrawl SDK](https://github.com/mendableai/firecrawl) along with `crewai[tools]` package: + +``` +pip install firecrawl-py 'crewai[tools]' +``` + +## Example + +Utilize the FirecrawlScrapeWebsiteTool as follows to allow your agent to load websites: + +```python +from crewai_tools import FirecrawlScrapeWebsiteTool + +tool = FirecrawlScrapeWebsiteTool(url='firecrawl.dev') +``` + +## Arguments + +- `api_key`: Optional. Specifies Firecrawl API key. Default is the `FIRECRAWL_API_KEY` environment variable. +- `url`: The URL to scrape. +- `page_options`: Optional. + - `onlyMainContent`: Optional. Only return the main content of the page excluding headers, navs, footers, etc. + - `includeHtml`: Optional. Include the raw HTML content of the page. Will output a html key in the response. +- `extractor_options`: Optional. Options for LLM-based extraction of structured information from the page content + - `mode`: The extraction mode to use, currently supports 'llm-extraction' + - `extractionPrompt`: Optional. A prompt describing what information to extract from the page + - `extractionSchema`: Optional. The schema for the data to be extracted +- `timeout`: Optional. 
Timeout in milliseconds for the request + diff --git a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py new file mode 100644 index 000000000..8540b13ff --- /dev/null +++ b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py @@ -0,0 +1,35 @@ +from typing import Optional, Any, Type, Dict +from pydantic.v1 import BaseModel, Field +from crewai_tools.tools.base_tool import BaseTool + +class FirecrawlScrapeWebsiteToolSchema(BaseModel): + url: str = Field(description="Website URL") + page_options: Optional[Dict[str, Any]] = Field(default=None, description="Options for page scraping") + extractor_options: Optional[Dict[str, Any]] = Field(default=None, description="Options for data extraction") + timeout: Optional[int] = Field(default=None, description="Timeout for the scraping operation") + +class FirecrawlScrapeWebsiteTool(BaseTool): + name: str = "Firecrawl web scrape tool" + description: str = "Scrape webpages url using Firecrawl and return the contents" + args_schema: Type[BaseModel] = FirecrawlScrapeWebsiteToolSchema + api_key: Optional[str] = None + firecrawl: Optional[Any] = None + + def __init__(self, api_key: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + try: + from firecrawl import FirecrawlApp # type: ignore + except ImportError: + raise ImportError( + "`firecrawl` package not found, please run `pip install firecrawl-py`" + ) + + self.firecrawl = FirecrawlApp(api_key=api_key) + + def _run(self, url: str, page_options: Optional[Dict[str, Any]] = None, extractor_options: Optional[Dict[str, Any]] = None, timeout: Optional[int] = None): + options = { + "pageOptions": page_options, + "extractorOptions": extractor_options, + "timeout": timeout + } + return self.firecrawl.scrape_url(url, options) \ No newline at end of file diff --git a/src/crewai_tools/tools/firecrawl_search_tool/README.md b/src/crewai_tools/tools/firecrawl_search_tool/README.md new file mode 100644 index 000000000..effb3f3d4 --- /dev/null +++ b/src/crewai_tools/tools/firecrawl_search_tool/README.md @@ -0,0 +1,35 @@ +# FirecrawlSearchTool + +## Description + +[Firecrawl](https://firecrawl.dev) is a platform for crawling and converting any website into clean markdown or structured data. + +## Installation + +- Get an API key from [firecrawl.dev](https://firecrawl.dev) and set it in environment variables (`FIRECRAWL_API_KEY`). +- Install the [Firecrawl SDK](https://github.com/mendableai/firecrawl) along with `crewai[tools]` package: + +``` +pip install firecrawl-py 'crewai[tools]' +``` + +## Example + +Utilize the FirecrawlSearchTool as follows to allow your agent to search the web: + +```python +from crewai_tools import FirecrawlSearchTool + +tool = FirecrawlSearchTool(query='what is firecrawl?') +``` + +## Arguments + +- `api_key`: Optional. Specifies Firecrawl API key. Default is the `FIRECRAWL_API_KEY` environment variable. +- `query`: The search query string to be used for searching. +- `page_options`: Optional. Options for result formatting. + - `onlyMainContent`: Optional. Only return the main content of the page excluding headers, navs, footers, etc. + - `includeHtml`: Optional. Include the raw HTML content of the page. Will output a html key in the response. + - `fetchPageContent`: Optional. Fetch the full content of the page. +- `search_options`: Optional. Options for controlling the crawling behavior. + - `limit`: Optional. 
Maximum number of pages to crawl. \ No newline at end of file diff --git a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py new file mode 100644 index 000000000..89843f797 --- /dev/null +++ b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py @@ -0,0 +1,33 @@ +from typing import Optional, Any, Type, Dict, List +from pydantic.v1 import BaseModel, Field +from crewai_tools.tools.base_tool import BaseTool + +class FirecrawlSearchToolSchema(BaseModel): + query: str = Field(description="Search query") + page_options: Optional[Dict[str, Any]] = Field(default=None, description="Options for result formatting") + search_options: Optional[Dict[str, Any]] = Field(default=None, description="Options for searching") + +class FirecrawlSearchTool(BaseTool): + name: str = "Firecrawl web search tool" + description: str = "Search webpages using Firecrawl and return the results" + args_schema: Type[BaseModel] = FirecrawlSearchToolSchema + api_key: Optional[str] = None + firecrawl: Optional[Any] = None + + def __init__(self, api_key: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + try: + from firecrawl import FirecrawlApp # type: ignore + except ImportError: + raise ImportError( + "`firecrawl` package not found, please run `pip install firecrawl-py`" + ) + + self.firecrawl = FirecrawlApp(api_key=api_key) + + def _run(self, query: str, page_options: Optional[Dict[str, Any]] = None, result_options: Optional[Dict[str, Any]] = None): + options = { + "pageOptions": page_options, + "resultOptions": result_options + } + return self.firecrawl.search(query, options) From e36af697cd3d8e8f874d6db601aa4352adfc94b7 Mon Sep 17 00:00:00 2001 From: Carlos Antunes Date: Sat, 18 May 2024 16:56:06 -0300 Subject: [PATCH 058/391] adding MySQLSearchTool README --- .../tools/mysql_seach_tool/README.md | 56 +++++++++++++++++++ .../mysql_seach_tool/mysql_search_tool.py | 44 +++++++++++++++ 2 files changed, 100 insertions(+) create mode 100644 src/crewai_tools/tools/mysql_seach_tool/README.md create mode 100644 src/crewai_tools/tools/mysql_seach_tool/mysql_search_tool.py diff --git a/src/crewai_tools/tools/mysql_seach_tool/README.md b/src/crewai_tools/tools/mysql_seach_tool/README.md new file mode 100644 index 000000000..b31d7120b --- /dev/null +++ b/src/crewai_tools/tools/mysql_seach_tool/README.md @@ -0,0 +1,56 @@ +# MySQLSearchTool + +## Description +This tool is designed to facilitate semantic searches within MySQL database tables. Leveraging RAG (Retrieval-Augmented Generation) technology, the MySQLSearchTool provides users with an efficient means of querying database table content, specifically tailored for MySQL databases. It simplifies the process of finding relevant data through semantic search queries, making it an invaluable resource for users needing to perform advanced queries on extensive datasets within a MySQL database.
+ +## Installation +To install the `crewai_tools` package and utilize the MySQLSearchTool, execute the following command in your terminal: + +```shell +pip install 'crewai[tools]' +``` + +## Example +Below is an example showcasing how to use the MySQLSearchTool to conduct a semantic search on a table within a MySQL database: + +```python +from crewai_tools import MySQLSearchTool + +# Initialize the tool with the database URI and the target table name +tool = MySQLSearchTool(db_uri='mysql://user:password@localhost:3306/mydatabase', table_name='employees') + +``` + +## Arguments +The MySQLSearchTool requires the following arguments for its operation: + +- `db_uri`: A string representing the URI of the MySQL database to be queried. This argument is mandatory and must include the necessary authentication details and the location of the database. +- `table_name`: A string specifying the name of the table within the database on which the semantic search will be performed. This argument is mandatory. + +## Custom model and embeddings + +By default, the tool uses OpenAI for both embeddings and summarization. To customize the model, you can use a config dictionary as follows: + +```python +tool = MySQLSearchTool( + config=dict( + llm=dict( + provider="ollama", # or google, openai, anthropic, llama2, ... + config=dict( + model="llama2", + # temperature=0.5, + # top_p=1, + # stream=true, + ), + ), + embedder=dict( + provider="google", + config=dict( + model="models/embedding-001", + task_type="retrieval_document", + # title="Embeddings", + ), + ), + ) +) +``` diff --git a/src/crewai_tools/tools/mysql_seach_tool/mysql_search_tool.py b/src/crewai_tools/tools/mysql_seach_tool/mysql_search_tool.py new file mode 100644 index 000000000..226fb1ddd --- /dev/null +++ b/src/crewai_tools/tools/mysql_seach_tool/mysql_search_tool.py @@ -0,0 +1,44 @@ +from typing import Any, Type + +from embedchain.loaders.postgres import PostgresLoader +from pydantic.v1 import BaseModel, Field + +from ..rag.rag_tool import RagTool + + +class PGSearchToolSchema(BaseModel): + """Input for PGSearchTool.""" + + search_query: str = Field( + ..., + description="Mandatory semantic search query you want to use to search the database's content", + ) + + +class PGSearchTool(RagTool): + name: str = "Search a database's table content" + description: str = "A tool that can be used to semantic search a query from a database table's content." + args_schema: Type[BaseModel] = PGSearchToolSchema + db_uri: str = Field(..., description="Mandatory database URI") + + def __init__(self, table_name: str, **kwargs): + super().__init__(**kwargs) + self.add(table_name) + self.description = f"A tool that can be used to semantic search a query the {table_name} database table's content." 
+ self._generate_description() + + def add( + self, + table_name: str, + **kwargs: Any, + ) -> None: + kwargs["data_type"] = "postgres" + kwargs["loader"] = PostgresLoader(config=dict(url=self.db_uri)) + super().add(f"SELECT * FROM {table_name};", **kwargs) + + def _run( + self, + search_query: str, + **kwargs: Any, + ) -> Any: + return super()._run(query=search_query) From a11cc57345e51eac1e7658a84bb250518eb0b36a Mon Sep 17 00:00:00 2001 From: Carlos Antunes Date: Sat, 18 May 2024 16:58:40 -0300 Subject: [PATCH 059/391] adding MySQLSearcherTool --- .../tools/mysql_seach_tool/mysql_search_tool.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/crewai_tools/tools/mysql_seach_tool/mysql_search_tool.py b/src/crewai_tools/tools/mysql_seach_tool/mysql_search_tool.py index 226fb1ddd..372a02f38 100644 --- a/src/crewai_tools/tools/mysql_seach_tool/mysql_search_tool.py +++ b/src/crewai_tools/tools/mysql_seach_tool/mysql_search_tool.py @@ -1,13 +1,13 @@ from typing import Any, Type -from embedchain.loaders.postgres import PostgresLoader +from embedchain.loaders.mysql import MySQLLoader from pydantic.v1 import BaseModel, Field from ..rag.rag_tool import RagTool -class PGSearchToolSchema(BaseModel): - """Input for PGSearchTool.""" +class MySQLSearchToolSchema(BaseModel): + """Input for MySQLSearchTool.""" search_query: str = Field( ..., @@ -15,10 +15,10 @@ class PGSearchToolSchema(BaseModel): ) -class PGSearchTool(RagTool): +class MySQLSearchTool(RagTool): name: str = "Search a database's table content" description: str = "A tool that can be used to semantic search a query from a database table's content." - args_schema: Type[BaseModel] = PGSearchToolSchema + args_schema: Type[BaseModel] = MySQLSearchToolSchema db_uri: str = Field(..., description="Mandatory database URI") def __init__(self, table_name: str, **kwargs): @@ -32,8 +32,8 @@ class PGSearchTool(RagTool): table_name: str, **kwargs: Any, ) -> None: - kwargs["data_type"] = "postgres" - kwargs["loader"] = PostgresLoader(config=dict(url=self.db_uri)) + kwargs["data_type"] = "mysql" + kwargs["loader"] = MySQLLoader(config=dict(url=self.db_uri)) super().add(f"SELECT * FROM {table_name};", **kwargs) def _run( From 5c2d8c4cfa86debc8f769cc9a8838a21ae1a4f72 Mon Sep 17 00:00:00 2001 From: Mish Ushakov <10400064+mishushakov@users.noreply.github.com> Date: Mon, 20 May 2024 08:58:25 +0000 Subject: [PATCH 060/391] updated browserbase tool --- .../browserbase_load_tool.py | 29 ++++++++++++++----- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py index 48f3dacef..52722520d 100644 --- a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py +++ b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py @@ -10,20 +10,35 @@ class BrowserbaseLoadTool(BaseTool): description: str = "Load webpages url in a headless browser using Browserbase and return the contents" args_schema: Type[BaseModel] = BrowserbaseLoadToolSchema api_key: Optional[str] = None + project_id: Optional[str] = None text_content: Optional[bool] = False + session_id: Optional[str] = None + proxy: Optional[bool] = None browserbase: Optional[Any] = None - def __init__(self, api_key: Optional[str] = None, text_content: Optional[bool] = False, **kwargs): + def __init__( + self, + api_key: Optional[str] = None, + project_id: Optional[str] = None, + text_content: Optional[bool] = False, + 
session_id: Optional[str] = None, + proxy: Optional[bool] = None, + **kwargs, + ): super().__init__(**kwargs) try: - from browserbase import Browserbase # type: ignore + from browserbase import Browserbase # type: ignore except ImportError: - raise ImportError( - "`browserbase` package not found, please run `pip install browserbase`" - ) + raise ImportError( + "`browserbase` package not found, please run `pip install browserbase`" + ) - self.browserbase = Browserbase(api_key=api_key) + self.browserbase = Browserbase(api_key, project_id) self.text_content = text_content + self.session_id = session_id + self.proxy = proxy def _run(self, url: str): - return self.browserbase.load_url(url, text_content=self.text_content) + return self.browserbase.load_url( + url, self.text_content, self.session_id, self.proxy + ) From bedbac2aafb80b3369abd88bd45dcdbdb67a59b0 Mon Sep 17 00:00:00 2001 From: Mish Ushakov <10400064+mishushakov@users.noreply.github.com> Date: Mon, 20 May 2024 09:02:55 +0000 Subject: [PATCH 061/391] updated browserbase tool readme --- .../tools/browserbase_load_tool/README.md | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/src/crewai_tools/tools/browserbase_load_tool/README.md b/src/crewai_tools/tools/browserbase_load_tool/README.md index a2866f9a8..bd562da0d 100644 --- a/src/crewai_tools/tools/browserbase_load_tool/README.md +++ b/src/crewai_tools/tools/browserbase_load_tool/README.md @@ -2,11 +2,17 @@ ## Description -[Browserbase](https://browserbase.com) is a serverless platform for running headless browsers, it offers advanced debugging, session recordings, stealth mode, integrated proxies and captcha solving. +[Browserbase](https://browserbase.com) is a developer platform to reliably run, manage, and monitor headless browsers. + + Power your AI data retrievals with: + - [Serverless Infrastructure](https://docs.browserbase.com/under-the-hood) providing reliable browsers to extract data from complex UIs + - [Stealth Mode](https://docs.browserbase.com/features/stealth-mode) with included fingerprinting tactics and automatic captcha solving + - [Session Debugger](https://docs.browserbase.com/features/sessions) to inspect your Browser Session with networks timeline and logs + - [Live Debug](https://docs.browserbase.com/guides/session-debug-connection/browser-remote-control) to quickly debug your automation ## Installation -- Get an API key from [browserbase.com](https://browserbase.com) and set it in environment variables (`BROWSERBASE_API_KEY`). +- Get an API key and Project ID from [browserbase.com](https://browserbase.com) and set it in environment variables (`BROWSERBASE_API_KEY`, `BROWSERBASE_PROJECT_ID`). - Install the [Browserbase SDK](http://github.com/browserbase/python-sdk) along with `crewai[tools]` package: ``` @@ -25,5 +31,8 @@ tool = BrowserbaseLoadTool() ## Arguments -- `api_key`: Optional. Specifies Browserbase API key. Defaults is the `BROWSERBASE_API_KEY` environment variable. -- `text_content`: Optional. Load pages as readable text. Default is `False`. +- `api_key` Optional. Browserbase API key. Default is `BROWSERBASE_API_KEY` env variable. +- `project_id` Optional. Browserbase Project ID. Default is `BROWSERBASE_PROJECT_ID` env variable. +- `text_content` Retrieve only text content. Default is `False`. +- `session_id` Optional. Provide an existing Session ID. +- `proxy` Optional. Enable/Disable Proxies." 
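Summarizing the expanded surface this patch lands, a hypothetical construction sketch — the env-var fallbacks are assumptions based on the README wording above:

```python
from crewai_tools import BrowserbaseLoadTool

tool = BrowserbaseLoadTool(
    api_key=None,       # assumed fallback: BROWSERBASE_API_KEY
    project_id=None,    # assumed fallback: BROWSERBASE_PROJECT_ID
    text_content=True,  # retrieve only readable text
    session_id=None,    # optionally reuse an existing Browserbase session
    proxy=None,         # enable/disable proxies
)
page = tool.run(url="https://example.com")
```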
From 1f08d74015877278cd48dd88aeb73a8037f1b8a3 Mon Sep 17 00:00:00 2001 From: Carlos Antunes Date: Mon, 20 May 2024 22:00:34 -0300 Subject: [PATCH 062/391] adding file writer tool and documentation --- .../tools/file_writer_tool/README.md | 33 +++++++++++++++++++ .../file_writer_tool/file_writer_tool.py | 22 ++++++++++++ 2 files changed, 55 insertions(+) create mode 100644 src/crewai_tools/tools/file_writer_tool/README.md create mode 100644 src/crewai_tools/tools/file_writer_tool/file_writer_tool.py diff --git a/src/crewai_tools/tools/file_writer_tool/README.md b/src/crewai_tools/tools/file_writer_tool/README.md new file mode 100644 index 000000000..e93e5c682 --- /dev/null +++ b/src/crewai_tools/tools/file_writer_tool/README.md @@ -0,0 +1,33 @@ +# FileWriterTool Documentation + +## Description +The `FileWriterTool` is a component of the crewai_tools package, designed to simplify the process of writing content to files. It is particularly useful in scenarios such as generating reports, saving logs, creating configuration files, and more. This tool supports creating new directories if they don't exist, making it easier to organize your output. + +## Installation +Install the crewai_tools package to use the `FileWriterTool` in your projects: + +```shell +pip install 'crewai[tools]' +``` + +## Example +To get started with the `FileWriterTool`: + +```python +from crewai_tools import FileWriterTool + +# Initialize the tool +file_writer_tool = FileWriterTool() + +# Write content to a file in a specified directory
result = file_writer_tool._run('example.txt', 'This is a test content.', 'test_directory') +print(result) +``` + +## Arguments +- `filename`: The name of the file you want to create or overwrite. +- `content`: The content to write into the file. +- `directory` (optional): The path to the directory where the file will be created. Defaults to the current directory (`.`). If the directory does not exist, it will be created. + +## Conclusion +By integrating the `FileWriterTool` into your crews, the agents can execute the process of writing content to files and creating directories. This tool is essential for tasks that require saving output data, creating structured file systems, and more. By adhering to the setup and usage guidelines provided, incorporating this tool into projects is straightforward and efficient. diff --git a/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py b/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py new file mode 100644 index 000000000..21db460cc --- /dev/null +++ b/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py @@ -0,0 +1,22 @@ +import os +from crewai_tools import BaseTool + +class FileWriterTool(BaseTool): + name: str = "File Writer Tool" + description: str = "A tool to write content to a specified file. Accepts filename, content, and optionally a directory path as input."
+ + def _run(self, filename: str, content: str, directory: str = '.') -> str: + try: + # Create the directory if it doesn't exist + if directory and not os.path.exists(directory): + os.makedirs(directory) + + # Construct the full path + filepath = os.path.join(directory, filename) + + # Write content to the file + with open(filepath, 'w') as file: + file.write(content) + return f"Content successfully written to {filepath}" + except Exception as e: + return f"An error occurred while writing to the file: {str(e)}" From 4e1425665c673badcc0a233cca03733ec35656e4 Mon Sep 17 00:00:00 2001 From: WilliamEspegren Date: Tue, 21 May 2024 11:48:52 +0200 Subject: [PATCH 063/391] spider tool --- .../tools/spider_crawl_tool/README.md | 27 ++++++++++++ .../tools/spider_crawl_tool/spider_tool.py | 44 +++++++++++++++++++ 2 files changed, 71 insertions(+) create mode 100644 src/crewai_tools/tools/spider_crawl_tool/README.md create mode 100644 src/crewai_tools/tools/spider_crawl_tool/spider_tool.py diff --git a/src/crewai_tools/tools/spider_crawl_tool/README.md b/src/crewai_tools/tools/spider_crawl_tool/README.md new file mode 100644 index 000000000..3207efcca --- /dev/null +++ b/src/crewai_tools/tools/spider_crawl_tool/README.md @@ -0,0 +1,27 @@ +# SpiderTool + +## Description + +[Spider](https://spider.cloud) is the [fastest]([Spider](https://spider.cloud/?ref=crewai) is the [fastest](https://github.com/spider-rs/spider/blob/main/benches/BENCHMARKS.md#benchmark-results) open source scraper and crawler that returns LLM-ready data. It converts any website into pure HTML, markdown, metadata or text while enabling you to crawl with custom actions using AI. + +## Installation + +To use the Spider API you need to download the [Spider SDK](https://pypi.org/project/spider-client/) and the crewai[tools] SDK too: + +```python +pip install spider-client 'crewai[tools]' +``` + +## Example + +This example shows you how you can use the Spider tool to enable your agent to scrape and crawl websites. The data returned from the Spider API is already LLM-ready, so no need to do any cleaning there. + +```python +from crewai_tools import SpiderTool + +tool = SpiderTool() +``` + +## Arguments + +- `api_key`: Optional. Specifies Spider API key. If not specified it looks for `SPIDER_API_KEY` in environment variables. diff --git a/src/crewai_tools/tools/spider_crawl_tool/spider_tool.py b/src/crewai_tools/tools/spider_crawl_tool/spider_tool.py new file mode 100644 index 000000000..c924f6136 --- /dev/null +++ b/src/crewai_tools/tools/spider_crawl_tool/spider_tool.py @@ -0,0 +1,44 @@ +from typing import Optional, Any, Type, Dict, Literal +from pydantic.v1 import BaseModel, Field +from crewai_tools.tools.base_tool import BaseTool + +class SpiderToolSchema(BaseModel): + url: str = Field(description="Website URL") + params: Optional[Dict[str, Any]] = Field(default={"return_format": "markdown"}, description="Specified Params, see https://spider.cloud/docs/api for all availabe params") + mode: Optional[Literal["scrape", "crawl"]] = Field(defualt="scrape", description="Mode, either `scrape` or `crawl` the url") + +class SpiderTool(BaseTool): + name: str = "Spider scrape & crawl tool" + description: str = "Scrape & Crawl any url and return LLM-ready data." 
+ args_schema: Type[BaseModel] = SpiderToolSchema + api_key: Optional[str] = None + spider: Optional[Any] = None + + def __init__(self, api_key: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + try: + from spider import Spider # type: ignore + except ImportError: + raise ImportError( + "`spider-client` package not found, please run `pip install spider-client`" + ) + + self.spider = Spider(api_key=api_key) + + def _run(self, url: str, params: Optional[Dict[str, Any]] = None, mode: Optional[Literal["scrape", "crawl"]] = "scrape"): + if mode != "scrape" and mode != "crawl": + raise ValueError( + "Unknown mode in `mode` parameter, `scrape` or `crawl` is the allowed modes" + ) + + if params is None: + params = {"return_format": "markdown"} + + action = ( + self.spider.scrape_url if mode == "scrape" else self.spider.crawl_url + ) + spider_docs = action(url=url, params=params) + + + print(spider_docs) + return spider_docs From 0b494036352409ed02a6d377307d69194fa19a39 Mon Sep 17 00:00:00 2001 From: WilliamEspegren Date: Tue, 21 May 2024 12:06:08 +0200 Subject: [PATCH 064/391] remove print from testing --- src/crewai_tools/tools/spider_crawl_tool/spider_tool.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/crewai_tools/tools/spider_crawl_tool/spider_tool.py b/src/crewai_tools/tools/spider_crawl_tool/spider_tool.py index c924f6136..aeb922c74 100644 --- a/src/crewai_tools/tools/spider_crawl_tool/spider_tool.py +++ b/src/crewai_tools/tools/spider_crawl_tool/spider_tool.py @@ -39,6 +39,4 @@ class SpiderTool(BaseTool): ) spider_docs = action(url=url, params=params) - - print(spider_docs) return spider_docs From dd15dab111a743d80c15126f09c1f99f45b7045e Mon Sep 17 00:00:00 2001 From: WilliamEspegren Date: Tue, 21 May 2024 21:26:12 +0200 Subject: [PATCH 065/391] added full params --- .../tools/spider_crawl_tool/README.md | 28 ++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/spider_crawl_tool/README.md b/src/crewai_tools/tools/spider_crawl_tool/README.md index 3207efcca..e0734bcf9 100644 --- a/src/crewai_tools/tools/spider_crawl_tool/README.md +++ b/src/crewai_tools/tools/spider_crawl_tool/README.md @@ -24,4 +24,30 @@ tool = SpiderTool() ## Arguments -- `api_key`: Optional. Specifies Spider API key. If not specified it looks for `SPIDER_API_KEY` in environment variables. +- `api_key` (string, optional): Specifies Spider API key. If not specified, it looks for `SPIDER_API_KEY` in environment variables. +- `params` (object, optional): Optional parameters for the request. Defaults to `{"return_format": "markdown"}` to return the website's content in a format that fits LLMs better. + - `request` (string): The request type to perform. Possible values are `http`, `chrome`, and `smart`. Use `smart` to perform an HTTP request by default until JavaScript rendering is needed for the HTML. + - `limit` (int): The maximum number of pages allowed to crawl per website. Remove the value or set it to `0` to crawl all pages. + - `depth` (int): The crawl limit for maximum depth. If `0`, no limit will be applied. + - `cache` (bool): Use HTTP caching for the crawl to speed up repeated runs. Default is `true`. + - `budget` (object): Object that has paths with a counter for limiting the amount of pages example `{"*":1}` for only crawling the root page. + - `locale` (string): The locale to use for request, example `en-US`. + - `cookies` (string): Add HTTP cookies to use for request. 
+ - `stealth` (bool): Use stealth mode for headless chrome request to help prevent being blocked. The default is `true` on chrome.
+ - `headers` (object): Forward HTTP headers to use for all requests. The object is expected to be a map of key value pairs.
+ - `metadata` (bool): Boolean to store metadata about the pages and content found. This could help improve AI interop. Defaults to `false` unless you have the website already stored with the configuration enabled.
+ - `viewport` (object): Configure the viewport for chrome. Defaults to `800x600`.
+ - `encoding` (string): The type of encoding to use like `UTF-8`, `SHIFT_JIS`, etc.
+ - `subdomains` (bool): Allow subdomains to be included. Default is `false`.
+ - `user_agent` (string): Add a custom HTTP user agent to the request. By default this is set to a random agent.
+ - `store_data` (bool): Boolean to determine if storage should be used. If set this takes precedence over `storageless`. Defaults to `false`.
+ - `gpt_config` (object): Use AI to generate actions to perform during the crawl. You can pass an array for the `"prompt"` to chain steps.
+ - `fingerprint` (bool): Use advanced fingerprint for chrome.
+ - `storageless` (bool): Boolean to prevent storing any type of data for the request including storage and AI vectors embedding. Defaults to `false` unless you have the website already stored.
+ - `readability` (bool): Use [readability](https://github.com/mozilla/readability) to pre-process the content for reading. This may drastically improve the content for LLM usage.
+ - `return_format` (string): The format to return the data in. Possible values are `markdown`, `raw`, `text`, and `html2text`. Use `raw` to return the default format of the page like HTML etc.
+ - `proxy_enabled` (bool): Enable high performance premium proxies for the request to prevent being blocked at the network level.
+ - `query_selector` (string): The CSS query selector to use when extracting content from the markup.
+ - `full_resources` (bool): Crawl and download all the resources for a website.
+ - `request_timeout` (int): The timeout to use for requests. Timeouts can be from `5-60`. The default is `30` seconds.
+ - `run_in_background` (bool): Run the request in the background. Useful if storing data and wanting to trigger crawls to the dashboard. This has no effect if storageless is set.
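A short sketch of passing these params to the tool as it exists at this point in the series, assuming a valid `SPIDER_API_KEY` in the environment; note the module still lives under `spider_crawl_tool` here and is renamed to `spider_tool` by a later patch:

```python
from crewai_tools.tools.spider_crawl_tool.spider_tool import SpiderTool

tool = SpiderTool()  # falls back to the SPIDER_API_KEY environment variable

# Any of the params documented above can be set; unset params keep the
# Spider API defaults. return_format defaults to "markdown" (LLM-ready).
docs = tool._run(
    url="https://spider.cloud",
    params={"limit": 1, "metadata": True, "return_format": "markdown"},
    mode="scrape",
)
print(docs)
```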
From 60eb6e7c6f7565e8d97099bc0da24c75be149b2b Mon Sep 17 00:00:00 2001 From: WilliamEspegren Date: Tue, 21 May 2024 23:34:05 +0200 Subject: [PATCH 066/391] spider_tool working, not spider_full_tool --- src/crewai_tools/tools/__init__.py | 2 + .../tools/spider_full_tool/README.md | 55 ++++++++++++++ .../spider_full_tool/spider_full_tool.py | 75 +++++++++++++++++++ .../README.md | 2 +- .../spider_tool.py | 6 +- tests/spider_full_tool_test.py | 38 ++++++++++ tests/spider_tool_test.py | 31 ++++++++ 7 files changed, 205 insertions(+), 4 deletions(-) create mode 100644 src/crewai_tools/tools/spider_full_tool/README.md create mode 100644 src/crewai_tools/tools/spider_full_tool/spider_full_tool.py rename src/crewai_tools/tools/{spider_crawl_tool => spider_tool}/README.md (91%) rename src/crewai_tools/tools/{spider_crawl_tool => spider_tool}/spider_tool.py (86%) create mode 100644 tests/spider_full_tool_test.py create mode 100644 tests/spider_tool_test.py diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 648671d97..7b794508d 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -21,3 +21,5 @@ from .website_search.website_search_tool import WebsiteSearchTool from .xml_search_tool.xml_search_tool import XMLSearchTool from .youtube_channel_search_tool.youtube_channel_search_tool import YoutubeChannelSearchTool from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool +from .spider_tool.spider_tool import SpiderTool +from .spider_full_tool.spider_full_tool import SpiderFullTool \ No newline at end of file diff --git a/src/crewai_tools/tools/spider_full_tool/README.md b/src/crewai_tools/tools/spider_full_tool/README.md new file mode 100644 index 000000000..f2e1d536c --- /dev/null +++ b/src/crewai_tools/tools/spider_full_tool/README.md @@ -0,0 +1,55 @@ +# SpiderFullTool + +## Description + +This is the full fledged Spider tool, with all the possible params listed to the agent. This can eat ut tokens and be a big chunk of your token limit, if this is a problem, check out the `SpiderTool` which probably has most of the features you are looking for. But if you truly want to experience the full power of Spider... + +[Spider](https://spider.cloud/?ref=crewai) is the [fastest](https://github.com/spider-rs/spider/blob/main/benches/BENCHMARKS.md#benchmark-results) open source scraper and crawler that returns LLM-ready data. It converts any website into pure HTML, markdown, metadata or text while enabling you to crawl with custom actions using AI. + +## Installation + +To use the Spider API you need to download the [Spider SDK](https://pypi.org/project/spider-client/) and the crewai[tools] SDK too: + +```python +pip install spider-client 'crewai[tools]' +``` + +## Example + +This example shows you how you can use the full Spider tool to enable your agent to scrape and crawl websites. The data returned from the Spider API is already LLM-ready, so no need to do any cleaning there. + +```python +from crewai_tools import SpiderFullTool + +tool = SpiderFullTool() +``` + +## Arguments + +- `api_key` (string, optional): Specifies Spider API key. If not specified, it looks for `SPIDER_API_KEY` in environment variables. +- `params` (object, optional): Optional parameters for the request. Defaults to `{"return_format": "markdown"}` to return the website's content in a format that fits LLMs better. + - `request` (string): The request type to perform. Possible values are `http`, `chrome`, and `smart`. 
Use `smart` to perform an HTTP request by default until JavaScript rendering is needed for the HTML. + - `limit` (int): The maximum number of pages allowed to crawl per website. Remove the value or set it to `0` to crawl all pages. + - `depth` (int): The crawl limit for maximum depth. If `0`, no limit will be applied. + - `cache` (bool): Use HTTP caching for the crawl to speed up repeated runs. Default is `true`. + - `budget` (object): Object that has paths with a counter for limiting the amount of pages example `{"*":1}` for only crawling the root page. + - `locale` (string): The locale to use for request, example `en-US`. + - `cookies` (string): Add HTTP cookies to use for request. + - `stealth` (bool): Use stealth mode for headless chrome request to help prevent being blocked. The default is `true` on chrome. + - `headers` (object): Forward HTTP headers to use for all request. The object is expected to be a map of key value pairs. + - `metadata` (bool): Boolean to store metadata about the pages and content found. This could help improve AI interopt. Defaults to `false` unless you have the website already stored with the configuration enabled. + - `viewport` (object): Configure the viewport for chrome. Defaults to `800x600`. + - `encoding` (string): The type of encoding to use like `UTF-8`, `SHIFT_JIS`, or etc. + - `subdomains` (bool): Allow subdomains to be included. Default is `false`. + - `user_agent` (string): Add a custom HTTP user agent to the request. By default this is set to a random agent. + - `store_data` (bool): Boolean to determine if storage should be used. If set this takes precedence over `storageless`. Defaults to `false`. + - `gpt_config` (object): Use AI to generate actions to perform during the crawl. You can pass an array for the `"prompt"` to chain steps. + - `fingerprint` (bool): Use advanced fingerprint for chrome. + - `storageless` (bool): Boolean to prevent storing any type of data for the request including storage and AI vectors embedding. Defaults to `false` unless you have the website already stored. + - `readability` (bool): Use [readability](https://github.com/mozilla/readability) to pre-process the content for reading. This may drastically improve the content for LLM usage. + `return_format` (string): The format to return the data in. Possible values are `markdown`, `raw`, `text`, and `html2text`. Use `raw` to return the default format of the page like HTML etc. + - `proxy_enabled` (bool): Enable high performance premium proxies for the request to prevent being blocked at the network level. + - `query_selector` (string): The CSS query selector to use when extracting content from the markup. + - `full_resources` (bool): Crawl and download all the resources for a website. + - `request_timeout` (int): The timeout to use for request. Timeouts can be from `5-60`. The default is `30` seconds. + - `run_in_background` (bool): Run the request in the background. Useful if storing data and wanting to trigger crawls to the dashboard. This has no effect if storageless is set. 
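A trimmed usage sketch of this full-params variant, assuming a valid `SPIDER_API_KEY`; the `SpiderFullParams` model it relies on is defined in the `spider_full_tool.py` diff that follows. Note that, as committed here, importing that module also executes a module-level test call at the bottom of the file, which a later patch in this series removes:

```python
from crewai_tools.tools.spider_full_tool.spider_full_tool import (
    SpiderFullTool,
    SpiderFullParams,
)

tool = SpiderFullTool()  # falls back to the SPIDER_API_KEY environment variable

# Only a few of the documented params; everything else keeps its default.
params = SpiderFullParams(request="http", limit=1, return_format="markdown")

docs = tool._run(url="https://spider.cloud", params=params, mode="scrape")
print(docs)
```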
diff --git a/src/crewai_tools/tools/spider_full_tool/spider_full_tool.py b/src/crewai_tools/tools/spider_full_tool/spider_full_tool.py new file mode 100644 index 000000000..e1041b701 --- /dev/null +++ b/src/crewai_tools/tools/spider_full_tool/spider_full_tool.py @@ -0,0 +1,75 @@ +from typing import Optional, Any, Type, Dict, Literal +from pydantic.v1 import BaseModel, Field +from crewai_tools.tools.base_tool import BaseTool + +class SpiderFullParams(BaseModel): + request: Optional[str] = Field(description="The request type to perform. Possible values are `http`, `chrome`, and `smart`.") + limit: Optional[int] = Field(description="The maximum number of pages allowed to crawl per website. Remove the value or set it to `0` to crawl all pages.") + depth: Optional[int] = Field(description="The crawl limit for maximum depth. If `0`, no limit will be applied.") + cache: Optional[bool] = Field(default=True, description="Use HTTP caching for the crawl to speed up repeated runs.") + budget: Optional[Dict[str, int]] = Field(description="Object that has paths with a counter for limiting the number of pages, e.g., `{'*':1}` for only crawling the root page.") + locale: Optional[str] = Field(description="The locale to use for request, e.g., `en-US`.") + cookies: Optional[str] = Field(description="Add HTTP cookies to use for request.") + stealth: Optional[bool] = Field(default=True, description="Use stealth mode for headless chrome request to help prevent being blocked. Default is `true` on chrome.") + headers: Optional[Dict[str, str]] = Field(description="Forward HTTP headers to use for all requests. The object is expected to be a map of key-value pairs.") + metadata: Optional[bool] = Field(default=False, description="Boolean to store metadata about the pages and content found. Defaults to `false` unless enabled.") + viewport: Optional[str] = Field(default="800x600", description="Configure the viewport for chrome. Defaults to `800x600`.") + encoding: Optional[str] = Field(description="The type of encoding to use, e.g., `UTF-8`, `SHIFT_JIS`.") + subdomains: Optional[bool] = Field(default=False, description="Allow subdomains to be included. Default is `false`.") + user_agent: Optional[str] = Field(description="Add a custom HTTP user agent to the request. Default is a random agent.") + store_data: Optional[bool] = Field(default=False, description="Boolean to determine if storage should be used. Defaults to `false`.") + gpt_config: Optional[Dict[str, Any]] = Field(description="Use AI to generate actions to perform during the crawl. Can pass an array for the `prompt` to chain steps.") + fingerprint: Optional[bool] = Field(description="Use advanced fingerprinting for chrome.") + storageless: Optional[bool] = Field(default=False, description="Boolean to prevent storing any data for the request. Defaults to `false`.") + readability: Optional[bool] = Field(description="Use readability to pre-process the content for reading.") + return_format: Optional[str] = Field(default="markdown", description="The format to return the data in. 
Possible values are `markdown`, `raw`, `text`, and `html2text`.") + proxy_enabled: Optional[bool] = Field(description="Enable high-performance premium proxies to prevent being blocked.") + query_selector: Optional[str] = Field(description="The CSS query selector to use when extracting content from the markup.") + full_resources: Optional[bool] = Field(description="Crawl and download all resources for a website.") + request_timeout: Optional[int] = Field(default=30, description="The timeout for requests. Ranges from `5-60` seconds. Default is `30` seconds.") + run_in_background: Optional[bool] = Field(description="Run the request in the background. Useful if storing data and triggering crawls to the dashboard.") + +class SpiderFullToolSchema(BaseModel): + url: str = Field(description="Website URL") + params: Optional[SpiderFullParams] = Field(default=SpiderFullParams(), description="All the params available") + mode: Optional[Literal["scrape", "crawl"]] = Field(default="scrape", description="Mode, either `scrape` or `crawl` the URL") + +class SpiderFullTool(BaseTool): + name: str = "Spider scrape & crawl tool" + description: str = "Scrape & Crawl any URL and return LLM-ready data." + args_schema: Type[BaseModel] = SpiderFullToolSchema + api_key: Optional[str] = None + spider: Optional[Any] = None + + def __init__(self, api_key: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + try: + from spider import Spider # type: ignore + except ImportError: + raise ImportError( + "`spider-client` package not found, please run `pip install spider-client`" + ) + + self.spider = Spider(api_key=api_key) + + def _run( + self, + url: str, + params: Optional[SpiderFullParams] = None, + mode: Optional[Literal["scrape", "crawl"]] = "scrape" + ): + if mode not in ["scrape", "crawl"]: + raise ValueError( + "Unknown mode in `mode` parameter, `scrape` or `crawl` are the allowed modes" + ) + + if params is None: + params = SpiderFullParams() + + action = self.spider.scrape_url if mode == "scrape" else self.spider.crawl_url + spider_docs = action(url=url, params=params.dict()) + + return spider_docs + +tool = SpiderFullTool() +tool._run(url="https://spider.cloud") \ No newline at end of file diff --git a/src/crewai_tools/tools/spider_crawl_tool/README.md b/src/crewai_tools/tools/spider_tool/README.md similarity index 91% rename from src/crewai_tools/tools/spider_crawl_tool/README.md rename to src/crewai_tools/tools/spider_tool/README.md index e0734bcf9..c608b5f5f 100644 --- a/src/crewai_tools/tools/spider_crawl_tool/README.md +++ b/src/crewai_tools/tools/spider_tool/README.md @@ -2,7 +2,7 @@ ## Description -[Spider](https://spider.cloud) is the [fastest]([Spider](https://spider.cloud/?ref=crewai) is the [fastest](https://github.com/spider-rs/spider/blob/main/benches/BENCHMARKS.md#benchmark-results) open source scraper and crawler that returns LLM-ready data. It converts any website into pure HTML, markdown, metadata or text while enabling you to crawl with custom actions using AI. +[Spider](https://spider.cloud/?ref=crewai) is the [fastest](https://github.com/spider-rs/spider/blob/main/benches/BENCHMARKS.md#benchmark-results) open source scraper and crawler that returns LLM-ready data. It converts any website into pure HTML, markdown, metadata or text while enabling you to crawl with custom actions using AI. 
## Installation diff --git a/src/crewai_tools/tools/spider_crawl_tool/spider_tool.py b/src/crewai_tools/tools/spider_tool/spider_tool.py similarity index 86% rename from src/crewai_tools/tools/spider_crawl_tool/spider_tool.py rename to src/crewai_tools/tools/spider_tool/spider_tool.py index aeb922c74..73df77ac2 100644 --- a/src/crewai_tools/tools/spider_crawl_tool/spider_tool.py +++ b/src/crewai_tools/tools/spider_tool/spider_tool.py @@ -4,8 +4,8 @@ from crewai_tools.tools.base_tool import BaseTool class SpiderToolSchema(BaseModel): url: str = Field(description="Website URL") - params: Optional[Dict[str, Any]] = Field(default={"return_format": "markdown"}, description="Specified Params, see https://spider.cloud/docs/api for all availabe params") - mode: Optional[Literal["scrape", "crawl"]] = Field(defualt="scrape", description="Mode, either `scrape` or `crawl` the url") + params: Optional[Dict[str, Any]] = Field(default={"return_format": "markdown"}, description="Set additional params. Leave empty for this to return LLM-ready data") + mode: Optional[Literal["scrape", "crawl"]] = Field(defualt="scrape", description="Mode, the only two allowed modes are `scrape` or `crawl` the url") class SpiderTool(BaseTool): name: str = "Spider scrape & crawl tool" @@ -31,7 +31,7 @@ class SpiderTool(BaseTool): "Unknown mode in `mode` parameter, `scrape` or `crawl` is the allowed modes" ) - if params is None: + if params is None or params == {}: params = {"return_format": "markdown"} action = ( diff --git a/tests/spider_full_tool_test.py b/tests/spider_full_tool_test.py new file mode 100644 index 000000000..f00c0ec9c --- /dev/null +++ b/tests/spider_full_tool_test.py @@ -0,0 +1,38 @@ +import os +from crewai_tools.tools.spider_full_tool.spider_full_tool import SpiderFullTool, SpiderFullParams +from crewai import Agent, Task, Crew + +def test_spider_tool(): + spider_tool = SpiderFullTool() + + params = SpiderFullParams( + return_format="markdown" + ) + + docs = spider_tool._run("https://spider.cloud", params=params) + print(docs) + + # searcher = Agent( + # role="Web Research Expert", + # goal="Find related information from specific URL's", + # backstory="An expert web researcher that uses the web extremely well", + # tools=[spider_tool], + # verbose=True + # ) + + # summarize_spider = Task( + # description="Summarize the content of spider.cloud", + # expected_output="A summary that goes over what spider does", + # agent=searcher + # ) + + # crew = Crew( + # agents=[searcher], + # tasks=[summarize_spider], + # verbose=2 + # ) + + # crew.kickoff() + +if __name__ == "__main__": + test_spider_tool() \ No newline at end of file diff --git a/tests/spider_tool_test.py b/tests/spider_tool_test.py new file mode 100644 index 000000000..67e5802a2 --- /dev/null +++ b/tests/spider_tool_test.py @@ -0,0 +1,31 @@ +import os +from crewai_tools.tools.spider_tool.spider_tool import SpiderTool +from crewai import Agent, Task, Crew + +def test_spider_tool(): + spider_tool = SpiderTool() + + searcher = Agent( + role="Web Research Expert", + goal="Find related information from specific URL's", + backstory="An expert web researcher that uses the web extremely well", + tools=[spider_tool], + verbose=True + ) + + summarize_spider = Task( + description="Summarize the content of spider.cloud", + expected_output="A summary that goes over what spider does", + agent=searcher + ) + + crew = Crew( + agents=[searcher], + tasks=[summarize_spider], + verbose=2 + ) + + crew.kickoff() + +if __name__ == "__main__": + test_spider_tool() \ No 
newline at end of file From 70b5a3ab853db3ab36d97e792f2124a7cfa5b8c9 Mon Sep 17 00:00:00 2001 From: WilliamEspegren Date: Wed, 22 May 2024 17:05:46 +0200 Subject: [PATCH 067/391] fixed white space --- src/crewai_tools/tools/spider_tool/spider_tool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/spider_tool/spider_tool.py b/src/crewai_tools/tools/spider_tool/spider_tool.py index 73df77ac2..3495d55c9 100644 --- a/src/crewai_tools/tools/spider_tool/spider_tool.py +++ b/src/crewai_tools/tools/spider_tool/spider_tool.py @@ -33,7 +33,7 @@ class SpiderTool(BaseTool): if params is None or params == {}: params = {"return_format": "markdown"} - + action = ( self.spider.scrape_url if mode == "scrape" else self.spider.crawl_url ) From 5b7276c0bb75c2557ce0e7b6f08d316fb0a6426e Mon Sep 17 00:00:00 2001 From: WilliamEspegren Date: Thu, 23 May 2024 12:03:48 +0200 Subject: [PATCH 068/391] x --- .../spider_full_tool/spider_full_tool.py | 18 +++++-- .../tools/spider_tool/spider_tool.py | 11 ++-- tests/spider_full_tool_test.py | 54 ++++++++----------- 3 files changed, 45 insertions(+), 38 deletions(-) diff --git a/src/crewai_tools/tools/spider_full_tool/spider_full_tool.py b/src/crewai_tools/tools/spider_full_tool/spider_full_tool.py index e1041b701..5d8ea6eda 100644 --- a/src/crewai_tools/tools/spider_full_tool/spider_full_tool.py +++ b/src/crewai_tools/tools/spider_full_tool/spider_full_tool.py @@ -1,6 +1,7 @@ from typing import Optional, Any, Type, Dict, Literal from pydantic.v1 import BaseModel, Field from crewai_tools.tools.base_tool import BaseTool +import requests class SpiderFullParams(BaseModel): request: Optional[str] = Field(description="The request type to perform. Possible values are `http`, `chrome`, and `smart`.") @@ -64,12 +65,21 @@ class SpiderFullTool(BaseTool): ) if params is None: + print("PARAMS IT NONE") params = SpiderFullParams() + print(params) action = self.spider.scrape_url if mode == "scrape" else self.spider.crawl_url - spider_docs = action(url=url, params=params.dict()) + response = action(url=url, params=params.dict()) + + # Debugging: Print the response content + print(f"Response status code: {response.status_code}") + print(f"Response content: {response.text}") + + try: + spider_docs = response.json() + except requests.exceptions.JSONDecodeError as e: + print(f"JSONDecodeError: {e}") + spider_docs = {"error": "Failed to decode JSON response"} return spider_docs - -tool = SpiderFullTool() -tool._run(url="https://spider.cloud") \ No newline at end of file diff --git a/src/crewai_tools/tools/spider_tool/spider_tool.py b/src/crewai_tools/tools/spider_tool/spider_tool.py index 3495d55c9..e020a599a 100644 --- a/src/crewai_tools/tools/spider_tool/spider_tool.py +++ b/src/crewai_tools/tools/spider_tool/spider_tool.py @@ -25,10 +25,15 @@ class SpiderTool(BaseTool): self.spider = Spider(api_key=api_key) - def _run(self, url: str, params: Optional[Dict[str, Any]] = None, mode: Optional[Literal["scrape", "crawl"]] = "scrape"): - if mode != "scrape" and mode != "crawl": + def _run( + self, + url: str, + params: Optional[Dict[str, any]] = None, + mode: Optional[Literal["scrape", "crawl"]] = "scrape" + ): + if mode not in ["scrape", "crawl"]: raise ValueError( - "Unknown mode in `mode` parameter, `scrape` or `crawl` is the allowed modes" + "Unknown mode in `mode` parameter, `scrape` or `crawl` are the allowed modes" ) if params is None or params == {}: diff --git a/tests/spider_full_tool_test.py b/tests/spider_full_tool_test.py index 
f00c0ec9c..220acfb49 100644 --- a/tests/spider_full_tool_test.py +++ b/tests/spider_full_tool_test.py @@ -1,38 +1,30 @@ -import os from crewai_tools.tools.spider_full_tool.spider_full_tool import SpiderFullTool, SpiderFullParams -from crewai import Agent, Task, Crew - -def test_spider_tool(): - spider_tool = SpiderFullTool() +def test_spider_full_tool(): + spider_tool = SpiderFullTool(api_key="your_api_key") + url = "https://spider.cloud" params = SpiderFullParams( - return_format="markdown" + request="http", + limit=1, + depth=1, + cache=True, + locale="en-US", + stealth=True, + headers={"User-Agent": "test-agent"}, + metadata=False, + viewport="800x600", + encoding="UTF-8", + subdomains=False, + user_agent="test-agent", + store_data=False, + proxy_enabled=False, + query_selector=None, + full_resources=False, + request_timeout=30, + run_in_background=False ) - - docs = spider_tool._run("https://spider.cloud", params=params) + docs = spider_tool._run(url=url, params=params) print(docs) - - # searcher = Agent( - # role="Web Research Expert", - # goal="Find related information from specific URL's", - # backstory="An expert web researcher that uses the web extremely well", - # tools=[spider_tool], - # verbose=True - # ) - - # summarize_spider = Task( - # description="Summarize the content of spider.cloud", - # expected_output="A summary that goes over what spider does", - # agent=searcher - # ) - - # crew = Crew( - # agents=[searcher], - # tasks=[summarize_spider], - # verbose=2 - # ) - - # crew.kickoff() if __name__ == "__main__": - test_spider_tool() \ No newline at end of file + test_spider_full_tool() From 438c979a2e29b0865a8334980414434638e9c082 Mon Sep 17 00:00:00 2001 From: SuperMuel <69467005+SuperMuel@users.noreply.github.com> Date: Thu, 23 May 2024 13:51:17 +0200 Subject: [PATCH 069/391] Add n_results, country, location and locale parameters to payload --- .../tools/serper_dev_tool/serper_dev_tool.py | 21 +++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py index 927c0e3b3..9a571cdac 100644 --- a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py +++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py @@ -2,7 +2,7 @@ import os import json import requests -from typing import Type, Any +from typing import Optional, Type, Any from pydantic.v1 import BaseModel, Field from crewai_tools.tools.base_tool import BaseTool @@ -16,18 +16,27 @@ class SerperDevTool(BaseTool): args_schema: Type[BaseModel] = SerperDevToolSchema search_url: str = "https://google.serper.dev/search" n_results: int = 10 + country: Optional[str] = None + location: Optional[str] = None + locale: Optional[str] = None def _run( self, **kwargs: Any, ) -> Any: - search_query = kwargs.get('search_query') - if search_query is None: - search_query = kwargs.get('query') + search_query = kwargs.get('search_query') or kwargs.get('query') - payload = json.dumps({"q": search_query}) + payload = json.dumps( + { + "q": search_query, + "num": self.n_results, + "gl": self.country, + "location": self.location, + "hl": self.locale, + } + ) headers = { - 'X-API-KEY': os.environ['SERPER_API_KEY'], + 'X-API-KEY': os.environ['SERPER_API_KEY'], 'content-type': 'application/json' } response = requests.request("POST", self.search_url, headers=headers, data=payload) From 56146b7df43849ae696d8afc2100f505ff650962 Mon Sep 17 00:00:00 2001 From: WilliamEspegren Date: Sat, 25 May 2024 
22:22:50 +0200 Subject: [PATCH 070/391] remove full tool, refined tool --- .../tools/spider_full_tool/README.md | 55 ------------ .../spider_full_tool/spider_full_tool.py | 85 ------------------- .../tools/spider_tool/spider_tool.py | 20 ++++- tests/spider_full_tool_test.py | 30 ------- tests/spider_tool_test.py | 31 +++++-- 5 files changed, 40 insertions(+), 181 deletions(-) delete mode 100644 src/crewai_tools/tools/spider_full_tool/README.md delete mode 100644 src/crewai_tools/tools/spider_full_tool/spider_full_tool.py delete mode 100644 tests/spider_full_tool_test.py diff --git a/src/crewai_tools/tools/spider_full_tool/README.md b/src/crewai_tools/tools/spider_full_tool/README.md deleted file mode 100644 index f2e1d536c..000000000 --- a/src/crewai_tools/tools/spider_full_tool/README.md +++ /dev/null @@ -1,55 +0,0 @@ -# SpiderFullTool - -## Description - -This is the full fledged Spider tool, with all the possible params listed to the agent. This can eat ut tokens and be a big chunk of your token limit, if this is a problem, check out the `SpiderTool` which probably has most of the features you are looking for. But if you truly want to experience the full power of Spider... - -[Spider](https://spider.cloud/?ref=crewai) is the [fastest](https://github.com/spider-rs/spider/blob/main/benches/BENCHMARKS.md#benchmark-results) open source scraper and crawler that returns LLM-ready data. It converts any website into pure HTML, markdown, metadata or text while enabling you to crawl with custom actions using AI. - -## Installation - -To use the Spider API you need to download the [Spider SDK](https://pypi.org/project/spider-client/) and the crewai[tools] SDK too: - -```python -pip install spider-client 'crewai[tools]' -``` - -## Example - -This example shows you how you can use the full Spider tool to enable your agent to scrape and crawl websites. The data returned from the Spider API is already LLM-ready, so no need to do any cleaning there. - -```python -from crewai_tools import SpiderFullTool - -tool = SpiderFullTool() -``` - -## Arguments - -- `api_key` (string, optional): Specifies Spider API key. If not specified, it looks for `SPIDER_API_KEY` in environment variables. -- `params` (object, optional): Optional parameters for the request. Defaults to `{"return_format": "markdown"}` to return the website's content in a format that fits LLMs better. - - `request` (string): The request type to perform. Possible values are `http`, `chrome`, and `smart`. Use `smart` to perform an HTTP request by default until JavaScript rendering is needed for the HTML. - - `limit` (int): The maximum number of pages allowed to crawl per website. Remove the value or set it to `0` to crawl all pages. - - `depth` (int): The crawl limit for maximum depth. If `0`, no limit will be applied. - - `cache` (bool): Use HTTP caching for the crawl to speed up repeated runs. Default is `true`. - - `budget` (object): Object that has paths with a counter for limiting the amount of pages example `{"*":1}` for only crawling the root page. - - `locale` (string): The locale to use for request, example `en-US`. - - `cookies` (string): Add HTTP cookies to use for request. - - `stealth` (bool): Use stealth mode for headless chrome request to help prevent being blocked. The default is `true` on chrome. - - `headers` (object): Forward HTTP headers to use for all request. The object is expected to be a map of key value pairs. - - `metadata` (bool): Boolean to store metadata about the pages and content found. 
This could help improve AI interopt. Defaults to `false` unless you have the website already stored with the configuration enabled. - - `viewport` (object): Configure the viewport for chrome. Defaults to `800x600`. - - `encoding` (string): The type of encoding to use like `UTF-8`, `SHIFT_JIS`, or etc. - - `subdomains` (bool): Allow subdomains to be included. Default is `false`. - - `user_agent` (string): Add a custom HTTP user agent to the request. By default this is set to a random agent. - - `store_data` (bool): Boolean to determine if storage should be used. If set this takes precedence over `storageless`. Defaults to `false`. - - `gpt_config` (object): Use AI to generate actions to perform during the crawl. You can pass an array for the `"prompt"` to chain steps. - - `fingerprint` (bool): Use advanced fingerprint for chrome. - - `storageless` (bool): Boolean to prevent storing any type of data for the request including storage and AI vectors embedding. Defaults to `false` unless you have the website already stored. - - `readability` (bool): Use [readability](https://github.com/mozilla/readability) to pre-process the content for reading. This may drastically improve the content for LLM usage. - `return_format` (string): The format to return the data in. Possible values are `markdown`, `raw`, `text`, and `html2text`. Use `raw` to return the default format of the page like HTML etc. - - `proxy_enabled` (bool): Enable high performance premium proxies for the request to prevent being blocked at the network level. - - `query_selector` (string): The CSS query selector to use when extracting content from the markup. - - `full_resources` (bool): Crawl and download all the resources for a website. - - `request_timeout` (int): The timeout to use for request. Timeouts can be from `5-60`. The default is `30` seconds. - - `run_in_background` (bool): Run the request in the background. Useful if storing data and wanting to trigger crawls to the dashboard. This has no effect if storageless is set. diff --git a/src/crewai_tools/tools/spider_full_tool/spider_full_tool.py b/src/crewai_tools/tools/spider_full_tool/spider_full_tool.py deleted file mode 100644 index 5d8ea6eda..000000000 --- a/src/crewai_tools/tools/spider_full_tool/spider_full_tool.py +++ /dev/null @@ -1,85 +0,0 @@ -from typing import Optional, Any, Type, Dict, Literal -from pydantic.v1 import BaseModel, Field -from crewai_tools.tools.base_tool import BaseTool -import requests - -class SpiderFullParams(BaseModel): - request: Optional[str] = Field(description="The request type to perform. Possible values are `http`, `chrome`, and `smart`.") - limit: Optional[int] = Field(description="The maximum number of pages allowed to crawl per website. Remove the value or set it to `0` to crawl all pages.") - depth: Optional[int] = Field(description="The crawl limit for maximum depth. If `0`, no limit will be applied.") - cache: Optional[bool] = Field(default=True, description="Use HTTP caching for the crawl to speed up repeated runs.") - budget: Optional[Dict[str, int]] = Field(description="Object that has paths with a counter for limiting the number of pages, e.g., `{'*':1}` for only crawling the root page.") - locale: Optional[str] = Field(description="The locale to use for request, e.g., `en-US`.") - cookies: Optional[str] = Field(description="Add HTTP cookies to use for request.") - stealth: Optional[bool] = Field(default=True, description="Use stealth mode for headless chrome request to help prevent being blocked. 
Default is `true` on chrome.") - headers: Optional[Dict[str, str]] = Field(description="Forward HTTP headers to use for all requests. The object is expected to be a map of key-value pairs.") - metadata: Optional[bool] = Field(default=False, description="Boolean to store metadata about the pages and content found. Defaults to `false` unless enabled.") - viewport: Optional[str] = Field(default="800x600", description="Configure the viewport for chrome. Defaults to `800x600`.") - encoding: Optional[str] = Field(description="The type of encoding to use, e.g., `UTF-8`, `SHIFT_JIS`.") - subdomains: Optional[bool] = Field(default=False, description="Allow subdomains to be included. Default is `false`.") - user_agent: Optional[str] = Field(description="Add a custom HTTP user agent to the request. Default is a random agent.") - store_data: Optional[bool] = Field(default=False, description="Boolean to determine if storage should be used. Defaults to `false`.") - gpt_config: Optional[Dict[str, Any]] = Field(description="Use AI to generate actions to perform during the crawl. Can pass an array for the `prompt` to chain steps.") - fingerprint: Optional[bool] = Field(description="Use advanced fingerprinting for chrome.") - storageless: Optional[bool] = Field(default=False, description="Boolean to prevent storing any data for the request. Defaults to `false`.") - readability: Optional[bool] = Field(description="Use readability to pre-process the content for reading.") - return_format: Optional[str] = Field(default="markdown", description="The format to return the data in. Possible values are `markdown`, `raw`, `text`, and `html2text`.") - proxy_enabled: Optional[bool] = Field(description="Enable high-performance premium proxies to prevent being blocked.") - query_selector: Optional[str] = Field(description="The CSS query selector to use when extracting content from the markup.") - full_resources: Optional[bool] = Field(description="Crawl and download all resources for a website.") - request_timeout: Optional[int] = Field(default=30, description="The timeout for requests. Ranges from `5-60` seconds. Default is `30` seconds.") - run_in_background: Optional[bool] = Field(description="Run the request in the background. Useful if storing data and triggering crawls to the dashboard.") - -class SpiderFullToolSchema(BaseModel): - url: str = Field(description="Website URL") - params: Optional[SpiderFullParams] = Field(default=SpiderFullParams(), description="All the params available") - mode: Optional[Literal["scrape", "crawl"]] = Field(default="scrape", description="Mode, either `scrape` or `crawl` the URL") - -class SpiderFullTool(BaseTool): - name: str = "Spider scrape & crawl tool" - description: str = "Scrape & Crawl any URL and return LLM-ready data." 
- args_schema: Type[BaseModel] = SpiderFullToolSchema - api_key: Optional[str] = None - spider: Optional[Any] = None - - def __init__(self, api_key: Optional[str] = None, **kwargs): - super().__init__(**kwargs) - try: - from spider import Spider # type: ignore - except ImportError: - raise ImportError( - "`spider-client` package not found, please run `pip install spider-client`" - ) - - self.spider = Spider(api_key=api_key) - - def _run( - self, - url: str, - params: Optional[SpiderFullParams] = None, - mode: Optional[Literal["scrape", "crawl"]] = "scrape" - ): - if mode not in ["scrape", "crawl"]: - raise ValueError( - "Unknown mode in `mode` parameter, `scrape` or `crawl` are the allowed modes" - ) - - if params is None: - print("PARAMS IT NONE") - params = SpiderFullParams() - print(params) - - action = self.spider.scrape_url if mode == "scrape" else self.spider.crawl_url - response = action(url=url, params=params.dict()) - - # Debugging: Print the response content - print(f"Response status code: {response.status_code}") - print(f"Response content: {response.text}") - - try: - spider_docs = response.json() - except requests.exceptions.JSONDecodeError as e: - print(f"JSONDecodeError: {e}") - spider_docs = {"error": "Failed to decode JSON response"} - - return spider_docs diff --git a/src/crewai_tools/tools/spider_tool/spider_tool.py b/src/crewai_tools/tools/spider_tool/spider_tool.py index e020a599a..9c7f6ad08 100644 --- a/src/crewai_tools/tools/spider_tool/spider_tool.py +++ b/src/crewai_tools/tools/spider_tool/spider_tool.py @@ -4,8 +4,17 @@ from crewai_tools.tools.base_tool import BaseTool class SpiderToolSchema(BaseModel): url: str = Field(description="Website URL") - params: Optional[Dict[str, Any]] = Field(default={"return_format": "markdown"}, description="Set additional params. Leave empty for this to return LLM-ready data") - mode: Optional[Literal["scrape", "crawl"]] = Field(defualt="scrape", description="Mode, the only two allowed modes are `scrape` or `crawl` the url") + params: Optional[Dict[str, Any]] = Field( + description="Set additional params. Options include:\n" + "- `limit`: Optional[int] - The maximum number of pages allowed to crawl per website. Remove the value or set it to `0` to crawl all pages.\n" + "- `depth`: Optional[int] - The crawl limit for maximum depth. If `0`, no limit will be applied.\n" + "- `metadata`: Optional[bool] - Boolean to include metadata or not. Defaults to `False` unless set to `True`. If the user wants metadata, include params.metadata = True.\n" + "- `query_selector`: Optional[str] - The CSS query selector to use when extracting content from the markup.\n" + ) + mode: Literal["scrape", "crawl"] = Field( + default="scrape", + description="Mode, the only two allowed modes are `scrape` or `crawl`. `scrape` will only scrape the one page of the url provided, while `crawl` will crawl the website following all the subpages found." 
+ ) class SpiderTool(BaseTool): name: str = "Spider scrape & crawl tool" @@ -28,7 +37,7 @@ class SpiderTool(BaseTool): def _run( self, url: str, - params: Optional[Dict[str, any]] = None, + params: Optional[Dict[str, Any]] = None, mode: Optional[Literal["scrape", "crawl"]] = "scrape" ): if mode not in ["scrape", "crawl"]: @@ -36,7 +45,10 @@ class SpiderTool(BaseTool): "Unknown mode in `mode` parameter, `scrape` or `crawl` are the allowed modes" ) - if params is None or params == {}: + # Ensure 'return_format': 'markdown' is always included + if params: + params["return_format"] = "markdown" + else: params = {"return_format": "markdown"} action = ( diff --git a/tests/spider_full_tool_test.py b/tests/spider_full_tool_test.py deleted file mode 100644 index 220acfb49..000000000 --- a/tests/spider_full_tool_test.py +++ /dev/null @@ -1,30 +0,0 @@ -from crewai_tools.tools.spider_full_tool.spider_full_tool import SpiderFullTool, SpiderFullParams - -def test_spider_full_tool(): - spider_tool = SpiderFullTool(api_key="your_api_key") - url = "https://spider.cloud" - params = SpiderFullParams( - request="http", - limit=1, - depth=1, - cache=True, - locale="en-US", - stealth=True, - headers={"User-Agent": "test-agent"}, - metadata=False, - viewport="800x600", - encoding="UTF-8", - subdomains=False, - user_agent="test-agent", - store_data=False, - proxy_enabled=False, - query_selector=None, - full_resources=False, - request_timeout=30, - run_in_background=False - ) - docs = spider_tool._run(url=url, params=params) - print(docs) - -if __name__ == "__main__": - test_spider_full_tool() diff --git a/tests/spider_tool_test.py b/tests/spider_tool_test.py index 67e5802a2..7faaa5338 100644 --- a/tests/spider_tool_test.py +++ b/tests/spider_tool_test.py @@ -10,22 +10,39 @@ def test_spider_tool(): goal="Find related information from specific URL's", backstory="An expert web researcher that uses the web extremely well", tools=[spider_tool], - verbose=True + verbose=True, + cache=False ) - summarize_spider = Task( - description="Summarize the content of spider.cloud", - expected_output="A summary that goes over what spider does", + choose_between_scrape_crawl = Task( + description="Scrape the page of spider.cloud and return a summary of how fast it is", + expected_output="spider.cloud is a fast scraping and crawling tool", agent=searcher ) - + + return_metadata = Task( + description="Scrape https://spider.cloud with a limit of 1 and enable metadata", + expected_output="Metadata and 10 word summary of spider.cloud", + agent=searcher + ) + + css_selector = Task( + description="Scrape one page of spider.cloud with the `body > div > main > section.grid.md\:grid-cols-2.gap-10.place-items-center.md\:max-w-screen-xl.mx-auto.pb-8.pt-20 > div:nth-child(1) > h1` CSS selector", + expected_output="The content of the element with the css selector body > div > main > section.grid.md\:grid-cols-2.gap-10.place-items-center.md\:max-w-screen-xl.mx-auto.pb-8.pt-20 > div:nth-child(1) > h1", + agent=searcher + ) + crew = Crew( agents=[searcher], - tasks=[summarize_spider], + tasks=[ + choose_between_scrape_crawl, + return_metadata, + css_selector + ], verbose=2 ) crew.kickoff() if __name__ == "__main__": - test_spider_tool() \ No newline at end of file + test_spider_tool() From 7d40c98434a0480c0085adb1aaed105932a4c992 Mon Sep 17 00:00:00 2001 From: WilliamEspegren Date: Sat, 25 May 2024 22:28:48 +0200 Subject: [PATCH 071/391] remove full tool import --- src/crewai_tools/tools/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git 
a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py
index 7b794508d..ea0b16304 100644
--- a/src/crewai_tools/tools/__init__.py
+++ b/src/crewai_tools/tools/__init__.py
@@ -22,4 +22,3 @@ from .xml_search_tool.xml_search_tool import XMLSearchTool
 from .youtube_channel_search_tool.youtube_channel_search_tool import YoutubeChannelSearchTool
 from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool
 from .spider_tool.spider_tool import SpiderTool
-from .spider_full_tool.spider_full_tool import SpiderFullTool
\ No newline at end of file
From f0f1ab175a96ccc5388d960d71dc29d2bab9487e Mon Sep 17 00:00:00 2001
From: WilliamEspegren
Date: Sat, 25 May 2024 22:36:30 +0200
Subject: [PATCH 072/391] remove unecessary os import

---
 tests/spider_tool_test.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/spider_tool_test.py b/tests/spider_tool_test.py
index 7faaa5338..977dd8769 100644
--- a/tests/spider_tool_test.py
+++ b/tests/spider_tool_test.py
@@ -1,4 +1,3 @@
-import os
 from crewai_tools.tools.spider_tool.spider_tool import SpiderTool
 from crewai import Agent, Task, Crew

From ad965357ce403e165725f9131820f7ba924babf1 Mon Sep 17 00:00:00 2001
From: WilliamEspegren
Date: Sat, 25 May 2024 22:40:48 +0200
Subject: [PATCH 073/391] refined spider_tool.py

---
 src/crewai_tools/tools/spider_tool/README.md | 31 ++++++++++++++++++++-
 .../tools/spider_tool/spider_tool.py | 2 +-
 2 files changed, 31 insertions(+), 2 deletions(-)

diff --git a/src/crewai_tools/tools/spider_tool/README.md b/src/crewai_tools/tools/spider_tool/README.md
index c608b5f5f..563c07a04 100644
--- a/src/crewai_tools/tools/spider_tool/README.md
+++ b/src/crewai_tools/tools/spider_tool/README.md
@@ -19,7 +19,36 @@ This example shows you how you can use the Spider tool to enable your agent to s
```python
from crewai_tools import SpiderTool
+from crewai import Agent, Task, Crew

-tool = SpiderTool()
+def main():
+ spider_tool = SpiderTool()
+
+ searcher = Agent(
+ role="Web Research Expert",
+ goal="Find related information from specific URL's",
+ backstory="An expert web researcher that uses the web extremely well",
+ tools=[spider_tool],
+ verbose=True,
+ )
+
+ return_metadata = Task(
+ description="Scrape https://spider.cloud with a limit of 1 and enable metadata",
+ expected_output="Metadata and 10 word summary of spider.cloud",
+ agent=searcher
+ )
+
+ crew = Crew(
+ agents=[searcher],
+ tasks=[
+ return_metadata,
+ ],
+ verbose=2
+ )
+
+ crew.kickoff()
+
+if __name__ == "__main__":
+ main()
```

## Arguments
diff --git a/src/crewai_tools/tools/spider_tool/spider_tool.py b/src/crewai_tools/tools/spider_tool/spider_tool.py
index 9c7f6ad08..b4b230c8e 100644
--- a/src/crewai_tools/tools/spider_tool/spider_tool.py
+++ b/src/crewai_tools/tools/spider_tool/spider_tool.py
@@ -13,7 +13,7 @@ class SpiderToolSchema(BaseModel):
 )
 mode: Literal["scrape", "crawl"] = Field(
 default="scrape",
- description="Mode, the only two allowed modes are `scrape` or `crawl`. `scrape` will only scrape the one page of the url provided, while `crawl` will crawl the website following all the subpages found."
+ description="Mode, the only two allowed modes are `scrape` or `crawl`. Use `scrape` to scrape a single page and `crawl` to crawl the entire website following subpages. These modes are the only allowed values even when ANY params is set."
)

class SpiderTool(BaseTool):
From 53e9b407250ce2b769b4f3912f3794ea3623e0eb Mon Sep 17 00:00:00 2001
From: Mazen Ramadan
Date: Mon, 27 May 2024 14:48:38 +0300
Subject: [PATCH 074/391] Add Scrapfly website scrape tool

---
 src/crewai_tools/__init__.py | 1 +
 src/crewai_tools/tools/__init__.py | 1 +
 .../scrapfly_scrape_website_tool/README.md | 57 +++++++++++++++++++
 .../scrapfly_scrape_website_tool.py | 47 +++++++++++++++
 4 files changed, 106 insertions(+)
 create mode 100644 src/crewai_tools/tools/scrapfly_scrape_website_tool/README.md
 create mode 100644 src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py

diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py
index faac5d37d..cc1a4cef4 100644
--- a/src/crewai_tools/__init__.py
+++ b/src/crewai_tools/__init__.py
@@ -18,6 +18,7 @@ from .tools import (
 RagTool,
 ScrapeElementFromWebsiteTool,
 ScrapeWebsiteTool,
+ ScrapflyScrapeWebsiteTool,
 SeleniumScrapingTool,
 WebsiteSearchTool,
 XMLSearchTool,
diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py
index 648671d97..1b332a43d 100644
--- a/src/crewai_tools/tools/__init__.py
+++ b/src/crewai_tools/tools/__init__.py
@@ -16,6 +16,7 @@ from .pg_seach_tool.pg_search_tool import PGSearchTool
 from .rag.rag_tool import RagTool
 from .scrape_element_from_website.scrape_element_from_website import ScrapeElementFromWebsiteTool
 from .scrape_website_tool.scrape_website_tool import ScrapeWebsiteTool
+from .scrapfly_scrape_website_tool.scrapfly_scrape_website_tool import ScrapflyScrapeWebsiteTool
 from .selenium_scraping_tool.selenium_scraping_tool import SeleniumScrapingTool
 from .website_search.website_search_tool import WebsiteSearchTool
 from .xml_search_tool.xml_search_tool import XMLSearchTool
diff --git a/src/crewai_tools/tools/scrapfly_scrape_website_tool/README.md b/src/crewai_tools/tools/scrapfly_scrape_website_tool/README.md
new file mode 100644
index 000000000..6ab9c9d52
--- /dev/null
+++ b/src/crewai_tools/tools/scrapfly_scrape_website_tool/README.md
@@ -0,0 +1,57 @@
+# ScrapflyScrapeWebsiteTool
+
+## Description
+[ScrapFly](https://scrapfly.io/) is a web scraping API with headless browser capabilities, proxies, and anti-bot bypass. It allows for extracting web page data into accessible LLM markdown or text.
+
+## Setup and Installation
+1. **Install ScrapFly Python SDK**: Install the `scrapfly-sdk` Python package to use the ScrapFly Web Loader. Install it via pip with the following command:
+
+ ```bash
+ pip install scrapfly-sdk
+ ```
+
+2. **API Key**: Register for free from [scrapfly.io/register](https://www.scrapfly.io/register/) to obtain your API key.
+
+## Example Usage
+
+Utilize the ScrapflyScrapeWebsiteTool as follows to retrieve web page data as text, markdown (LLM accessible), or HTML:
+
+```python
+from crewai_tools import ScrapflyScrapeWebsiteTool
+
+tool = ScrapflyScrapeWebsiteTool(
+ api_key="Your ScrapFly API key"
+)
+
+result = tool._run(
+ url="https://web-scraping.dev/products",
+ scrape_format="markdown",
+ ignore_scrape_failures=True
+)
+```
+
+## Additional Arguments
+The ScrapflyScrapeWebsiteTool also allows passing a ScrapeConfig object for customizing the scrape request.
See the [API params documentation](https://scrapfly.io/docs/scrape-api/getting-started) for the full feature details and their API params:
+```python
+from crewai_tools import ScrapflyScrapeWebsiteTool
+
+tool = ScrapflyScrapeWebsiteTool(
+ api_key="Your ScrapFly API key"
+)
+
+scrapfly_scrape_config = {
+ "asp": True, # Bypass scraping blocking and solutions, like Cloudflare
+ "render_js": True, # Enable JavaScript rendering with a cloud headless browser
+ "proxy_pool": "public_residential_pool", # Select a proxy pool (datacenter or residential)
+ "country": "us", # Select a proxy location
+ "auto_scroll": True, # Auto scroll the page
+ "js": "" # Execute custom JavaScript code by the headless browser
+}
+
+result = tool._run(
+ url="https://web-scraping.dev/products",
+ scrape_format="markdown",
+ ignore_scrape_failures=True,
+ scrape_config=scrapfly_scrape_config
+)
+```
\ No newline at end of file
diff --git a/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py b/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py
new file mode 100644
index 000000000..b0bfa7ee6
--- /dev/null
+++ b/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py
@@ -0,0 +1,47 @@
+import logging
+
+from typing import Optional, Any, Type, Dict, Literal
+from pydantic.v1 import BaseModel, Field
+from crewai_tools.tools.base_tool import BaseTool
+
+logger = logging.getLogger(__file__)
+
+class ScrapflyScrapeWebsiteToolSchema(BaseModel):
+ url: str = Field(description="Webpage URL")
+ scrape_format: Optional[Literal["raw", "markdown", "text"]] = Field(default="markdown", description="Webpage extraction format")
+ scrape_config: Optional[Dict[str, Any]] = Field(default=None, description="Scrapfly request scrape config")
+ ignore_scrape_failures: Optional[bool] = Field(default=None, description="whether to ignore failures")
+
+class ScrapflyScrapeWebsiteTool(BaseTool):
+ name: str = "Scrapfly web scraping API tool"
+ description: str = "Scrape a webpage url using Scrapfly and return its content as markdown or text"
+ args_schema: Type[BaseModel] = ScrapflyScrapeWebsiteToolSchema
+ api_key: str = None
+ scrapfly: Optional[Any] = None
+
+ def __init__(self, api_key: str):
+ super().__init__()
+ try:
+ from scrapfly import ScrapflyClient
+ except ImportError:
+ raise ImportError(
+ "`scrapfly` package not found, please run `pip install scrapfly-sdk`"
+ )
+ self.scrapfly = ScrapflyClient(key=api_key)
+
+ def _run(self, url: str, scrape_format: str = "markdown", scrape_config: Optional[Dict[str, Any]] = None, ignore_scrape_failures: Optional[bool] = None):
+ from scrapfly import ScrapeApiResponse, ScrapeConfig
+
+ scrape_config = scrape_config if scrape_config is not None else {}
+ try:
+ response: ScrapeApiResponse = self.scrapfly.scrape(
+ ScrapeConfig(url, format=scrape_format, **scrape_config)
+ )
+ return response.scrape_result["content"]
+ except Exception as e:
+ if ignore_scrape_failures:
+ logger.error(f"Error fetching data from {url}, exception: {e}")
+ return None
+ else:
+ raise e
+
\ No newline at end of file
From 7ee7d846e2abe079d637a10629c709bf86edd8ac Mon Sep 17 00:00:00 2001
From: Jerry Liu
Date: Sat, 8 Jun 2024 21:42:28 -0700
Subject: [PATCH 075/391] cr

---
 src/crewai_tools/__init__.py | 1 +
 src/crewai_tools/tools/__init__.py | 1 +
 .../tools/llamaindex_tool/README.md | 53 ++++++++++++
 .../tools/llamaindex_tool/llamaindex_tool.py | 84 +++++++++++++++++++
 4 files changed, 139 insertions(+)
 create mode 100644
src/crewai_tools/tools/llamaindex_tool/README.md create mode 100644 src/crewai_tools/tools/llamaindex_tool/llamaindex_tool.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index faac5d37d..a51d70449 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -23,4 +23,5 @@ from .tools import ( XMLSearchTool, YoutubeChannelSearchTool, YoutubeVideoSearchTool, + LlamaIndexTool ) \ No newline at end of file diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 648671d97..4da0c0337 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -21,3 +21,4 @@ from .website_search.website_search_tool import WebsiteSearchTool from .xml_search_tool.xml_search_tool import XMLSearchTool from .youtube_channel_search_tool.youtube_channel_search_tool import YoutubeChannelSearchTool from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool +from .llamaindex_tool.llamaindex_tool import LlamaIndexTool diff --git a/src/crewai_tools/tools/llamaindex_tool/README.md b/src/crewai_tools/tools/llamaindex_tool/README.md new file mode 100644 index 000000000..cd8f4cd99 --- /dev/null +++ b/src/crewai_tools/tools/llamaindex_tool/README.md @@ -0,0 +1,53 @@ +# LlamaIndexTool Documentation + +## Description +This tool is designed to be a general wrapper around LlamaIndex tools and query engines, enabling you to leverage LlamaIndex resources +for RAG and agentic pipelines as tools to plug into CrewAI agents. + +## Installation +To incorporate this tool into your project, follow the installation instructions below: +```shell +pip install 'crewai[tools]' +``` + +## Example +The following example demonstrates how to initialize the tool and execute a search with a given query: + +```python +from crewai_tools import LlamaIndexTool + +# Initialize the tool from a LlamaIndex Tool + +## Example 1: Initialize from FunctionTool +from llama_index.core.tools import FunctionTool + +your_python_function = lambda ...: ... +og_tool = FunctionTool.from_defaults(your_python_function, name="", description='') +tool = LlamaIndexTool.from_tool(og_tool) + +## Example 2: Initialize from LlamaHub Tools +from llama_index.tools.wolfram_alpha import WolframAlphaToolSpec +wolfram_spec = WolframAlphaToolSpec(app_id="") +wolfram_tools = wolfram_spec.to_tool_list() +tools = [LlamaIndexTool.from_tool(t) for t in wolfram_tools] + + +# Initialize Tool from a LlamaIndex Query Engine + +## NOTE: LlamaIndex has a lot of query engines, define whatever query engine you want +query_engine = index.as_query_engine() +query_tool = LlamaIndexTool.from_query_engine( + query_engine, + name="Uber 2019 10K Query Tool", + description="Use this tool to look up the 2019 Uber 10K Annual Report" +) + +``` + +## Steps to Get Started +To effectively use the `LlamaIndexTool`, follow these steps: + +1. **Install CrewAI**: Confirm that the `crewai[tools]` package is installed in your Python environment. +2. **Install and use LlamaIndex**: Follow LlamaIndex documentation (https://docs.llamaindex.ai/) to set up a RAG/agent pipeline, as in the sketch below.
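For a concrete starting point on step 2, a minimal end-to-end sketch might look like the following (assuming the `llama-index` package is installed, a default embedding model is configured, and `./data` holds a few local text files; the tool name and description are illustrative placeholders):

```python
# Minimal sketch: build a local vector index and expose it to CrewAI as a tool.
# Assumes `llama-index` is installed and ./data contains a few text files;
# the name and description below are illustrative, not part of the patch.
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from crewai_tools import LlamaIndexTool

documents = SimpleDirectoryReader("./data").load_data()  # read local files
index = VectorStoreIndex.from_documents(documents)       # embed and index them

query_tool = LlamaIndexTool.from_query_engine(
    index.as_query_engine(),
    name="Local Docs Query Tool",
    description="Use this tool to look up facts in the local documents",
)
```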
+ + diff --git a/src/crewai_tools/tools/llamaindex_tool/llamaindex_tool.py b/src/crewai_tools/tools/llamaindex_tool/llamaindex_tool.py new file mode 100644 index 000000000..5aac51052 --- /dev/null +++ b/src/crewai_tools/tools/llamaindex_tool/llamaindex_tool.py @@ -0,0 +1,84 @@ +import os +import json +import requests + +from typing import Type, Any, cast, Optional +from pydantic.v1 import BaseModel, Field +from crewai_tools.tools.base_tool import BaseTool + +class LlamaIndexTool(BaseTool): + """Tool to wrap LlamaIndex tools/query engines.""" + llama_index_tool: Any + + def _run( + self, + *args: Any, + **kwargs: Any, + ) -> Any: + """Run tool.""" + from llama_index.core.tools import BaseTool as LlamaBaseTool + tool = cast(LlamaBaseTool, self.llama_index_tool) + return tool(*args, **kwargs) + + @classmethod + def from_tool( + cls, + tool: Any, + **kwargs: Any + ) -> "LlamaIndexTool": + from llama_index.core.tools import BaseTool as LlamaBaseTool + + if not isinstance(tool, LlamaBaseTool): + raise ValueError(f"Expected a LlamaBaseTool, got {type(tool)}") + tool = cast(LlamaBaseTool, tool) + + if tool.metadata.fn_schema is None: + raise ValueError("The LlamaIndex tool does not have an fn_schema specified.") + args_schema = cast(Type[BaseModel], tool.metadata.fn_schema) + + return cls( + name=tool.metadata.name, + description=tool.metadata.description, + args_schema=args_schema, + llama_index_tool=tool, + **kwargs + ) + + + @classmethod + def from_query_engine( + cls, + query_engine: Any, + name: Optional[str] = None, + description: Optional[str] = None, + return_direct: bool = False, + **kwargs: Any + ) -> "LlamaIndexTool": + from llama_index.core.query_engine import BaseQueryEngine + from llama_index.core.tools import QueryEngineTool + + if not isinstance(query_engine, BaseQueryEngine): + raise ValueError(f"Expected a BaseQueryEngine, got {type(query_engine)}") + + # NOTE: by default the schema expects an `input` variable. However this + # confuses crewAI so we are renaming to `query`. 
+ class QueryToolSchema(BaseModel): + """Schema for query tool.""" + query: str = Field(..., description="Search query for the query tool.") + + # NOTE: setting `resolve_input_errors` to True is important because the schema expects `input` but we are using `query` + query_engine_tool = QueryEngineTool.from_defaults( + query_engine, + name=name, + description=description, + return_direct=return_direct, + resolve_input_errors=True, + ) + # HACK: we are replacing the schema with our custom schema + query_engine_tool.metadata.fn_schema = QueryToolSchema + + return cls.from_tool( + query_engine_tool, + **kwargs + ) + \ No newline at end of file From ff80e6cb79d5606753a9a232105c24f64e57c223 Mon Sep 17 00:00:00 2001 From: smsajjadzaidi Date: Mon, 10 Jun 2024 02:54:21 +0500 Subject: [PATCH 076/391] fixed use of arg n_results --- src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py index 927c0e3b3..9c26f6128 100644 --- a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py +++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py @@ -33,7 +33,7 @@ class SerperDevTool(BaseTool): response = requests.request("POST", self.search_url, headers=headers, data=payload) results = response.json() if 'organic' in results: - results = results['organic'] + results = results['organic'][:self.n_results] string = [] for result in results: try: @@ -44,7 +44,7 @@ class SerperDevTool(BaseTool): "---" ])) except KeyError: - next + continue content = '\n'.join(string) return f"\nSearch results: {content}\n" From d8b8edab087fa7da4e085c2d83154f3c4a272d63 Mon Sep 17 00:00:00 2001 From: teampen <136991215+teampen@users.noreply.github.com> Date: Mon, 10 Jun 2024 21:15:21 -0400 Subject: [PATCH 077/391] adding google search, scholar, and news --- src/crewai_tools/__init__.py | 3 + src/crewai_tools/tools/__init__.py | 3 + .../tools/serply_api_tool/README.md | 67 +++++++++++++ .../serply_news_search_tool.py | 80 ++++++++++++++++ .../serply_scholar_search_tool.py | 85 +++++++++++++++++ .../serply_api_tool/serply_web_search_tool.py | 93 +++++++++++++++++++ 6 files changed, 331 insertions(+) create mode 100644 src/crewai_tools/tools/serply_api_tool/README.md create mode 100644 src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py create mode 100644 src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py create mode 100644 src/crewai_tools/tools/serply_api_tool/serply_web_search_tool.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index faac5d37d..a9013b7ee 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -23,4 +23,7 @@ from .tools import ( XMLSearchTool, YoutubeChannelSearchTool, YoutubeVideoSearchTool, + SerplyWebSearchTool, + SerplyNewsSearchTool, + SerplyScholarSearchTool ) \ No newline at end of file diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 648671d97..138dbce17 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -21,3 +21,6 @@ from .website_search.website_search_tool import WebsiteSearchTool from .xml_search_tool.xml_search_tool import XMLSearchTool from .youtube_channel_search_tool.youtube_channel_search_tool import YoutubeChannelSearchTool from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool +from
.serply_api_tool.serply_web_search_tool import SerplyWebSearchTool +from .serply_api_tool.serply_news_search_tool import SerplyNewsSearchTool +from .serply_api_tool.serply_scholar_search_tool import SerplyScholarSearchTool diff --git a/src/crewai_tools/tools/serply_api_tool/README.md b/src/crewai_tools/tools/serply_api_tool/README.md new file mode 100644 index 000000000..fe439b28f --- /dev/null +++ b/src/crewai_tools/tools/serply_api_tool/README.md @@ -0,0 +1,67 @@ +# Serply API Documentation + +## Description +This tool is designed to perform web, news, and scholar searches for a specified query across the internet. It utilizes the [Serply.io](https://serply.io) API to fetch and display the most relevant search results based on the query provided by the user. + +## Installation + +To incorporate this tool into your project, follow the installation instructions below: +```shell +pip install 'crewai[tools]' +``` + +## Examples + +## Web Search +The following example demonstrates how to initialize the tool and execute a web search with a given query: + +```python +from crewai_tools import SerplyWebSearchTool + +# Initialize the tool for internet searching capabilities +tool = SerplyWebSearchTool() + +# increase search limits to 100 results +tool = SerplyWebSearchTool(limit=100) + + +# change results language (fr - French) +tool = SerplyWebSearchTool(hl="fr") +``` + +## News Search
The following example demonstrates how to initialize the tool and execute a news search with a given query: + +```python +from crewai_tools import SerplyNewsSearchTool + +# Initialize the tool for internet searching capabilities +tool = SerplyNewsSearchTool() + +# change country news (JP - Japan) +tool = SerplyNewsSearchTool(proxy_location="JP") +``` + +## Scholar Search +The following example demonstrates how to initialize the tool and execute a scholar article search with a given query: + +```python +from crewai_tools import SerplyScholarSearchTool + +# Initialize the tool for internet searching capabilities +tool = SerplyScholarSearchTool() + +# change country news (GB - Great Britain) +tool = SerplyScholarSearchTool(proxy_location="GB") +``` + + +## Steps to Get Started +To effectively use the Serply tools, follow these steps: + +1. **Package Installation**: Confirm that the `crewai[tools]` package is installed in your Python environment. +2. **API Key Acquisition**: Acquire a Serply API key by registering for a free account at [Serply.io](https://serply.io). +3. **Environment Configuration**: Store your obtained API key in an environment variable named `SERPLY_API_KEY` to facilitate its use by the tool. + +## Conclusion +By integrating the Serply tools into Python projects, users gain the ability to conduct real-time web, news, and scholar searches directly from their applications. By adhering to the setup and usage guidelines provided, incorporating this tool into projects is streamlined and straightforward.
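As a rough usage sketch (assuming a valid key is exported as `SERPLY_API_KEY`; the query string is illustrative), executing a query follows the same `_run` convention as the other tools in this repository:

```python
import os

from crewai_tools import SerplyWebSearchTool

# The constructor reads the API key from the environment, so it must be set
# before the tool is created; the value below is a placeholder.
os.environ["SERPLY_API_KEY"] = "your-serply-api-key"

tool = SerplyWebSearchTool(limit=10, hl="en")

# `search_query` matches the tool's args schema; the result is a
# newline-separated block of titles, links, and descriptions.
print(tool._run(search_query="latest advancements in AI"))
```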
diff --git a/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py new file mode 100644 index 000000000..c1fef5a77 --- /dev/null +++ b/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py @@ -0,0 +1,80 @@ +import os +import requests +from urllib.parse import urlencode +from typing import Type, Any, Optional +from pydantic.v1 import BaseModel, Field +from crewai_tools.tools.base_tool import BaseTool + +class SerplyNewsSearchToolSchema(BaseModel): + """Input for Serply News Search.""" + search_query: str = Field(..., description="Mandatory search query you want to use to fetch news articles") + + +class SerplyNewsSearchTool(BaseTool): + name: str = "News Search" + description: str = "A tool to perform News article search with a search_query." + args_schema: Type[BaseModel] = SerplyNewsSearchToolSchema + search_url: str = "https://api.serply.io/v1/news/" + proxy_location: Optional[str] = "US" + headers: Optional[dict] = {} + limit: Optional[int] = 10 + + def __init__( + self, + limit: Optional[int] = 10, + proxy_location: Optional[str] = "US", + **kwargs + ): + """ + param: limit (int): The maximum number of results to return [10-100, defaults to 10] + proxy_location: (str): Where to get news, specifically for a specific country results. + ['US', 'CA', 'IE', 'GB', 'FR', 'DE', 'SE', 'IN', 'JP', 'KR', 'SG', 'AU', 'BR'] (defaults to US) + """ + super().__init__(**kwargs) + self.limit = limit + self.proxy_location = proxy_location + self.headers = { + "X-API-KEY": os.environ["SERPLY_API_KEY"], + "User-Agent": "crew-tools", + "X-Proxy-Location": proxy_location + } + + def _run( + self, + **kwargs: Any, + ) -> Any: + # build query parameters + query_payload = {} + + if "query" in kwargs: + query_payload["q"] = kwargs["query"] + elif "search_query" in kwargs: + query_payload["q"] = kwargs["search_query"] + + # build the url + url = f"{self.search_url}{urlencode(query_payload)}" + + response = requests.request("GET", url, headers=self.headers) + results = response.json() + if "entries" in results: + results = results['entries'] + string = [] + for result in results[:self.limit]: + try: + # follow url + r = requests.get(result['link']) + final_link = r.history[-1].headers['Location'] + string.append('\n'.join([ + f"Title: {result['title']}", + f"Link: {final_link}", + f"Source: {result['source']['title']}", + f"Published: {result['published']}", + "---" + ])) + except KeyError: + next + + content = '\n'.join(string) + return f"\nSearch results: {content}\n" + else: + return results diff --git a/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py new file mode 100644 index 000000000..badc9950e --- /dev/null +++ b/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py @@ -0,0 +1,85 @@ +import os +import requests +from urllib.parse import urlencode +from typing import Type, Any, Optional +from pydantic.v1 import BaseModel, Field +from crewai_tools.tools.base_tool import BaseTool + +class SerplyScholarSearchToolSchema(BaseModel): + """Input for Serply Scholar Search.""" + search_query: str = Field(..., description="Mandatory search query you want to use to fetch scholarly literature") + + +class SerplyScholarSearchTool(BaseTool): + name: str = "Scholar Search" + description: str = "A tool to perform scholarly literature search with a search_query."
+ args_schema: Type[BaseModel] = SerplyScholarSearchToolSchema + search_url: str = "https://api.serply.io/v1/scholar/" + hl: Optional[str] = "us" + proxy_location: Optional[str] = "US" + headers: Optional[dict] = {} + + def __init__( + self, + hl: str = "us", + proxy_location: Optional[str] = "US", + **kwargs + ): + """ + param: hl (str): host Language code to display results in + (reference https://developers.google.com/custom-search/docs/xml_results?hl=en#wsInterfaceLanguages) + proxy_location: (str): Where to get scholarly literature, specifically for a specific country results. + ['US', 'CA', 'IE', 'GB', 'FR', 'DE', 'SE', 'IN', 'JP', 'KR', 'SG', 'AU', 'BR'] (defaults to US) + """ + super().__init__(**kwargs) + self.hl = hl + self.proxy_location = proxy_location + self.headers = { + "X-API-KEY": os.environ["SERPLY_API_KEY"], + "User-Agent": "crew-tools", + "X-Proxy-Location": proxy_location + } + + def _run( + self, + **kwargs: Any, + ) -> Any: + query_payload = { + "hl": self.hl + } + + if "query" in kwargs: + query_payload["q"] = kwargs["query"] + elif "search_query" in kwargs: + query_payload["q"] = kwargs["search_query"] + + # build the url + url = f"{self.search_url}{urlencode(query_payload)}" + + response = requests.request("GET", url, headers=self.headers) + articles = response.json().get("articles", "") + + if not articles: + return "" + + string = [] + for article in articles: + try: + if "doc" in article: + link = article['doc']['link'] + else: + link = article['link'] + authors = [author['name'] for author in article['author']['authors']] + string.append('\n'.join([ + f"Title: {article['title']}", + f"Link: {link}", + f"Description: {article['description']}", + f"Cite: {article['cite']}", + f"Authors: {', '.join(authors)}", + "---" + ])) + except KeyError: + next + + content = '\n'.join(string) + return f"\nSearch results: {content}\n" diff --git a/src/crewai_tools/tools/serply_api_tool/serply_web_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_web_search_tool.py new file mode 100644 index 000000000..5f146c673 --- /dev/null +++ b/src/crewai_tools/tools/serply_api_tool/serply_web_search_tool.py @@ -0,0 +1,93 @@ +import os +import requests +from urllib.parse import urlencode +from typing import Type, Any, Optional +from pydantic.v1 import BaseModel, Field +from crewai_tools.tools.base_tool import BaseTool + + +class SerplyWebSearchToolSchema(BaseModel): + """Input for Serply Web Search.""" + search_query: str = Field(..., description="Mandatory search query you want to use to search Google") + + +class SerplyWebSearchTool(BaseTool): + name: str = "Google Search" + description: str = "A tool to perform Google search with a search_query."
+ args_schema: Type[BaseModel] = SerplyWebSearchToolSchema + search_url: str = "https://api.serply.io/v1/search/" + hl: Optional[str] = "us" + limit: Optional[int] = 10 + device_type: Optional[str] = "desktop" + proxy_location: Optional[str] = "US" + query_payload: Optional[dict] = {} + headers: Optional[dict] = {} + + def __init__( + self, + hl: str = "us", + limit: int = 10, + device_type: str = "desktop", + proxy_location: str = "US", + **kwargs + ): + """ + param: query (str): The query to search for + param: hl (str): host Language code to display results in + (reference https://developers.google.com/custom-search/docs/xml_results?hl=en#wsInterfaceLanguages) + param: limit (int): The maximum number of results to return [10-100, defaults to 10] + param: device_type (str): desktop/mobile results (defaults to desktop) + proxy_location: (str): Where to perform the search, specifically for local/regional results. + ['US', 'CA', 'IE', 'GB', 'FR', 'DE', 'SE', 'IN', 'JP', 'KR', 'SG', 'AU', 'BR'] (defaults to US) + """ + super().__init__(**kwargs) + + self.limit = limit + self.device_type = device_type + self.proxy_location = proxy_location + + # build query parameters + self.query_payload = { + "num": limit, + "gl": proxy_location.upper(), + "hl": hl.lower() + } + self.headers = { + "X-API-KEY": os.environ["SERPLY_API_KEY"], + "X-User-Agent": device_type, + "User-Agent": "crew-tools", + "X-Proxy-Location": proxy_location + } + + def _run( + self, + **kwargs: Any, + ) -> Any: + if "query" in kwargs: + self.query_payload["q"] = kwargs["query"] + elif "search_query" in kwargs: + self.query_payload["q"] = kwargs["search_query"] + + # build the url + url = f"{self.search_url}{urlencode(self.query_payload)}" + + response = requests.request("GET", url, headers=self.headers) + results = response.json() + if "results" in results: + results = results['results'] + string = [] + for result in results: + try: + string.append('\n'.join([ + f"Title: {result['title']}", + f"Link: {result['link']}", + f"Description: {result['description'].strip()}", + "---" + ])) + except KeyError: + next + + content = '\n'.join(string) + return f"\nSearch results: {content}\n" + else: + return results From ffe3829ceff593cfbe2f398bf12051b9ca39e80c Mon Sep 17 00:00:00 2001 From: teampen <136991215+teampen@users.noreply.github.com> Date: Mon, 10 Jun 2024 21:34:53 -0400 Subject: [PATCH 078/391] adding webpage to markdown --- src/crewai_tools/__init__.py | 3 +- src/crewai_tools/tools/__init__.py | 1 + .../tools/serply_api_tool/README.md | 39 +++++++++++++++ .../serply_news_search_tool.py | 1 + .../serply_scholar_search_tool.py | 5 +- .../serply_web_to_markdown_tool.py | 49 +++++++++++++++++++ 6 files changed, 95 insertions(+), 3 deletions(-) create mode 100644 src/crewai_tools/tools/serply_api_tool/serply_web_to_markdown_tool.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index a9013b7ee..beb228936 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -25,5 +25,6 @@ from .tools import ( YoutubeVideoSearchTool, SerplyWebSearchTool, SerplyNewsSearchTool, - SerplyScholarSearchTool + SerplyScholarSearchTool, + SerplyWebpageToMarkdownTool ) \ No newline at end of file diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 138dbce17..11ceebfaa 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -24,3 +24,4 @@ from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSea from 
.serply_api_tool.serply_web_search_tool import SerplyWebSearchTool from .serply_api_tool.serply_news_search_tool import SerplyNewsSearchTool from .serply_api_tool.serply_scholar_search_tool import SerplyScholarSearchTool +from .serply_api_tool.serply_web_to_markdown_tool import SerplyWebpageToMarkdownTool diff --git a/src/crewai_tools/tools/serply_api_tool/README.md b/src/crewai_tools/tools/serply_api_tool/README.md index fe439b28f..22292bcf7 100644 --- a/src/crewai_tools/tools/serply_api_tool/README.md +++ b/src/crewai_tools/tools/serply_api_tool/README.md @@ -55,6 +55,45 @@ tool = SerplyScholarSearchTool() tool = SerplyScholarSearchTool(proxy_location="GB") ``` +## Web Page To Markdown +The following example demonstrates how to initialize the tool, fetch a web page, and convert it to markdown: + +```python +from crewai_tools import SerplyWebpageToMarkdownTool + +# Initialize the tool for internet searching capabilities +tool = SerplyWebpageToMarkdownTool() + +# change country news (DE - Germany) +tool = SerplyWebpageToMarkdownTool(proxy_location="DE") +``` + +## Combining Multiple Tools + +The following example demonstrates performing a Google search to find relevant articles, then converting those articles to markdown format for easier extraction of key points. + +```python +from crewai import Agent +from crewai_tools import SerplyWebSearchTool, SerplyWebpageToMarkdownTool + +search_tool = SerplyWebSearchTool() +convert_to_markdown = SerplyWebpageToMarkdownTool() + +# Creating a senior researcher agent with memory and verbose mode +researcher = Agent( + role='Senior Researcher', + goal='Uncover groundbreaking technologies in {topic}', + verbose=True, + memory=True, + backstory=( + "Driven by curiosity, you're at the forefront of " + "innovation, eager to explore and share knowledge that could change " + "the world."
+ ), + tools=[search_tool, convert_to_markdown], + allow_delegation=True +) +``` ## Steps to Get Started To effectively use the Serply tools, follow these steps: diff --git a/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py index c1fef5a77..40b1415b7 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py @@ -5,6 +5,7 @@ from typing import Type, Any, Optional from pydantic.v1 import BaseModel, Field from crewai_tools.tools.base_tool import BaseTool + class SerplyNewsSearchToolSchema(BaseModel): """Input for Serply News Search.""" search_query: str = Field(..., description="Mandatory search query you want to use to fetch news articles") diff --git a/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py index badc9950e..dc7449353 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py @@ -5,6 +5,7 @@ from typing import Type, Any, Optional from pydantic.v1 import BaseModel, Field from crewai_tools.tools.base_tool import BaseTool + class SerplyScholarSearchToolSchema(BaseModel): """Input for Serply Scholar Search.""" search_query: str = Field(..., description="Mandatory search query you want to use to fetch scholarly literature") @@ -41,8 +42,8 @@ class SerplyScholarSearchTool(BaseTool): } def _run( - self, - **kwargs: Any, + self, + **kwargs: Any, ) -> Any: query_payload = { "hl": self.hl diff --git a/src/crewai_tools/tools/serply_api_tool/serply_web_to_markdown_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_web_to_markdown_tool.py new file mode 100644 index 000000000..36a42a48f --- /dev/null +++ b/src/crewai_tools/tools/serply_api_tool/serply_web_to_markdown_tool.py @@ -0,0 +1,49 @@ +import os +import requests +from urllib.parse import urlencode +from typing import Type, Any, Optional +from pydantic.v1 import BaseModel, Field +from crewai_tools.tools.rag.rag_tool import RagTool + + +class SerplyWebpageToMarkdownToolSchema(BaseModel): + """Input for Serply Webpage to Markdown.""" + url: str = Field(..., description="Mandatory url you want to use to fetch and convert to markdown") + + +class SerplyWebpageToMarkdownTool(RagTool): + name: str = "Webpage to Markdown" + description: str = "A tool to convert a webpage to markdown to make it easier for LLMs to understand" + args_schema: Type[BaseModel] = SerplyWebpageToMarkdownToolSchema + request_url: str = "https://api.serply.io/v1/request" + proxy_location: Optional[str] = "US" + headers: Optional[dict] = {} + + def __init__( + self, + proxy_location: Optional[str] = "US", + **kwargs + ): + """ + proxy_location: (str): Where to make the request from, specifically for a specific country results.
+ ['US', 'CA', 'IE', 'GB', 'FR', 'DE', 'SE', 'IN', 'JP', 'KR', 'SG', 'AU', 'BR'] (defaults to US) + """ + super().__init__(**kwargs) + self.proxy_location = proxy_location + self.headers = { + "X-API-KEY": os.environ["SERPLY_API_KEY"], + "User-Agent": "crew-tools", + "X-Proxy-Location": proxy_location + } + + def _run( + self, + **kwargs: Any, + ) -> Any: + data = { + "url": kwargs["url"], + "method": "get", + "response_type": "markdown" + } + response = requests.request("POST", self.request_url, headers=self.headers, json=data) + return response.text From 2c0f90dd22cef5bb9fa1de9b13b5aba4ba6f4d54 Mon Sep 17 00:00:00 2001 From: teampen <136991215+teampen@users.noreply.github.com> Date: Tue, 11 Jun 2024 13:03:17 -0400 Subject: [PATCH 079/391] adding serply job search tool --- src/crewai_tools/__init__.py | 3 +- src/crewai_tools/tools/__init__.py | 3 +- .../tools/serply_api_tool/README.md | 13 +++- .../serply_api_tool/serply_job_search_tool.py | 75 +++++++++++++++++++ ....py => serply_webpage_to_markdown_tool.py} | 1 - 5 files changed, 91 insertions(+), 4 deletions(-) create mode 100644 src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py rename src/crewai_tools/tools/serply_api_tool/{serply_web_to_markdown_tool.py => serply_webpage_to_markdown_tool.py} (97%) diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index beb228936..ec5abc1ce 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -26,5 +26,6 @@ from .tools import ( SerplyWebSearchTool, SerplyNewsSearchTool, SerplyScholarSearchTool, - SerplyWebpageToMarkdownTool + SerplyWebpageToMarkdownTool, + SerplyJobSearchTool ) \ No newline at end of file diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 11ceebfaa..151401647 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -24,4 +24,5 @@ from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSea from .serply_api_tool.serply_web_search_tool import SerplyWebSearchTool from .serply_api_tool.serply_news_search_tool import SerplyNewsSearchTool from .serply_api_tool.serply_scholar_search_tool import SerplyScholarSearchTool -from .serply_api_tool.serply_web_to_markdown_tool import SerplyWebpageToMarkdownTool +from .serply_api_tool.serply_webpage_to_markdown_tool import SerplyWebpageToMarkdownTool +from .serply_api_tool.serply_job_search_tool import SerplyJobSearchTool diff --git a/src/crewai_tools/tools/serply_api_tool/README.md b/src/crewai_tools/tools/serply_api_tool/README.md index 22292bcf7..5c6b9395e 100644 --- a/src/crewai_tools/tools/serply_api_tool/README.md +++ b/src/crewai_tools/tools/serply_api_tool/README.md @@ -55,6 +55,17 @@ tool = SerplyScholarSearchTool() tool = SerplyScholarSearchTool(proxy_location="GB") ``` +## Job Search +The following example demonstrates how to initialize the tool and search for jobs in the USA: + +```python +from crewai_tools import SerplyJobSearchTool + +# Initialize the tool for internet searching capabilities +tool = SerplyJobSearchTool() +``` + + ## Web Page To Markdown The following example demonstrates how to initialize the tool, fetch a web page, and convert it to markdown: @@ -64,7 +75,7 @@ from crewai_tools import SerplyWebpageToMarkdownTool # Initialize the tool for internet searching capabilities tool = SerplyWebpageToMarkdownTool() -# change country news (DE - Germany) +# change country to make requests from (DE - Germany) tool = SerplyWebpageToMarkdownTool(proxy_location="DE") ``` diff
--git a/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py new file mode 100644 index 000000000..1013c3d47 --- /dev/null +++ b/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py @@ -0,0 +1,75 @@ +import os +import requests +from urllib.parse import urlencode +from typing import Type, Any, Optional +from pydantic.v1 import BaseModel, Field +from crewai_tools.tools.rag.rag_tool import RagTool + + +class SerplyJobSearchToolSchema(BaseModel): + """Input for Serply Job Search.""" + search_query: str = Field(..., description="Mandatory search query you want to use to fetch job postings.") + + +class SerplyJobSearchTool(RagTool): + name: str = "Job Search" + description: str = "A tool to perform a job search in the US with a search_query." + args_schema: Type[BaseModel] = SerplyJobSearchToolSchema + request_url: str = "https://api.serply.io/v1/job/search/" + proxy_location: Optional[str] = "US" + """ + proxy_location: (str): Where to get news, specifically for a specific country results. + - Currently only supports US + """ + headers: Optional[dict] = {} + + def __init__( + self, + **kwargs + ): + super().__init__(**kwargs) + self.headers = { + "X-API-KEY": os.environ["SERPLY_API_KEY"], + "User-Agent": "crew-tools", + "X-Proxy-Location": self.proxy_location + } + + def _run( + self, + **kwargs: Any, + ) -> Any: + query_payload = {} + + if "query" in kwargs: + query_payload["q"] = kwargs["query"] + elif "search_query" in kwargs: + query_payload["q"] = kwargs["search_query"] + + # build the url + url = f"{self.request_url}{urlencode(query_payload)}" + + response = requests.request("GET", url, headers=self.headers) + + jobs = response.json().get("jobs", "") + + if not jobs: + return "" + + string = [] + for job in jobs: + try: + string.append('\n'.join([ + f"Position: {job['position']}", + f"Employer: {job['employer']}", + f"Location: {job['location']}", + f"Link: {job['link']}", + f"""Highlights: {', '.join([h for h in job['highlights']])}""", + f"Is Remote: {job['is_remote']}", + f"Is Hybrid: {job['is_remote']}", + "---" + ])) + except KeyError: + next + + content = '\n'.join(string) + return f"\nSearch results: {content}\n" diff --git a/src/crewai_tools/tools/serply_api_tool/serply_web_to_markdown_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py similarity index 97% rename from src/crewai_tools/tools/serply_api_tool/serply_web_to_markdown_tool.py rename to src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py index 36a42a48f..c7692a066 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_web_to_markdown_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py @@ -1,6 +1,5 @@ import os import requests -from urllib.parse import urlencode from typing import Type, Any, Optional from pydantic.v1 import BaseModel, Field from crewai_tools.tools.rag.rag_tool import RagTool From 5e8e711170f8b003e65a46dc141f3974621dca9f Mon Sep 17 00:00:00 2001 From: Rip&Tear <84775494+theCyberTech@users.noreply.github.com> Date: Thu, 13 Jun 2024 12:53:35 +0800 Subject: [PATCH 080/391] Update serper_dev_tool.py Added two additional functionalities: 1) added the ability to save the search results to a file 2) added the ability to set the number of results returned Can be used as follows: serper_tool = SerperDevTool(save_file=True, n_results=20) --- .../tools/serper_dev_tool/serper_dev_tool.py | 19 +++++++++++++++++-- 1 file
changed, 17 insertions(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py index 927c0e3b3..bd6eaab54 100644 --- a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py +++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py @@ -6,6 +6,14 @@ from typing import Type, Any from pydantic.v1 import BaseModel, Field from crewai_tools.tools.base_tool import BaseTool +def _save_results_to_file(content: str) -> None: + """Saves the search results to a file.""" + filename = f"search_results_{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt" + with open(filename, 'w') as file: + file.write(content) + print(f"Results saved to {filename}") + + class SerperDevToolSchema(BaseModel): """Input for SerperDevTool.""" search_query: str = Field(..., description="Mandatory search query you want to use to search the internet") @@ -15,17 +23,22 @@ class SerperDevTool(BaseTool): description: str = "A tool that can be used to search the internet with a search_query." args_schema: Type[BaseModel] = SerperDevToolSchema search_url: str = "https://google.serper.dev/search" - n_results: int = 10 + n_results: int = Field(default=10, description="Number of search results to return") + save_file: bool = Field(default=False, description="Flag to determine whether to save the results to a file") def _run( self, **kwargs: Any, ) -> Any: + save_file = kwargs.get('save_file', self.save_file) + + n_results = kwargs.get('n_results', self.n_results) + search_query = kwargs.get('search_query') if search_query is None: search_query = kwargs.get('query') - payload = json.dumps({"q": search_query}) + payload = json.dumps({"q": search_query, "num": n_results}) headers = { 'X-API-KEY': os.environ['SERPER_API_KEY'], 'content-type': 'application/json' @@ -47,6 +60,8 @@ class SerperDevTool(BaseTool): next content = '\n'.join(string) + if save_file: + _save_results_to_file(content) return f"\nSearch results: {content}\n" else: return results From 806f88495668724ee847155c7232041844bcb8a5 Mon Sep 17 00:00:00 2001 From: teampen <136991215+teampen@users.noreply.github.com> Date: Fri, 14 Jun 2024 01:43:25 +0000 Subject: [PATCH 081/391] using GET in markdown --- .../tools/serply_api_tool/serply_job_search_tool.py | 4 ++-- .../tools/serply_api_tool/serply_news_search_tool.py | 2 +- .../tools/serply_api_tool/serply_scholar_search_tool.py | 2 +- .../tools/serply_api_tool/serply_web_search_tool.py | 2 +- .../tools/serply_api_tool/serply_webpage_to_markdown_tool.py | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py index 1013c3d47..358e312c7 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py @@ -18,7 +18,7 @@ class SerplyJobSearchTool(RagTool): request_url: str = "https://api.serply.io/v1/job/search/" proxy_location: Optional[str] = "US" """ - proxy_location: (str): Where to get news, specifically for a specific country results. + proxy_location: (str): Where to get jobs, specifically for a specific country results. 
- Currently only supports US """ headers: Optional[dict] = {} @@ -69,7 +69,7 @@ class SerplyJobSearchTool(RagTool): "---" ])) except KeyError: - next + continue content = '\n'.join(string) return f"\nSearch results: {content}\n" diff --git a/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py index 40b1415b7..f1127246e 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py @@ -73,7 +73,7 @@ class SerplyNewsSearchTool(BaseTool): "---" ])) except KeyError: - next + continue content = '\n'.join(string) return f"\nSearch results: {content}\n" diff --git a/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py index dc7449353..62c3bef7f 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py @@ -80,7 +80,7 @@ class SerplyScholarSearchTool(BaseTool): "---" ])) except KeyError: - next + continue content = '\n'.join(string) return f"\nSearch results: {content}\n" diff --git a/src/crewai_tools/tools/serply_api_tool/serply_web_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_web_search_tool.py index 5f146c673..894c24741 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_web_search_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_web_search_tool.py @@ -85,7 +85,7 @@ class SerplyWebSearchTool(BaseTool): "---" ])) except KeyError: - next + continue content = '\n'.join(string) return f"\nSearch results: {content}\n" diff --git a/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py index c7692a066..27ffc54ce 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py @@ -41,7 +41,7 @@ class SerplyWebpageToMarkdownTool(RagTool): ) -> Any: data = { "url": kwargs["url"], - "method": "get", + "method": "GET", "response_type": "markdown" } response = requests.request("POST", self.request_url, headers=self.headers, json=data) From 2b47377a7849be73c8ecac19338aaa7811bdbd27 Mon Sep 17 00:00:00 2001 From: Eduardo Chiarotti Date: Wed, 19 Jun 2024 20:45:04 -0300 Subject: [PATCH 082/391] feat: add code-interpreter tool --- .../tools/code_interpreter_tool/Dockerfile | 21 +++++++ .../tools/code_interpreter_tool/README.md | 0 .../code_interpreter_tool.py | 57 +++++++++++++++++++ 3 files changed, 78 insertions(+) create mode 100644 src/crewai_tools/tools/code_interpreter_tool/Dockerfile create mode 100644 src/crewai_tools/tools/code_interpreter_tool/README.md create mode 100644 src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py diff --git a/src/crewai_tools/tools/code_interpreter_tool/Dockerfile b/src/crewai_tools/tools/code_interpreter_tool/Dockerfile new file mode 100644 index 000000000..b72a51a88 --- /dev/null +++ b/src/crewai_tools/tools/code_interpreter_tool/Dockerfile @@ -0,0 +1,21 @@ +# Use an official Ubuntu as a parent image +FROM ubuntu:20.04 + +# Set environment variables +ENV DEBIAN_FRONTEND=noninteractive + +# Install common utilities +RUN apt-get update && apt-get install -y \ + build-essential \ + curl \ + wget \ + software-properties-common + +# Install Python +RUN apt-get install -y python3 
python3-pip + +# Clean up +RUN apt-get clean && rm -rf /var/lib/apt/lists/* + +# Set the working directory +WORKDIR /workspace diff --git a/src/crewai_tools/tools/code_interpreter_tool/README.md b/src/crewai_tools/tools/code_interpreter_tool/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py new file mode 100644 index 000000000..a2066ca03 --- /dev/null +++ b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py @@ -0,0 +1,57 @@ +from typing import Optional, Type + +import docker +from crewai_tools.tools.base_tool import BaseTool +from pydantic.v1 import BaseModel, Field + + +class FixedCodeInterpreterSchemaSchema(BaseModel): + """Input for DirectoryReadTool.""" + + pass + + +class CodeInterpreterSchema(FixedCodeInterpreterSchemaSchema): + """Input for DirectoryReadTool.""" + + code: str = Field( + ..., + description="Python3 code used to be interpreted in the Docker container and output the result", + ) + + +class CodeInterpreterTool(BaseTool): + name: str = "Code Interpreter" + description: str = "Interprets Python code in a Docker container" + args_schema: Type[BaseModel] = CodeInterpreterSchema + code: Optional[str] = None + + def __init__(self, code: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if code is not None: + self.code = code + self.description = ( + "A tool that can be used to run Python code in a Docker container" + ) + self.args_schema = FixedCodeInterpreterSchemaSchema + self._generate_description() + + def _run(self, **kwargs): + code = kwargs.get("code", self.code) + return self.run_code_in_docker(code) + + def run_code_in_docker(self, code): + client = docker.from_env() + container = client.containers.run( + "code-interpreter", + command=f'python3 -c "{code}"', + detach=True, + working_dir="/workspace", + ) + + result = container.logs().decode("utf-8") + + container.stop() + container.remove() + + return result From bd13b55afd229fa830a944063fa90b33a2371b6d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Mon, 20 May 2024 11:02:20 -0300 Subject: [PATCH 083/391] Adding new PDFTextWritingTool --- .../pdf_text_writing_tool.py | 66 +++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 src/crewai_tools/tools/pdf_text_writing_tool/pdf_text_writing_tool.py diff --git a/src/crewai_tools/tools/pdf_text_writing_tool/pdf_text_writing_tool.py b/src/crewai_tools/tools/pdf_text_writing_tool/pdf_text_writing_tool.py new file mode 100644 index 000000000..c3a686b14 --- /dev/null +++ b/src/crewai_tools/tools/pdf_text_writing_tool/pdf_text_writing_tool.py @@ -0,0 +1,66 @@ +from typing import Any, Optional, Type +from pydantic import BaseModel, Field +from pypdf import PdfReader, PdfWriter, PageObject, ContentStream, NameObject, Font +from pathlib import Path + + +class PDFTextWritingToolSchema(BaseModel): + """Input schema for PDFTextWritingTool.""" + pdf_path: str = Field(..., description="Path to the PDF file to modify") + text: str = Field(..., description="Text to add to the PDF") + position: tuple = Field(..., description="Tuple of (x, y) coordinates for text placement") + font_size: int = Field(default=12, description="Font size of the text") + font_color: str = Field(default="0 0 0 rg", description="RGB color code for the text") + font_name: Optional[str] = Field(default="F1", description="Font name for standard fonts") + font_file: Optional[str] = 
Field(None, description="Path to a .ttf font file for custom font usage") + page_number: int = Field(default=0, description="Page number to add text to") + + +class PDFTextWritingTool(RagTool): + """A tool to add text to specific positions in a PDF, with custom font support.""" + name: str = "PDF Text Writing Tool" + description: str = "A tool that can write text to a specific position in a PDF document, with optional custom font embedding." + args_schema: Type[BaseModel] = PDFTextWritingToolSchema + + def run(self, pdf_path: str, text: str, position: tuple, font_size: int, font_color: str, + font_name: str = "F1", font_file: Optional[str] = None, page_number: int = 0, **kwargs) -> str: + reader = PdfReader(pdf_path) + writer = PdfWriter() + + if page_number >= len(reader.pages): + return "Page number out of range." + + page: PageObject = reader.pages[page_number] + content = ContentStream(page["/Contents"].data, reader) + + if font_file: + # Check if the font file exists + if not Path(font_file).exists(): + return "Font file does not exist." + + # Embed the custom font + font_name = self.embed_font(writer, font_file) + + # Prepare text operation with the custom or standard font + x_position, y_position = position + text_operation = f"BT /{font_name} {font_size} Tf {x_position} {y_position} Td ({text}) Tj ET" + content.operations.append([font_color]) # Set color + content.operations.append([text_operation]) # Add text + + # Replace old content with new content + page[NameObject("/Contents")] = content + writer.add_page(page) + + # Save the new PDF + output_pdf_path = "modified_output.pdf" + with open(output_pdf_path, "wb") as out_file: + writer.write(out_file) + + return f"Text added to {output_pdf_path} successfully." + + def embed_font(self, writer: PdfWriter, font_file: str) -> str: + """Embeds a TTF font into the PDF and returns the font name.""" + with open(font_file, "rb") as file: + font = Font.true_type(file.read()) + font_ref = writer.add_object(font) + return font_ref \ No newline at end of file From da75d51fe8431aae6b7333cdeee0ecf5d9e91843 Mon Sep 17 00:00:00 2001 From: Eduardo Chiarotti Date: Thu, 20 Jun 2024 20:24:26 -0300 Subject: [PATCH 084/391] feat: add Dockerfile, Makefile and update version of code --- .../tools/code_interpreter_tool/Dockerfile | 9 +--- .../tools/code_interpreter_tool/Makefile | 6 +++ .../code_interpreter_tool.py | 51 ++++++++++++------- 3 files changed, 41 insertions(+), 25 deletions(-) create mode 100644 src/crewai_tools/tools/code_interpreter_tool/Makefile diff --git a/src/crewai_tools/tools/code_interpreter_tool/Dockerfile b/src/crewai_tools/tools/code_interpreter_tool/Dockerfile index b72a51a88..ae9b2ffd6 100644 --- a/src/crewai_tools/tools/code_interpreter_tool/Dockerfile +++ b/src/crewai_tools/tools/code_interpreter_tool/Dockerfile @@ -1,8 +1,4 @@ -# Use an official Ubuntu as a parent image -FROM ubuntu:20.04 - -# Set environment variables -ENV DEBIAN_FRONTEND=noninteractive +FROM python:3.11-slim # Install common utilities RUN apt-get update && apt-get install -y \ @@ -11,9 +7,6 @@ RUN apt-get update && apt-get install -y \ wget \ software-properties-common -# Install Python -RUN apt-get install -y python3 python3-pip - # Clean up RUN apt-get clean && rm -rf /var/lib/apt/lists/* diff --git a/src/crewai_tools/tools/code_interpreter_tool/Makefile b/src/crewai_tools/tools/code_interpreter_tool/Makefile new file mode 100644 index 000000000..5a514db0a --- /dev/null +++ b/src/crewai_tools/tools/code_interpreter_tool/Makefile @@ -0,0 +1,6 @@ +# Makefile 
+IMAGE_NAME=code-interpreter +TAG=latest + +build: + docker build -t $(IMAGE_NAME):$(TAG) . diff --git a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py index a2066ca03..f497a7c96 100644 --- a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py +++ b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py @@ -6,23 +6,27 @@ from pydantic.v1 import BaseModel, Field class FixedCodeInterpreterSchemaSchema(BaseModel): - """Input for DirectoryReadTool.""" + """Input for CodeInterpreterTool.""" pass class CodeInterpreterSchema(FixedCodeInterpreterSchemaSchema): - """Input for DirectoryReadTool.""" + """Input for CodeInterpreterTool.""" code: str = Field( ..., - description="Python3 code used to be interpreted in the Docker container and output the result", + description="Python3 code used to be interpreted in the Docker container. ALWAYS PRINT the final result and the output of the code", + ) + libraries_used: Optional[str] = Field( + None, + description="List of libraries used in the code with proper installing names separated by commas. Example: numpy,pandas,beautifulsoup4", ) class CodeInterpreterTool(BaseTool): name: str = "Code Interpreter" - description: str = "Interprets Python code in a Docker container" + description: str = "Interprets Python code in a Docker container. ALWAYS PRINT the final result and the output of the code" args_schema: Type[BaseModel] = CodeInterpreterSchema code: Optional[str] = None @@ -30,28 +34,41 @@ class CodeInterpreterTool(BaseTool): super().__init__(**kwargs) if code is not None: self.code = code - self.description = ( - "A tool that can be used to run Python code in a Docker container" - ) + self.description = "Interprets Python code in a Docker container. 
ALWAYS PRINT the final result and the output of the code" self.args_schema = FixedCodeInterpreterSchemaSchema self._generate_description() def _run(self, **kwargs): code = kwargs.get("code", self.code) - return self.run_code_in_docker(code) + libraries_used = kwargs.get("libraries_used", None) + return self.run_code_in_docker(code, libraries_used) - def run_code_in_docker(self, code): + def run_code_in_docker(self, code, libraries_used): client = docker.from_env() - container = client.containers.run( - "code-interpreter", - command=f'python3 -c "{code}"', - detach=True, - working_dir="/workspace", - ) - result = container.logs().decode("utf-8") + def run_code(container, code): + cmd_to_run = f'python3 -c "{code}"' + exec_result = container.exec_run(cmd_to_run) + return exec_result + + container = client.containers.run( + "code-interpreter", detach=True, tty=True, working_dir="/workspace" + ) + if libraries_used: + self._install_libraries(container, libraries_used.split(",")) + + exec_result = run_code(container, code) container.stop() container.remove() + if exec_result.exit_code != 0: + return f"Something went wrong while running the code: \n{exec_result.output.decode('utf-8')}" - return result + return exec_result.output.decode("utf-8") + + def _install_libraries(self, container, libraries): + """ + Install missing libraries in the Docker container + """ + for library in libraries: + container.exec_run(f"pip install {library}") From 1a4ac76b1eb11ec70a11784a4f6b1d33948e9171 Mon Sep 17 00:00:00 2001 From: Eduardo Chiarotti Date: Thu, 20 Jun 2024 20:24:47 -0300 Subject: [PATCH 085/391] feat: update code --- .../code_interpreter_tool.py | 52 +++++++++---------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py index f497a7c96..caafd44e3 100644 --- a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py +++ b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py @@ -30,7 +30,7 @@ class CodeInterpreterTool(BaseTool): args_schema: Type[BaseModel] = CodeInterpreterSchema code: Optional[str] = None - def __init__(self, code: Optional[str] = None, **kwargs): + def __init__(self, code: Optional[str] = None, **kwargs) -> None: super().__init__(**kwargs) if code is not None: self.code = code @@ -38,37 +38,37 @@ class CodeInterpreterTool(BaseTool): self.args_schema = FixedCodeInterpreterSchemaSchema self._generate_description() - def _run(self, **kwargs): + def _run(self, **kwargs) -> str: code = kwargs.get("code", self.code) libraries_used = kwargs.get("libraries_used", None) return self.run_code_in_docker(code, libraries_used) - def run_code_in_docker(self, code, libraries_used): - client = docker.from_env() - - def run_code(container, code): - cmd_to_run = f'python3 -c "{code}"' - exec_result = container.exec_run(cmd_to_run) - return exec_result - - container = client.containers.run( - "code-interpreter", detach=True, tty=True, working_dir="/workspace" - ) - if libraries_used: - self._install_libraries(container, libraries_used.split(",")) - - exec_result = run_code(container, code) - - container.stop() - container.remove() - if exec_result.exit_code != 0: - return f"Something went wrong while running the code: \n{exec_result.output.decode('utf-8')}" - - return exec_result.output.decode("utf-8") - - def _install_libraries(self, container, libraries): + def _install_libraries( + self, container: 
docker.models.containers.Container, libraries: list[str] + ) -> None: """ Install missing libraries in the Docker container """ for library in libraries: container.exec_run(f"pip install {library}") + + def _init_docker_container(self) -> docker.models.containers.Container: + client = docker.from_env() + return client.containers.run( + "code-interpreter", detach=True, tty=True, working_dir="/workspace" + ) + + def run_code_in_docker(self, code: str, libraries_used: str) -> str: + container = self._init_docker_container() + + if libraries_used: + self._install_libraries(container, libraries_used.split(",")) + + cmd_to_run = f'python3 -c "{code}"' + exec_result = container.exec_run(cmd_to_run) + + container.stop().remove() + + if exec_result.exit_code != 0: + return f"Something went wrong while running the code: \n{exec_result.output.decode('utf-8')}" + return exec_result.output.decode("utf-8") From 94e6651b55cbebd85b0610787de5f4429e0778b1 Mon Sep 17 00:00:00 2001 From: Eduardo Chiarotti Date: Thu, 20 Jun 2024 20:43:19 -0300 Subject: [PATCH 086/391] feat: add code-interpreter tool to init and add unit tests --- src/crewai_tools/tools/__init__.py | 17 ++++++---- tests/tools/test_code_interpreter_tool.py | 38 +++++++++++++++++++++++ 2 files changed, 49 insertions(+), 6 deletions(-) create mode 100644 tests/tools/test_code_interpreter_tool.py diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 4da0c0337..35b81396b 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -1,24 +1,29 @@ from .browserbase_load_tool.browserbase_load_tool import BrowserbaseLoadTool from .code_docs_search_tool.code_docs_search_tool import CodeDocsSearchTool +from .code_interpreter_tool.code_interpreter_tool import CodeInterpreterTool from .csv_search_tool.csv_search_tool import CSVSearchTool -from .directory_search_tool.directory_search_tool import DirectorySearchTool from .directory_read_tool.directory_read_tool import DirectoryReadTool +from .directory_search_tool.directory_search_tool import DirectorySearchTool from .docx_search_tool.docx_search_tool import DOCXSearchTool from .exa_tools.exa_search_tool import EXASearchTool from .file_read_tool.file_read_tool import FileReadTool from .github_search_tool.github_search_tool import GithubSearchTool -from .serper_dev_tool.serper_dev_tool import SerperDevTool -from .txt_search_tool.txt_search_tool import TXTSearchTool from .json_search_tool.json_search_tool import JSONSearchTool +from .llamaindex_tool.llamaindex_tool import LlamaIndexTool from .mdx_seach_tool.mdx_search_tool import MDXSearchTool from .pdf_search_tool.pdf_search_tool import PDFSearchTool from .pg_seach_tool.pg_search_tool import PGSearchTool from .rag.rag_tool import RagTool -from .scrape_element_from_website.scrape_element_from_website import ScrapeElementFromWebsiteTool +from .scrape_element_from_website.scrape_element_from_website import ( + ScrapeElementFromWebsiteTool, +) from .scrape_website_tool.scrape_website_tool import ScrapeWebsiteTool from .selenium_scraping_tool.selenium_scraping_tool import SeleniumScrapingTool +from .serper_dev_tool.serper_dev_tool import SerperDevTool +from .txt_search_tool.txt_search_tool import TXTSearchTool from .website_search.website_search_tool import WebsiteSearchTool from .xml_search_tool.xml_search_tool import XMLSearchTool -from .youtube_channel_search_tool.youtube_channel_search_tool import YoutubeChannelSearchTool +from .youtube_channel_search_tool.youtube_channel_search_tool import ( + 
YoutubeChannelSearchTool, +) from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool diff --git a/tests/tools/test_code_interpreter_tool.py b/tests/tools/test_code_interpreter_tool.py new file mode 100644 index 000000000..a9ffb9dbc --- /dev/null +++ b/tests/tools/test_code_interpreter_tool.py @@ -0,0 +1,38 @@ +import unittest +from unittest.mock import patch + +from crewai_tools.tools.code_interpreter_tool.code_interpreter_tool import ( + CodeInterpreterTool, +) + + +class TestCodeInterpreterTool(unittest.TestCase): + @patch("crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker") + def test_run_code_in_docker(self, docker_mock): + tool = CodeInterpreterTool() + code = "print('Hello, World!')" + libraries_used = "numpy,pandas" + expected_output = "Hello, World!\n" + + docker_mock.from_env().containers.run().exec_run().exit_code = 0 + docker_mock.from_env().containers.run().exec_run().output = ( + expected_output.encode() + ) + result = tool.run_code_in_docker(code, libraries_used) + + self.assertEqual(result, expected_output) + + @patch("crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker") + def test_run_code_in_docker_with_error(self, docker_mock): + tool = CodeInterpreterTool() + code = "print(1/0)" + libraries_used = "numpy,pandas" + expected_output = "Something went wrong while running the code: \nZeroDivisionError: division by zero\n" + + docker_mock.from_env().containers.run().exec_run().exit_code = 1 + docker_mock.from_env().containers.run().exec_run().output = ( + b"ZeroDivisionError: division by zero\n" + ) + result = tool.run_code_in_docker(code, libraries_used) + + self.assertEqual(result, expected_output) From 61cce93fd020fa4fce74cc80f64e07f2a777215d Mon Sep 17 00:00:00 2001 From: Eduardo Chiarotti Date: Thu, 20 Jun 2024 21:41:12 -0300 Subject: [PATCH 087/391] feat: remove unused Makefile, update README and update code --- .../tools/code_interpreter_tool/Makefile | 6 ----- .../tools/code_interpreter_tool/README.md | 27 +++++++++++++++++++ .../code_interpreter_tool.py | 22 ++++++++++++++- 3 files changed, 48 insertions(+), 7 deletions(-) delete mode 100644 src/crewai_tools/tools/code_interpreter_tool/Makefile diff --git a/src/crewai_tools/tools/code_interpreter_tool/Makefile b/src/crewai_tools/tools/code_interpreter_tool/Makefile deleted file mode 100644 index 5a514db0a..000000000 --- a/src/crewai_tools/tools/code_interpreter_tool/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -# Makefile -IMAGE_NAME=code-interpreter -TAG=latest - -build: - docker build -t $(IMAGE_NAME):$(TAG) . diff --git a/src/crewai_tools/tools/code_interpreter_tool/README.md b/src/crewai_tools/tools/code_interpreter_tool/README.md index e69de29bb..672c86f21 100644 --- a/src/crewai_tools/tools/code_interpreter_tool/README.md +++ b/src/crewai_tools/tools/code_interpreter_tool/README.md @@ -0,0 +1,27 @@ +# CodeInterpreterTool + +## Description +This tool is used to give the Agent the ability to run code (Python3) from the code generated by the Agent itself. The code is executed in a sandboxed environment, so it is safe to run any code. + +## Requirements + +- Docker + +## Installation +Install the crewai_tools package +```shell +pip install 'crewai[tools]' +``` + +## Example + +Remember that when using this tool, the code must be generated by the Agent itself. The code must be Python3 code, and the first run will take some time because it needs to build the Docker image.
+
+```python
+from crewai_tools import CodeInterpreterTool
+
+Agent(
+    ...
+    tools=[CodeInterpreterTool()],
+)
+```
diff --git a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py
index caafd44e3..06cb081f0 100644
--- a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py
+++ b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py
@@ -1,3 +1,4 @@
+import os
 from typing import Optional, Type
 
 import docker
@@ -30,9 +31,27 @@ class CodeInterpreterTool(BaseTool):
     args_schema: Type[BaseModel] = CodeInterpreterSchema
     code: Optional[str] = None
 
+    def _verify_docker_image(self) -> None:
+        """
+        Verify if the Docker image is available
+        """
+        image_tag = "code-interpreter:latest"
+
+        client = docker.from_env()
+        images = client.images.list()
+        all_tags = [tag for image in images for tag in image.tags]
+
+        if image_tag not in all_tags:
+            client.images.build(
+                path=os.path.dirname(os.path.abspath(__file__)),
+                tag=image_tag,
+                rm=True,
+            )
+
     def __init__(self, code: Optional[str] = None, **kwargs) -> None:
         super().__init__(**kwargs)
         if code is not None:
+            self._verify_docker_image()
             self.code = code
             self.description = "Interprets Python code in a Docker container. ALWAYS PRINT the final result and the output of the code"
             self.args_schema = FixedCodeInterpreterSchemaSchema
@@ -67,7 +86,8 @@ class CodeInterpreterTool(BaseTool):
         cmd_to_run = f'python3 -c "{code}"'
         exec_result = container.exec_run(cmd_to_run)
 
-        container.stop().remove()
+        container.stop()
+        container.remove()
 
         if exec_result.exit_code != 0:
             return f"Something went wrong while running the code: \n{exec_result.output.decode('utf-8')}"

From 161c72b29f553f6b3758b9408ff9423349c5bba7 Mon Sep 17 00:00:00 2001
From: Eduardo Chiarotti
Date: Thu, 20 Jun 2024 21:55:25 -0300
Subject: [PATCH 088/391] feat: update README

---
 src/crewai_tools/tools/code_interpreter_tool/README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/crewai_tools/tools/code_interpreter_tool/README.md b/src/crewai_tools/tools/code_interpreter_tool/README.md
index 672c86f21..e66a82e39 100644
--- a/src/crewai_tools/tools/code_interpreter_tool/README.md
+++ b/src/crewai_tools/tools/code_interpreter_tool/README.md
@@ -3,6 +3,8 @@
 ## Description
 This tool gives the Agent the ability to run Python3 code that the Agent itself generates. The code is executed in a sandboxed environment, so it is safe to run arbitrary code.
 
+It is incredibly useful since it allows the Agent to generate code, run it in the same environment, get the result, and use it to make decisions.
+
 ## Requirements
 
 - Docker

From 2f80840c748e5b501bf688cff95713c1c5c18ac2 Mon Sep 17 00:00:00 2001
From: Jakub Strnad
Date: Fri, 21 Jun 2024 15:06:17 +0200
Subject: [PATCH 089/391] fix: Ensure tools handle parameters passed post-creation correctly (#3)

- Fixed an issue where multiple tools failed to function if parameters were provided after tool creation.
- Updated tools to correctly process source file/URL passed by the agent post-creation as per documentation.
Closes #<47> --- .../tools/code_docs_search_tool/code_docs_search_tool.py | 2 +- src/crewai_tools/tools/csv_search_tool/csv_search_tool.py | 2 +- .../tools/directory_search_tool/directory_search_tool.py | 2 +- src/crewai_tools/tools/docx_search_tool/docx_search_tool.py | 2 +- src/crewai_tools/tools/github_search_tool/github_search_tool.py | 2 +- src/crewai_tools/tools/json_search_tool/json_search_tool.py | 2 +- src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py | 2 +- src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py | 2 +- src/crewai_tools/tools/txt_search_tool/txt_search_tool.py | 2 +- src/crewai_tools/tools/website_search/website_search_tool.py | 2 +- src/crewai_tools/tools/xml_search_tool/xml_search_tool.py | 2 +- .../youtube_channel_search_tool/youtube_channel_search_tool.py | 2 +- .../youtube_video_search_tool/youtube_video_search_tool.py | 2 +- 13 files changed, 13 insertions(+), 13 deletions(-) diff --git a/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py b/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py index a69ea7eb4..899943511 100644 --- a/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py +++ b/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py @@ -57,4 +57,4 @@ class CodeDocsSearchTool(RagTool): search_query: str, **kwargs: Any, ) -> Any: - return super()._run(query=search_query) + return super()._run(query=search_query, **kwargs) diff --git a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py index a04f227ca..9d0509f88 100644 --- a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py +++ b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py @@ -57,4 +57,4 @@ class CSVSearchTool(RagTool): search_query: str, **kwargs: Any, ) -> Any: - return super()._run(query=search_query) + return super()._run(query=search_query, **kwargs) diff --git a/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py b/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py index 9a988a7fa..a06229081 100644 --- a/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py +++ b/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py @@ -57,4 +57,4 @@ class DirectorySearchTool(RagTool): search_query: str, **kwargs: Any, ) -> Any: - return super()._run(query=search_query) + return super()._run(query=search_query, **kwargs) diff --git a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py index 96bb4721b..b60dfd0f5 100644 --- a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py +++ b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py @@ -63,4 +63,4 @@ class DOCXSearchTool(RagTool): docx = kwargs.get("docx") if docx is not None: self.add(docx) - return super()._run(query=search_query) + return super()._run(query=search_query, **kwargs) diff --git a/src/crewai_tools/tools/github_search_tool/github_search_tool.py b/src/crewai_tools/tools/github_search_tool/github_search_tool.py index 5bfa65542..2ec39c8c0 100644 --- a/src/crewai_tools/tools/github_search_tool/github_search_tool.py +++ b/src/crewai_tools/tools/github_search_tool/github_search_tool.py @@ -68,4 +68,4 @@ class GithubSearchTool(RagTool): search_query: str, **kwargs: Any, ) -> Any: - return super()._run(query=search_query) + return super()._run(query=search_query, **kwargs) diff --git 
a/src/crewai_tools/tools/json_search_tool/json_search_tool.py b/src/crewai_tools/tools/json_search_tool/json_search_tool.py index 102cd89ad..930438c88 100644 --- a/src/crewai_tools/tools/json_search_tool/json_search_tool.py +++ b/src/crewai_tools/tools/json_search_tool/json_search_tool.py @@ -57,4 +57,4 @@ class JSONSearchTool(RagTool): search_query: str, **kwargs: Any, ) -> Any: - return super()._run(query=search_query) + return super()._run(query=search_query, **kwargs) diff --git a/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py b/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py index 99bd37348..69572140b 100644 --- a/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py +++ b/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py @@ -57,4 +57,4 @@ class MDXSearchTool(RagTool): search_query: str, **kwargs: Any, ) -> Any: - return super()._run(query=search_query) + return super()._run(query=search_query, **kwargs) diff --git a/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py b/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py index 226fb1ddd..6f9ea2901 100644 --- a/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py +++ b/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py @@ -41,4 +41,4 @@ class PGSearchTool(RagTool): search_query: str, **kwargs: Any, ) -> Any: - return super()._run(query=search_query) + return super()._run(query=search_query, **kwargs) diff --git a/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py b/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py index 921e633e8..5dbaed4d4 100644 --- a/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py +++ b/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py @@ -57,4 +57,4 @@ class TXTSearchTool(RagTool): search_query: str, **kwargs: Any, ) -> Any: - return super()._run(query=search_query) + return super()._run(query=search_query, **kwargs) diff --git a/src/crewai_tools/tools/website_search/website_search_tool.py b/src/crewai_tools/tools/website_search/website_search_tool.py index cfe163ae8..1ff587f00 100644 --- a/src/crewai_tools/tools/website_search/website_search_tool.py +++ b/src/crewai_tools/tools/website_search/website_search_tool.py @@ -57,4 +57,4 @@ class WebsiteSearchTool(RagTool): search_query: str, **kwargs: Any, ) -> Any: - return super()._run(query=search_query) + return super()._run(query=search_query, **kwargs) diff --git a/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py b/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py index 53fd73248..0346d484e 100644 --- a/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py +++ b/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py @@ -57,4 +57,4 @@ class XMLSearchTool(RagTool): search_query: str, **kwargs: Any, ) -> Any: - return super()._run(query=search_query) + return super()._run(query=search_query, **kwargs) diff --git a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py index 8e9591be8..2edc0026b 100644 --- a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py +++ b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py @@ -60,4 +60,4 @@ class YoutubeChannelSearchTool(RagTool): search_query: str, **kwargs: Any, ) -> Any: - return super()._run(query=search_query) + return super()._run(query=search_query, **kwargs) diff --git a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py 
b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py index f1caa1b9c..77d25752e 100644 --- a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py +++ b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py @@ -57,4 +57,4 @@ class YoutubeVideoSearchTool(RagTool): search_query: str, **kwargs: Any, ) -> Any: - return super()._run(query=search_query) + return super()._run(query=search_query, **kwargs) From c97678bb1106ba8e7c0a9837736f89a5df5f2cd1 Mon Sep 17 00:00:00 2001 From: Naman Garg Date: Fri, 21 Jun 2024 14:48:24 -0700 Subject: [PATCH 090/391] add multion tool --- src/crewai_tools/__init__.py | 53 +++++++++--------- src/crewai_tools/tools/__init__.py | 17 ++++-- src/crewai_tools/tools/multion_tool/README.md | 31 ++++++++++ .../tools/multion_tool/example.py | 29 ++++++++++ .../tools/multion_tool/multion_tool.py | 56 +++++++++++++++++++ 5 files changed, 154 insertions(+), 32 deletions(-) create mode 100644 src/crewai_tools/tools/multion_tool/README.md create mode 100644 src/crewai_tools/tools/multion_tool/example.py create mode 100644 src/crewai_tools/tools/multion_tool/multion_tool.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index a51d70449..d0c4746df 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -1,27 +1,28 @@ -from .tools.base_tool import BaseTool, Tool, tool from .tools import ( - BrowserbaseLoadTool, - CodeDocsSearchTool, - CSVSearchTool, - DirectorySearchTool, - DOCXSearchTool, - DirectoryReadTool, - EXASearchTool, - FileReadTool, - GithubSearchTool, - SerperDevTool, - TXTSearchTool, - JSONSearchTool, - MDXSearchTool, - PDFSearchTool, - PGSearchTool, - RagTool, - ScrapeElementFromWebsiteTool, - ScrapeWebsiteTool, - SeleniumScrapingTool, - WebsiteSearchTool, - XMLSearchTool, - YoutubeChannelSearchTool, - YoutubeVideoSearchTool, - LlamaIndexTool -) \ No newline at end of file + BrowserbaseLoadTool, + CodeDocsSearchTool, + CSVSearchTool, + DirectoryReadTool, + DirectorySearchTool, + DOCXSearchTool, + EXASearchTool, + FileReadTool, + GithubSearchTool, + JSONSearchTool, + LlamaIndexTool, + MDXSearchTool, + MultiOnTool, + PDFSearchTool, + PGSearchTool, + RagTool, + ScrapeElementFromWebsiteTool, + ScrapeWebsiteTool, + SeleniumScrapingTool, + SerperDevTool, + TXTSearchTool, + WebsiteSearchTool, + XMLSearchTool, + YoutubeChannelSearchTool, + YoutubeVideoSearchTool, +) +from .tools.base_tool import BaseTool, Tool, tool diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 4da0c0337..11074bbe3 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -1,24 +1,29 @@ from .browserbase_load_tool.browserbase_load_tool import BrowserbaseLoadTool from .code_docs_search_tool.code_docs_search_tool import CodeDocsSearchTool from .csv_search_tool.csv_search_tool import CSVSearchTool -from .directory_search_tool.directory_search_tool import DirectorySearchTool from .directory_read_tool.directory_read_tool import DirectoryReadTool +from .directory_search_tool.directory_search_tool import DirectorySearchTool from .docx_search_tool.docx_search_tool import DOCXSearchTool from .exa_tools.exa_search_tool import EXASearchTool from .file_read_tool.file_read_tool import FileReadTool from .github_search_tool.github_search_tool import GithubSearchTool -from .serper_dev_tool.serper_dev_tool import SerperDevTool -from .txt_search_tool.txt_search_tool import TXTSearchTool from 
.json_search_tool.json_search_tool import JSONSearchTool
+from .llamaindex_tool.llamaindex_tool import LlamaIndexTool
 from .mdx_seach_tool.mdx_search_tool import MDXSearchTool
+from .multion_tool.multion_tool import MultiOnTool
 from .pdf_search_tool.pdf_search_tool import PDFSearchTool
 from .pg_seach_tool.pg_search_tool import PGSearchTool
 from .rag.rag_tool import RagTool
-from .scrape_element_from_website.scrape_element_from_website import ScrapeElementFromWebsiteTool
+from .scrape_element_from_website.scrape_element_from_website import (
+    ScrapeElementFromWebsiteTool,
+)
 from .scrape_website_tool.scrape_website_tool import ScrapeWebsiteTool
 from .selenium_scraping_tool.selenium_scraping_tool import SeleniumScrapingTool
+from .serper_dev_tool.serper_dev_tool import SerperDevTool
+from .txt_search_tool.txt_search_tool import TXTSearchTool
 from .website_search.website_search_tool import WebsiteSearchTool
 from .xml_search_tool.xml_search_tool import XMLSearchTool
-from .youtube_channel_search_tool.youtube_channel_search_tool import YoutubeChannelSearchTool
+from .youtube_channel_search_tool.youtube_channel_search_tool import (
+    YoutubeChannelSearchTool,
+)
 from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool
-from .llamaindex_tool.llamaindex_tool import LlamaIndexTool
diff --git a/src/crewai_tools/tools/multion_tool/README.md b/src/crewai_tools/tools/multion_tool/README.md
new file mode 100644
index 000000000..0cbbbb2cd
--- /dev/null
+++ b/src/crewai_tools/tools/multion_tool/README.md
@@ -0,0 +1,31 @@
+# MultiOnTool Documentation
+
+## Description
+The MultiOnTool, integrated within the crewai_tools package, empowers CrewAI agents with the capability to navigate and interact with the web through natural language instructions. Leveraging the MultiOn API, this tool facilitates seamless web browsing, making it an essential asset for projects requiring dynamic web data interaction.
+
+## Installation
+Ensure the `crewai[tools]` package is installed in your environment to use the MultiOnTool. If it's not already installed, you can add it using the command below:
+
+## Example
+The following example demonstrates how to initialize the tool and execute a search with a given query:
+
+```python
+from crewai_tools import MultiOnTool
+
+# Initialize the tool from a MultiOn Tool
+multion_tool = MultiOnTool(api_key="YOUR_MULTION_API_KEY", local=False)
+
+```
+
+## Arguments
+
+- `api_key`: Specifies the MultiOn API key. Default is the `MULTION_API_KEY` environment variable.
+- `local`: Optional. Use the local flag to run the agent locally on your browser.
+
+## Steps to Get Started
+To effectively use the `MultiOnTool`, follow these steps:
+
+1. **Install CrewAI**: Confirm that the `crewai[tools]` package is installed in your Python environment.
+2. **Install and use MultiOn**: Follow MultiOn documentation (https://docs.multion.ai/).
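One behavior worth calling out: as the tool source added later in this same patch notes, MultiOn appends a status to each response, and a `CONTINUE` status means the same instruction should be reissued. Agents learn this from the tool description; when driving the tool by hand you would loop yourself. Below is a minimal sketch, assuming the `multion_tool` instance from the example above and the `STATUS:` suffix format used by `multion_tool.py` further down (`_run` is the tool's internal entry point, not a documented public API):

```python
# Hedged sketch: reissue the same command while MultiOn reports CONTINUE.
# The "STATUS: <state>" suffix comes from multion_tool.py in this patch;
# the retry cap keeps the loop from running forever.
cmd = "Summarize the top trending AI news headline"
for _ in range(5):
    result = multion_tool._run(cmd)
    if not result.endswith("STATUS: CONTINUE"):
        break
print(result)
```

Inside a Crew, the agent is expected to handle this loop on its own, guided by the tool description.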
+
+
diff --git a/src/crewai_tools/tools/multion_tool/example.py b/src/crewai_tools/tools/multion_tool/example.py
new file mode 100644
index 000000000..ec69e5cdf
--- /dev/null
+++ b/src/crewai_tools/tools/multion_tool/example.py
@@ -0,0 +1,29 @@
+import os
+
+from crewai import Agent, Crew, Task
+from multion_tool import MultiOnTool
+
+os.environ["OPENAI_API_KEY"] = "Your Key"
+
+multion_browse_tool = MultiOnTool(api_key="Your Key")
+
+# Create a new agent
+Browser = Agent(
+    role="Browser Agent",
+    goal="control web browsers using natural language ",
+    backstory="An expert browsing agent.",
+    tools=[multion_browse_tool],
+    verbose=True,
+)
+
+# Define tasks
+browse = Task(
+    description="Summarize the top 3 trending AI News headlines",
+    expected_output="A summary of the top 3 trending AI News headlines",
+    agent=Browser,
+)
+
+
+crew = Crew(agents=[Browser], tasks=[browse])
+
+crew.kickoff()
diff --git a/src/crewai_tools/tools/multion_tool/multion_tool.py b/src/crewai_tools/tools/multion_tool/multion_tool.py
new file mode 100644
index 000000000..1253627a2
--- /dev/null
+++ b/src/crewai_tools/tools/multion_tool/multion_tool.py
@@ -0,0 +1,56 @@
+"""Multion tool spec."""
+
+from typing import Any, Optional
+
+from crewai_tools.tools.base_tool import BaseTool
+
+
+class MultiOnTool(BaseTool):
+    """Tool to wrap MultiOn Browse Capabilities."""
+
+    name: str = "Multion Browse Tool"
+    description: str = """Multion gives the ability for LLMs to control web browsers using natural language instructions.
+    If the status is 'CONTINUE', reissue the same instruction to continue execution
+    """
+    multion: Optional[Any] = None
+    session_id: Optional[str] = None
+    local: bool = False
+
+    def __init__(self, api_key: Optional[str] = None, local: bool = False, **kwargs):
+        super().__init__(**kwargs)
+        try:
+            from multion.client import MultiOn  # type: ignore
+        except ImportError:
+            raise ImportError(
+                "`multion` package not found, please run `pip install multion`"
+            )
+        self.session_id = None
+        self.local = local
+        self.multion = MultiOn(api_key=api_key)
+
+    def _run(
+        self,
+        cmd: str,
+        *args: Any,
+        **kwargs: Any,
+    ) -> str:
+        """
+        Run the Multion client with the given command.
+
+        Args:
+            cmd (str): The detailed and specific natural language instruction for web browsing
+
+            *args (Any): Additional arguments to pass to the Multion client
+            **kwargs (Any): Additional keyword arguments to pass to the Multion client
+        """
+
+        browse = self.multion.browse(
+            cmd=cmd,
+            session_id=self.session_id,
+            local=self.local,
+            *args,
+            **kwargs,
+        )
+        self.session_id = browse.session_id
+
+        return browse.message + "\n\n STATUS: " + browse.status

From d84a61657274f925d43e7b0f9807d47589393904 Mon Sep 17 00:00:00 2001
From: Naman Garg
Date: Fri, 21 Jun 2024 15:01:08 -0700
Subject: [PATCH 091/391] update local option description in readme

---
 src/crewai_tools/tools/multion_tool/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/crewai_tools/tools/multion_tool/README.md b/src/crewai_tools/tools/multion_tool/README.md
index 0cbbbb2cd..608931166 100644
--- a/src/crewai_tools/tools/multion_tool/README.md
+++ b/src/crewai_tools/tools/multion_tool/README.md
@@ -20,7 +20,7 @@
 ## Arguments
 
 - `api_key`: Specifies the MultiOn API key. Default is the `MULTION_API_KEY` environment variable.
-- `local`: Optional. Use the local flag to run the agent locally on your browser.
+- `local`: Optional. Use the local flag set as "true" to run the agent locally on your browser. Make sure the multion browser extension is installed and API Enabled is checked.

From f9c803a8c1f13e75c526125994aa12127880b7fb Mon Sep 17 00:00:00 2001
From: Marcelo Busana
Date: Sun, 23 Jun 2024 15:33:55 -0300
Subject: [PATCH 092/391] Fix: Selenium incorrect firefox options import

---
 .../tools/selenium_scraping_tool/selenium_scraping_tool.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py
index d0c420fc9..6bf8ff5f1 100644
--- a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py
+++ b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py
@@ -5,7 +5,7 @@ from pydantic.v1 import BaseModel, Field
 from bs4 import BeautifulSoup
 from selenium import webdriver
 from selenium.webdriver.common.by import By
-from selenium.webdriver.firefox.options import Options
+from selenium.webdriver.chrome.options import Options
 
 from ..base_tool import BaseTool

From d4449ee5f0044e9324325bd77acc788c7d72ed1f Mon Sep 17 00:00:00 2001
From: angrybayblade
Date: Mon, 24 Jun 2024 20:48:52 +0530
Subject: [PATCH 093/391] feat: add composio CrewAI tool wrapper

---
 .../tools/composio_tool/README.md        | 30 +++++++++
 .../tools/composio_tool/composio_tool.py | 62 +++++++++++++++++++
 2 files changed, 92 insertions(+)
 create mode 100644 src/crewai_tools/tools/composio_tool/README.md
 create mode 100644 src/crewai_tools/tools/composio_tool/composio_tool.py

diff --git a/src/crewai_tools/tools/composio_tool/README.md b/src/crewai_tools/tools/composio_tool/README.md
new file mode 100644
index 000000000..ef7cf1edb
--- /dev/null
+++ b/src/crewai_tools/tools/composio_tool/README.md
@@ -0,0 +1,30 @@
+# ComposioTool Documentation
+
+## Description
+
+This tool is a wrapper around the composio toolset and gives your agent access to a wide variety of tools from the composio SDK.
+
+## Installation
+
+To incorporate this tool into your project, follow the installation instructions below:
+
+```shell
+pip install composio-core
+pip install 'crewai[tools]'
+```
+
+## Example
+
+The following example demonstrates how to initialize the tool and execute a mathematical operation:
+
+```python
+
+from composio import Action
+
+from crewai_tools.tools.composio_tool.composio_tool import ComposioTool
+
+tool = ComposioTool.from_tool(
+    tool=Action.MATHEMATICAL_CALCULATOR,
+)
+```
+
diff --git a/src/crewai_tools/tools/composio_tool/composio_tool.py b/src/crewai_tools/tools/composio_tool/composio_tool.py
new file mode 100644
index 000000000..e08fbde31
--- /dev/null
+++ b/src/crewai_tools/tools/composio_tool/composio_tool.py
@@ -0,0 +1,62 @@
+"""
+Composio tools wrapper.
+""" + +import typing as t + +import typing_extensions as te + +from crewai_tools.tools.base_tool import BaseTool + + +class ComposioTool(BaseTool): + """Wrapper for composio tools.""" + + composio_action: t.Callable + + def _run(self, *args: t.Any, **kwargs: t.Any) -> t.Any: + """Run the composio action with given arguments.""" + return self.composio_action(*args, **kwargs) + + @classmethod + def from_tool(cls, tool: t.Any, **kwargs: t.Any) -> te.Self: + """Wrap a composio tool as crewAI tool.""" + + from composio import Action, ComposioToolSet + from composio.constants import DEFAULT_ENTITY_ID + from composio.utils.shared import json_schema_to_model + + toolset = ComposioToolSet() + if not isinstance(tool, Action): + tool = Action.from_action(name=tool) + + tool = t.cast(Action, tool) + (action,) = toolset.get_action_schemas(actions=[tool]) + schema = action.model_dump(exclude_none=True) + entity_id = kwargs.pop("entity_id", DEFAULT_ENTITY_ID) + + def function(**kwargs: t.Any) -> t.Dict: + """Wrapper function for composio action.""" + return toolset.execute_action( + action=Action.from_app_and_action( + app=schema["appName"], + name=schema["name"], + ), + params=kwargs, + entity_id=entity_id, + ) + + function.__name__ = schema["name"] + function.__doc__ = schema["description"] + + return cls( + name=schema["name"], + description=schema["description"], + args_schema=json_schema_to_model( + action.parameters.model_dump( + exclude_none=True, + ) + ), + composio_action=function, + **kwargs + ) From f5d092f6a3a895e4c149ad15a2eb5a3756c47248 Mon Sep 17 00:00:00 2001 From: Seth Donaldson Date: Wed, 26 Jun 2024 15:46:14 -0400 Subject: [PATCH 094/391] clean copy of embedchain_adapter.py --- .../adapters/pdf_embedchain_adapter.py | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 src/crewai_tools/adapters/pdf_embedchain_adapter.py diff --git a/src/crewai_tools/adapters/pdf_embedchain_adapter.py b/src/crewai_tools/adapters/pdf_embedchain_adapter.py new file mode 100644 index 000000000..446aab96c --- /dev/null +++ b/src/crewai_tools/adapters/pdf_embedchain_adapter.py @@ -0,0 +1,25 @@ +from typing import Any + +from embedchain import App + +from crewai_tools.tools.rag.rag_tool import Adapter + + +class EmbedchainAdapter(Adapter): + embedchain_app: App + summarize: bool = False + + def query(self, question: str) -> str: + result, sources = self.embedchain_app.query( + question, citations=True, dry_run=(not self.summarize) + ) + if self.summarize: + return result + return "\n\n".join([source[0] for source in sources]) + + def add( + self, + *args: Any, + **kwargs: Any, + ) -> None: + self.embedchain_app.add(*args, **kwargs) From a95f5c27c68fa846139ae81ec9478a4b9f91c553 Mon Sep 17 00:00:00 2001 From: Seth Donaldson Date: Wed, 26 Jun 2024 15:52:54 -0400 Subject: [PATCH 095/391] Create PDFEmbedchainAdapter class and utilize it in PDFSearchTool --- .../adapters/pdf_embedchain_adapter.py | 13 ++++++++++--- .../tools/pdf_search_tool/pdf_search_tool.py | 17 +++++++++++++++++ 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/src/crewai_tools/adapters/pdf_embedchain_adapter.py b/src/crewai_tools/adapters/pdf_embedchain_adapter.py index 446aab96c..12557c971 100644 --- a/src/crewai_tools/adapters/pdf_embedchain_adapter.py +++ b/src/crewai_tools/adapters/pdf_embedchain_adapter.py @@ -1,17 +1,23 @@ -from typing import Any +from typing import Any, Optional from embedchain import App from crewai_tools.tools.rag.rag_tool import Adapter -class EmbedchainAdapter(Adapter): +class 
PDFEmbedchainAdapter(Adapter): embedchain_app: App summarize: bool = False + src: Optional[str] = None def query(self, question: str) -> str: + where = ( + {"app_id": self.embedchain_app.config.id, "source": self.src} + if self.src + else None + ) result, sources = self.embedchain_app.query( - question, citations=True, dry_run=(not self.summarize) + question, citations=True, dry_run=(not self.summarize), where=where ) if self.summarize: return result @@ -22,4 +28,5 @@ class EmbedchainAdapter(Adapter): *args: Any, **kwargs: Any, ) -> None: + self.src = args[0] if args else None self.embedchain_app.add(*args, **kwargs) diff --git a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py index af95ae0bf..48df8e966 100644 --- a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py +++ b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py @@ -1,6 +1,7 @@ from typing import Any, Optional, Type from embedchain.models.data_type import DataType +from pydantic import model_validator from pydantic.v1 import BaseModel, Field from ..rag.rag_tool import RagTool @@ -35,6 +36,22 @@ class PDFSearchTool(RagTool): self.args_schema = FixedPDFSearchToolSchema self._generate_description() + @model_validator(mode="after") + def _set_default_adapter(self): + if isinstance(self.adapter, RagTool._AdapterPlaceholder): + from embedchain import App + + from crewai_tools.adapters.pdf_embedchain_adapter import ( + PDFEmbedchainAdapter, + ) + + app = App.from_config(config=self.config) if self.config else App() + self.adapter = PDFEmbedchainAdapter( + embedchain_app=app, summarize=self.summarize + ) + + return self + def add( self, *args: Any, From 41478abdf5665e9a912716e5b7d62a36db1e6d69 Mon Sep 17 00:00:00 2001 From: angrybayblade Date: Thu, 27 Jun 2024 11:36:52 +0530 Subject: [PATCH 096/391] feat: define `ComposioTool` in the top level imports --- src/crewai_tools/__init__.py | 53 +++++++++++++++--------------- src/crewai_tools/tools/__init__.py | 17 ++++++---- 2 files changed, 38 insertions(+), 32 deletions(-) diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index a51d70449..214dbbb31 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -1,27 +1,28 @@ -from .tools.base_tool import BaseTool, Tool, tool from .tools import ( - BrowserbaseLoadTool, - CodeDocsSearchTool, - CSVSearchTool, - DirectorySearchTool, - DOCXSearchTool, - DirectoryReadTool, - EXASearchTool, - FileReadTool, - GithubSearchTool, - SerperDevTool, - TXTSearchTool, - JSONSearchTool, - MDXSearchTool, - PDFSearchTool, - PGSearchTool, - RagTool, - ScrapeElementFromWebsiteTool, - ScrapeWebsiteTool, - SeleniumScrapingTool, - WebsiteSearchTool, - XMLSearchTool, - YoutubeChannelSearchTool, - YoutubeVideoSearchTool, - LlamaIndexTool -) \ No newline at end of file + BrowserbaseLoadTool, + CodeDocsSearchTool, + ComposioTool, + CSVSearchTool, + DirectoryReadTool, + DirectorySearchTool, + DOCXSearchTool, + EXASearchTool, + FileReadTool, + GithubSearchTool, + JSONSearchTool, + LlamaIndexTool, + MDXSearchTool, + PDFSearchTool, + PGSearchTool, + RagTool, + ScrapeElementFromWebsiteTool, + ScrapeWebsiteTool, + SeleniumScrapingTool, + SerperDevTool, + TXTSearchTool, + WebsiteSearchTool, + XMLSearchTool, + YoutubeChannelSearchTool, + YoutubeVideoSearchTool, +) +from .tools.base_tool import BaseTool, Tool, tool diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 4da0c0337..df0ec7286 100644 --- 
a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -1,24 +1,29 @@ from .browserbase_load_tool.browserbase_load_tool import BrowserbaseLoadTool from .code_docs_search_tool.code_docs_search_tool import CodeDocsSearchTool +from .composio_tool.composio_tool import ComposioTool from .csv_search_tool.csv_search_tool import CSVSearchTool -from .directory_search_tool.directory_search_tool import DirectorySearchTool from .directory_read_tool.directory_read_tool import DirectoryReadTool +from .directory_search_tool.directory_search_tool import DirectorySearchTool from .docx_search_tool.docx_search_tool import DOCXSearchTool from .exa_tools.exa_search_tool import EXASearchTool from .file_read_tool.file_read_tool import FileReadTool from .github_search_tool.github_search_tool import GithubSearchTool -from .serper_dev_tool.serper_dev_tool import SerperDevTool -from .txt_search_tool.txt_search_tool import TXTSearchTool from .json_search_tool.json_search_tool import JSONSearchTool +from .llamaindex_tool.llamaindex_tool import LlamaIndexTool from .mdx_seach_tool.mdx_search_tool import MDXSearchTool from .pdf_search_tool.pdf_search_tool import PDFSearchTool from .pg_seach_tool.pg_search_tool import PGSearchTool from .rag.rag_tool import RagTool -from .scrape_element_from_website.scrape_element_from_website import ScrapeElementFromWebsiteTool +from .scrape_element_from_website.scrape_element_from_website import ( + ScrapeElementFromWebsiteTool, +) from .scrape_website_tool.scrape_website_tool import ScrapeWebsiteTool from .selenium_scraping_tool.selenium_scraping_tool import SeleniumScrapingTool +from .serper_dev_tool.serper_dev_tool import SerperDevTool +from .txt_search_tool.txt_search_tool import TXTSearchTool from .website_search.website_search_tool import WebsiteSearchTool from .xml_search_tool.xml_search_tool import XMLSearchTool -from .youtube_channel_search_tool.youtube_channel_search_tool import YoutubeChannelSearchTool +from .youtube_channel_search_tool.youtube_channel_search_tool import ( + YoutubeChannelSearchTool, +) from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool -from .llamaindex_tool.llamaindex_tool import LlamaIndexTool From be6e1a79dd136ed8b3e36947203e1e7b91f95eee Mon Sep 17 00:00:00 2001 From: angrybayblade Date: Thu, 27 Jun 2024 11:39:42 +0530 Subject: [PATCH 097/391] feat: add search utility methods --- .../tools/composio_tool/composio_tool.py | 52 ++++++++++++++++--- 1 file changed, 45 insertions(+), 7 deletions(-) diff --git a/src/crewai_tools/tools/composio_tool/composio_tool.py b/src/crewai_tools/tools/composio_tool/composio_tool.py index e08fbde31..664898ce3 100644 --- a/src/crewai_tools/tools/composio_tool/composio_tool.py +++ b/src/crewai_tools/tools/composio_tool/composio_tool.py @@ -19,7 +19,11 @@ class ComposioTool(BaseTool): return self.composio_action(*args, **kwargs) @classmethod - def from_tool(cls, tool: t.Any, **kwargs: t.Any) -> te.Self: + def from_tool( + cls, + tool: t.Any, + **kwargs: t.Any, + ) -> te.Self: """Wrap a composio tool as crewAI tool.""" from composio import Action, ComposioToolSet @@ -28,7 +32,7 @@ class ComposioTool(BaseTool): toolset = ComposioToolSet() if not isinstance(tool, Action): - tool = Action.from_action(name=tool) + tool = Action(tool) tool = t.cast(Action, tool) (action,) = toolset.get_action_schemas(actions=[tool]) @@ -38,10 +42,7 @@ class ComposioTool(BaseTool): def function(**kwargs: t.Any) -> t.Dict: """Wrapper function for composio action.""" return 
toolset.execute_action( - action=Action.from_app_and_action( - app=schema["appName"], - name=schema["name"], - ), + action=Action(schema["name"]), params=kwargs, entity_id=entity_id, ) @@ -58,5 +59,42 @@ class ComposioTool(BaseTool): ) ), composio_action=function, - **kwargs + **kwargs, ) + + @classmethod + def from_app( + cls, + app: t.Any, + tags: t.Optional[t.List[str]] = None, + **kwargs: t.Any, + ) -> t.List[te.Self]: + """Create toolset from an app.""" + from composio import App + + if not isinstance(app, App): + app = App(app) + + return [ + cls.from_tool(tool=action, **kwargs) + for action in app.get_actions(tags=tags) + ] + + @classmethod + def from_use_case( + cls, + *apps: t.Any, + use_case: str, + **kwargs: t.Any, + ) -> t.List[te.Self]: + """Create toolset from an app.""" + if len(apps) == 0: + raise ValueError( + "You need to provide at least one app name to search by use case" + ) + + from composio import ComposioToolSet + + toolset = ComposioToolSet() + actions = toolset.find_actions_by_use_case(*apps, use_case=use_case) + return [cls.from_tool(tool=action, **kwargs) for action in actions] From ab484172ef2d3237fb14416ca2894db3568e216b Mon Sep 17 00:00:00 2001 From: angrybayblade Date: Thu, 27 Jun 2024 11:40:02 +0530 Subject: [PATCH 098/391] chore: update readme --- .../tools/composio_tool/README.md | 49 +++++++++++++++++-- 1 file changed, 44 insertions(+), 5 deletions(-) diff --git a/src/crewai_tools/tools/composio_tool/README.md b/src/crewai_tools/tools/composio_tool/README.md index ef7cf1edb..4a4db85d5 100644 --- a/src/crewai_tools/tools/composio_tool/README.md +++ b/src/crewai_tools/tools/composio_tool/README.md @@ -15,16 +15,55 @@ pip install 'crewai[tools]' ## Example -The following example demonstrates how to initialize the tool and execute a mathematical operation: +The following example demonstrates how to initialize the tool and execute a github action: + +1. Initialize toolset ```python +from composio import App +from crewai_tools import ComposioTool +from crewai import Agent, Task -from composio import Action -from crewai_tools.tools.composio_tool.composio_tool import ComposioTool +tools = [ComposioTool.from_tool(tool=Action.GITHUB_ACTIVITY_STAR_REPO_FOR_AUTHENTICATED_USER)] +``` -tool = ComposioTool.from_tool( - tool=Action.MATHEMATICAL_CALCULATOR, +If you don't know what action you want to use, use `from_app` and `tags` filter to get relevant actions + +```python +tools = ComposioTool.from_app(app=App.GITHUB, tags=["important"]) +``` + +or use `from_use_case` to search relevant actions + +```python +tools = ComposioTool.from_use_case(App.GITHUB, use_case="Star a github repository") +``` + +2. Define agent + +```python +crewai_agent = Agent( + role="Github Agent", + goal="""You take action on Github using Github APIs""", + backstory=( + "You are AI agent that is responsible for taking actions on Github " + "on users behalf. You need to take action on Github using Github APIs" + ), + verbose=True, + tools=tools, ) ``` +3. 
Execute task + +```python +task = Task( + description="Star a repo ComposioHQ/composio on GitHub", + agent=crewai_agent, + expected_output="if the star happened", +) + +task.execute() +``` + From 369c03a257c6c11c5620d3bcf4e66310a649c193 Mon Sep 17 00:00:00 2001 From: angrybayblade Date: Thu, 27 Jun 2024 12:16:22 +0530 Subject: [PATCH 099/391] feat: add check for auth accounts --- .../tools/composio_tool/composio_tool.py | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/crewai_tools/tools/composio_tool/composio_tool.py b/src/crewai_tools/tools/composio_tool/composio_tool.py index 664898ce3..fd478eeb6 100644 --- a/src/crewai_tools/tools/composio_tool/composio_tool.py +++ b/src/crewai_tools/tools/composio_tool/composio_tool.py @@ -18,6 +18,26 @@ class ComposioTool(BaseTool): """Run the composio action with given arguments.""" return self.composio_action(*args, **kwargs) + @staticmethod + def _check_connected_account(tool: t.Any, toolset: t.Any) -> None: + """Check if connected account is required and if required it exists or not.""" + from composio import Action + from composio.client.collections import ConnectedAccountModel + + tool = t.cast(Action, tool) + if tool.no_auth: + return + + connections = t.cast( + t.List[ConnectedAccountModel], + toolset.client.connected_accounts.get(), + ) + if tool.app not in [connection.appUniqueId for connection in connections]: + raise RuntimeError( + f"No connected account found for app `{tool.app}`; " + f"Run `composio add {tool.app}` to fix this" + ) + @classmethod def from_tool( cls, @@ -35,6 +55,11 @@ class ComposioTool(BaseTool): tool = Action(tool) tool = t.cast(Action, tool) + cls._check_connected_account( + tool=tool, + toolset=toolset, + ) + (action,) = toolset.get_action_schemas(actions=[tool]) schema = action.model_dump(exclude_none=True) entity_id = kwargs.pop("entity_id", DEFAULT_ENTITY_ID) From 58354ec638326a4b406aa97d6038d2fc7d6df418 Mon Sep 17 00:00:00 2001 From: angrybayblade Date: Thu, 27 Jun 2024 12:23:05 +0530 Subject: [PATCH 100/391] chore: update README --- src/crewai_tools/tools/composio_tool/README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/composio_tool/README.md b/src/crewai_tools/tools/composio_tool/README.md index 4a4db85d5..fe030cbd2 100644 --- a/src/crewai_tools/tools/composio_tool/README.md +++ b/src/crewai_tools/tools/composio_tool/README.md @@ -13,6 +13,8 @@ pip install composio-core pip install 'crewai[tools]' ``` +after the installation is complete, either run `composio login` or export your composio API key as `COMPOSIO_API_KEY`. + ## Example The following example demonstrates how to initialize the tool and execute a github action: @@ -45,7 +47,7 @@ tools = ComposioTool.from_use_case(App.GITHUB, use_case="Star a github repositor ```python crewai_agent = Agent( role="Github Agent", - goal="""You take action on Github using Github APIs""", + goal="You take action on Github using Github APIs", backstory=( "You are AI agent that is responsible for taking actions on Github " "on users behalf. 
You need to take action on Github using Github APIs" @@ -67,3 +69,4 @@ task = Task( task.execute() ``` +* More detailed list of tools can be found [here](https://app.composio.dev) From 9a8d88b8aa3890897d75814fd4b916d3dba706b7 Mon Sep 17 00:00:00 2001 From: angrybayblade Date: Thu, 27 Jun 2024 13:35:57 +0530 Subject: [PATCH 101/391] fix: merge `from_app` and `from_use_case` --- .../tools/composio_tool/README.md | 8 +-- .../tools/composio_tool/composio_tool.py | 59 +++++++++---------- 2 files changed, 32 insertions(+), 35 deletions(-) diff --git a/src/crewai_tools/tools/composio_tool/README.md b/src/crewai_tools/tools/composio_tool/README.md index fe030cbd2..18045e7f1 100644 --- a/src/crewai_tools/tools/composio_tool/README.md +++ b/src/crewai_tools/tools/composio_tool/README.md @@ -27,19 +27,19 @@ from crewai_tools import ComposioTool from crewai import Agent, Task -tools = [ComposioTool.from_tool(tool=Action.GITHUB_ACTIVITY_STAR_REPO_FOR_AUTHENTICATED_USER)] +tools = [ComposioTool.from_action(action=Action.GITHUB_ACTIVITY_STAR_REPO_FOR_AUTHENTICATED_USER)] ``` If you don't know what action you want to use, use `from_app` and `tags` filter to get relevant actions ```python -tools = ComposioTool.from_app(app=App.GITHUB, tags=["important"]) +tools = ComposioTool.from_app(App.GITHUB, tags=["important"]) ``` -or use `from_use_case` to search relevant actions +or use `use_case` to search relevant actions ```python -tools = ComposioTool.from_use_case(App.GITHUB, use_case="Star a github repository") +tools = ComposioTool.from_app(App.GITHUB, use_case="Star a github repository") ``` 2. Define agent diff --git a/src/crewai_tools/tools/composio_tool/composio_tool.py b/src/crewai_tools/tools/composio_tool/composio_tool.py index fd478eeb6..62068c0bd 100644 --- a/src/crewai_tools/tools/composio_tool/composio_tool.py +++ b/src/crewai_tools/tools/composio_tool/composio_tool.py @@ -39,9 +39,9 @@ class ComposioTool(BaseTool): ) @classmethod - def from_tool( + def from_action( cls, - tool: t.Any, + action: t.Any, **kwargs: t.Any, ) -> te.Self: """Wrap a composio tool as crewAI tool.""" @@ -51,17 +51,17 @@ class ComposioTool(BaseTool): from composio.utils.shared import json_schema_to_model toolset = ComposioToolSet() - if not isinstance(tool, Action): - tool = Action(tool) + if not isinstance(action, Action): + action = Action(action) - tool = t.cast(Action, tool) + action = t.cast(Action, action) cls._check_connected_account( - tool=tool, + tool=action, toolset=toolset, ) - (action,) = toolset.get_action_schemas(actions=[tool]) - schema = action.model_dump(exclude_none=True) + (action_schema,) = toolset.get_action_schemas(actions=[action]) + schema = action_schema.model_dump(exclude_none=True) entity_id = kwargs.pop("entity_id", DEFAULT_ENTITY_ID) def function(**kwargs: t.Any) -> t.Dict: @@ -79,7 +79,7 @@ class ComposioTool(BaseTool): name=schema["name"], description=schema["description"], args_schema=json_schema_to_model( - action.parameters.model_dump( + action_schema.parameters.model_dump( exclude_none=True, ) ), @@ -89,37 +89,34 @@ class ComposioTool(BaseTool): @classmethod def from_app( - cls, - app: t.Any, - tags: t.Optional[t.List[str]] = None, - **kwargs: t.Any, - ) -> t.List[te.Self]: - """Create toolset from an app.""" - from composio import App - - if not isinstance(app, App): - app = App(app) - - return [ - cls.from_tool(tool=action, **kwargs) - for action in app.get_actions(tags=tags) - ] - - @classmethod - def from_use_case( cls, *apps: t.Any, - use_case: str, + tags: t.Optional[t.List[str]] = 
None, + use_case: t.Optional[str] = None, **kwargs: t.Any, ) -> t.List[te.Self]: """Create toolset from an app.""" if len(apps) == 0: + raise ValueError("You need to provide at least one app name") + + if use_case is None and tags is None: + raise ValueError("Both `use_case` and `tags` cannot be `None`") + + if use_case is not None and tags is not None: raise ValueError( - "You need to provide at least one app name to search by use case" + "Cannot use both `use_case` and `tags` to filter the actions" ) from composio import ComposioToolSet toolset = ComposioToolSet() - actions = toolset.find_actions_by_use_case(*apps, use_case=use_case) - return [cls.from_tool(tool=action, **kwargs) for action in actions] + if use_case is not None: + return [ + cls.from_action(action=action, **kwargs) + for action in toolset.find_actions_by_use_case(*apps, use_case=use_case) + ] + + return [ + cls.from_action(action=action, **kwargs) + for action in toolset.find_actions_by_tags(*apps, tags=tags) + ] From a3d3a70b5a4f87b27089759938cee08a6a9d3f31 Mon Sep 17 00:00:00 2001 From: Mervin Praison Date: Mon, 1 Jul 2024 04:34:39 +0100 Subject: [PATCH 102/391] Update __init__.py to Add CodeInterpreterTool --- src/crewai_tools/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index a51d70449..1398dcfaf 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -2,6 +2,7 @@ from .tools.base_tool import BaseTool, Tool, tool from .tools import ( BrowserbaseLoadTool, CodeDocsSearchTool, + CodeInterpreterTool, CSVSearchTool, DirectorySearchTool, DOCXSearchTool, @@ -24,4 +25,4 @@ from .tools import ( YoutubeChannelSearchTool, YoutubeVideoSearchTool, LlamaIndexTool -) \ No newline at end of file +) From f79c385bf76a1dcd0595cfc7bd111fa1928a87d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Mon, 1 Jul 2024 00:55:21 -0700 Subject: [PATCH 103/391] revamping code interpreter --- .../code_interpreter_tool.py | 60 ++++++++----------- 1 file changed, 26 insertions(+), 34 deletions(-) diff --git a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py index 06cb081f0..ec756d8c0 100644 --- a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py +++ b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py @@ -1,69 +1,62 @@ import os -from typing import Optional, Type +import importlib.util +import textwrap +from typing import List, Optional, Type import docker from crewai_tools.tools.base_tool import BaseTool from pydantic.v1 import BaseModel, Field -class FixedCodeInterpreterSchemaSchema(BaseModel): +class CodeInterpreterSchema(BaseModel): """Input for CodeInterpreterTool.""" - - pass - - -class CodeInterpreterSchema(FixedCodeInterpreterSchemaSchema): - """Input for CodeInterpreterTool.""" - code: str = Field( ..., - description="Python3 code used to be interpreted in the Docker container. ALWAYS PRINT the final result and the output of the code", + description="Mandatory string of python3 code used to be interpreted with a final print statement.", ) - libraries_used: Optional[str] = Field( - None, - description="List of libraries used in the code with proper installing names separated by commas. 
Example: numpy,pandas,beautifulsoup4", + dependencies_used_in_code: List[str] = Field( + ..., + description="Mandatory list of libraries used in the code with proper installing names.", ) - class CodeInterpreterTool(BaseTool): name: str = "Code Interpreter" - description: str = "Interprets Python code in a Docker container. ALWAYS PRINT the final result and the output of the code" + description: str = "Interprets Python3 code strings with a final print statement." args_schema: Type[BaseModel] = CodeInterpreterSchema code: Optional[str] = None + @staticmethod + def _get_installed_package_path(): + spec = importlib.util.find_spec('crewai_tools') + return os.path.dirname(spec.origin) + def _verify_docker_image(self) -> None: """ Verify if the Docker image is available """ image_tag = "code-interpreter:latest" - client = docker.from_env() - images = client.images.list() - all_tags = [tag for image in images for tag in image.tags] + try: + client.images.get(image_tag) + except: + package_path = self._get_installed_package_path() + dockerfile_path = os.path.join(package_path, 'tools/code_interpreter_tool') + if not os.path.exists(dockerfile_path): + raise FileNotFoundError(f"Dockerfile not found in {dockerfile_path}") - if image_tag not in all_tags: client.images.build( - path=os.path.dirname(os.path.abspath(__file__)), + path=dockerfile_path, tag=image_tag, rm=True, ) - def __init__(self, code: Optional[str] = None, **kwargs) -> None: - super().__init__(**kwargs) - if code is not None: - self._verify_docker_image() - self.code = code - self.description = "Interprets Python code in a Docker container. ALWAYS PRINT the final result and the output of the code" - self.args_schema = FixedCodeInterpreterSchemaSchema - self._generate_description() - def _run(self, **kwargs) -> str: code = kwargs.get("code", self.code) - libraries_used = kwargs.get("libraries_used", None) + libraries_used = kwargs.get("dependencies_used_in_code", []) return self.run_code_in_docker(code, libraries_used) def _install_libraries( - self, container: docker.models.containers.Container, libraries: list[str] + self, container: docker.models.containers.Container, libraries: List[str] ) -> None: """ Install missing libraries in the Docker container @@ -78,10 +71,9 @@ class CodeInterpreterTool(BaseTool): ) def run_code_in_docker(self, code: str, libraries_used: str) -> str: + self._verify_docker_image() container = self._init_docker_container() - - if libraries_used: - self._install_libraries(container, libraries_used.split(",")) + self._install_libraries(container, libraries_used) cmd_to_run = f'python3 -c "{code}"' exec_result = container.exec_run(cmd_to_run) From d000bd2fc8eaed205b77064bc4f908ba6c8aed4e Mon Sep 17 00:00:00 2001 From: Eduardo Chiarotti Date: Tue, 2 Jul 2024 12:00:04 -0300 Subject: [PATCH 104/391] fix: add code interpreter tool --- src/crewai_tools/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 1cc222ec9..b85a16ffb 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -1,6 +1,7 @@ from .tools import ( BrowserbaseLoadTool, CodeDocsSearchTool, + CodeInterpreterTool, ComposioTool, CSVSearchTool, DirectoryReadTool, @@ -25,4 +26,4 @@ from .tools import ( YoutubeChannelSearchTool, YoutubeVideoSearchTool, ) -from .tools.base_tool import BaseTool, Tool, tool \ No newline at end of file +from .tools.base_tool import BaseTool, Tool, tool From b4d91d1ce01644893eb0c93be84d5067616f5f52 Mon Sep 17 
00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Thu, 4 Jul 2024 00:09:18 -0400 Subject: [PATCH 105/391] adding new result_as_answer options --- src/crewai_tools/tools/base_tool.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/crewai_tools/tools/base_tool.py b/src/crewai_tools/tools/base_tool.py index e8e497859..4e0bd1fd5 100644 --- a/src/crewai_tools/tools/base_tool.py +++ b/src/crewai_tools/tools/base_tool.py @@ -22,6 +22,8 @@ class BaseTool(BaseModel, ABC): """Flag to check if the description has been updated.""" cache_function: Optional[Callable] = lambda _args, _result: True """Function that will be used to determine if the tool should be cached, should return a boolean. If None, the tool will be cached.""" + result_as_answer: bool = False + """Flag to check if the tool should be the final agent answer.""" @validator("args_schema", always=True, pre=True) def _default_args_schema(cls, v: Type[V1BaseModel]) -> Type[V1BaseModel]: From ba05d18ab16db4dd36f3b3ce414d3652ccfc9d20 Mon Sep 17 00:00:00 2001 From: Eduardo Chiarotti Date: Thu, 4 Jul 2024 16:42:29 -0300 Subject: [PATCH 106/391] fix: fix type hinting, add container name and handle exception and returned old description --- .../code_interpreter_tool.py | 30 ++++++++++++------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py index ec756d8c0..f341e52d0 100644 --- a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py +++ b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py @@ -1,6 +1,5 @@ -import os import importlib.util -import textwrap +import os from typing import List, Optional, Type import docker @@ -10,15 +9,18 @@ from pydantic.v1 import BaseModel, Field class CodeInterpreterSchema(BaseModel): """Input for CodeInterpreterTool.""" + code: str = Field( ..., - description="Mandatory string of python3 code used to be interpreted with a final print statement.", + description="Python3 code used to be interpreted in the Docker container. ALWAYS PRINT the final result and the output of the code", ) - dependencies_used_in_code: List[str] = Field( + + libraries_used: List[str] = Field( ..., - description="Mandatory list of libraries used in the code with proper installing names.", + description="List of libraries used in the code with proper installing names separated by commas. Example: numpy,pandas,beautifulsoup4", ) + class CodeInterpreterTool(BaseTool): name: str = "Code Interpreter" description: str = "Interprets Python3 code strings with a final print statement." 
@@ -27,7 +29,7 @@ class CodeInterpreterTool(BaseTool): @staticmethod def _get_installed_package_path(): - spec = importlib.util.find_spec('crewai_tools') + spec = importlib.util.find_spec("crewai_tools") return os.path.dirname(spec.origin) def _verify_docker_image(self) -> None: @@ -36,11 +38,13 @@ class CodeInterpreterTool(BaseTool): """ image_tag = "code-interpreter:latest" client = docker.from_env() + try: client.images.get(image_tag) - except: + + except docker.errors.ImageNotFound: package_path = self._get_installed_package_path() - dockerfile_path = os.path.join(package_path, 'tools/code_interpreter_tool') + dockerfile_path = os.path.join(package_path, "tools/code_interpreter_tool") if not os.path.exists(dockerfile_path): raise FileNotFoundError(f"Dockerfile not found in {dockerfile_path}") @@ -52,7 +56,7 @@ class CodeInterpreterTool(BaseTool): def _run(self, **kwargs) -> str: code = kwargs.get("code", self.code) - libraries_used = kwargs.get("dependencies_used_in_code", []) + libraries_used = kwargs.get("libraries_used", []) return self.run_code_in_docker(code, libraries_used) def _install_libraries( @@ -67,10 +71,14 @@ class CodeInterpreterTool(BaseTool): def _init_docker_container(self) -> docker.models.containers.Container: client = docker.from_env() return client.containers.run( - "code-interpreter", detach=True, tty=True, working_dir="/workspace" + "code-interpreter", + detach=True, + tty=True, + working_dir="/workspace", + name="code-interpreter", ) - def run_code_in_docker(self, code: str, libraries_used: str) -> str: + def run_code_in_docker(self, code: str, libraries_used: List[str]) -> str: self._verify_docker_image() container = self._init_docker_container() self._install_libraries(container, libraries_used) From a5d283943160e10185aa6bc6353d7c79d1c64aa9 Mon Sep 17 00:00:00 2001 From: Jakub Strnad Date: Fri, 5 Jul 2024 16:30:41 +0200 Subject: [PATCH 107/391] arguments descriptions added to tool description so now the agent knows how to use the tools params --- src/crewai_tools/tools/base_tool.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/crewai_tools/tools/base_tool.py b/src/crewai_tools/tools/base_tool.py index 4e0bd1fd5..dff1e37aa 100644 --- a/src/crewai_tools/tools/base_tool.py +++ b/src/crewai_tools/tools/base_tool.py @@ -86,13 +86,16 @@ class BaseTool(BaseModel, ABC): ) def _generate_description(self): - args = [] - for arg, attribute in self.args_schema.schema()["properties"].items(): - if "type" in attribute: - args.append(f"{arg}: '{attribute['type']}'") + args = [] + args_description = [] + for arg, attribute in self.args_schema.schema()["properties"].items(): + if "type" in attribute: + args.append(f"{arg}: '{attribute['type']}'") + if "description" in attribute: + args_description.append(f"{arg}: '{attribute['description']}'") - description = self.description.replace("\n", " ") - self.description = f"{self.name}({', '.join(args)}) - {description}" + description = self.description.replace("\n", " ") + self.description = f"{self.name}({', '.join(args)}) - {description} {', '.join(args_description)}" class Tool(BaseTool): From cb1dc13a9d214622f8ea8d0c3573b4207c9402bf Mon Sep 17 00:00:00 2001 From: Jakub Date: Fri, 5 Jul 2024 18:42:16 +0200 Subject: [PATCH 108/391] fixed intendation --- src/crewai_tools/tools/base_tool.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/crewai_tools/tools/base_tool.py b/src/crewai_tools/tools/base_tool.py index dff1e37aa..4b60d93d4 100644 --- 
a/src/crewai_tools/tools/base_tool.py +++ b/src/crewai_tools/tools/base_tool.py @@ -86,16 +86,16 @@ class BaseTool(BaseModel, ABC): ) def _generate_description(self): - args = [] - args_description = [] - for arg, attribute in self.args_schema.schema()["properties"].items(): - if "type" in attribute: - args.append(f"{arg}: '{attribute['type']}'") - if "description" in attribute: - args_description.append(f"{arg}: '{attribute['description']}'") + args = [] + args_description = [] + for arg, attribute in self.args_schema.schema()["properties"].items(): + if "type" in attribute: + args.append(f"{arg}: '{attribute['type']}'") + if "description" in attribute: + args_description.append(f"{arg}: '{attribute['description']}'") - description = self.description.replace("\n", " ") - self.description = f"{self.name}({', '.join(args)}) - {description} {', '.join(args_description)}" + description = self.description.replace("\n", " ") + self.description = f"{self.name}({', '.join(args)}) - {description} {', '.join(args_description)}" class Tool(BaseTool): From f056764132b5caa93846ad67fbc1fce841255796 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Mon, 8 Jul 2024 01:15:00 -0400 Subject: [PATCH 109/391] adding firecrawl imports --- src/crewai_tools/__init__.py | 3 +++ src/crewai_tools/tools/__init__.py | 3 +++ 2 files changed, 6 insertions(+) diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index b85a16ffb..a9c9a4168 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -9,6 +9,9 @@ from .tools import ( DOCXSearchTool, EXASearchTool, FileReadTool, + FirecrawlCrawlWebsiteTool, + FirecrawlScrapeWebsiteTool, + FirecrawlSearchTool, GithubSearchTool, JSONSearchTool, LlamaIndexTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index f5ac94052..17d289832 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -8,6 +8,9 @@ from .directory_search_tool.directory_search_tool import DirectorySearchTool from .docx_search_tool.docx_search_tool import DOCXSearchTool from .exa_tools.exa_search_tool import EXASearchTool from .file_read_tool.file_read_tool import FileReadTool +from .firecrawl_crawl_website_tool.firecrawl_crawl_website_tool import FirecrawlCrawlWebsiteTool +from .firecrawl_scrape_website_tool.firecrawl_scrape_website_tool import FirecrawlScrapeWebsiteTool +from .firecrawl_search_tool.firecrawl_search_tool import FirecrawlSearchTool from .github_search_tool.github_search_tool import GithubSearchTool from .json_search_tool.json_search_tool import JSONSearchTool from .llamaindex_tool.llamaindex_tool import LlamaIndexTool From 65855cbe56e243bc61b8b48e3f731b5c5c1c3627 Mon Sep 17 00:00:00 2001 From: Jakub Strnad Date: Mon, 8 Jul 2024 15:24:26 +0200 Subject: [PATCH 110/391] bugfix: ScrapeWebsiteTool encoding fixed problem with garbage output of ScrapeWebsiteTool on some websites --- .../tools/scrape_website_tool/scrape_website_tool.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py index 148a0b320..e59064151 100644 --- a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py +++ b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py @@ -25,8 +25,7 @@ class ScrapeWebsiteTool(BaseTool): 'Accept-Language': 'en-US,en;q=0.9', 'Referer': 'https://www.google.com/', 'Connection': 'keep-alive', - 
From f056764132b5caa93846ad67fbc1fce841255796 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Moura?=
Date: Mon, 8 Jul 2024 01:15:00 -0400
Subject: [PATCH 109/391] adding firecrawl imports

---
 src/crewai_tools/__init__.py       | 3 +++
 src/crewai_tools/tools/__init__.py | 3 +++
 2 files changed, 6 insertions(+)

diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py
index b85a16ffb..a9c9a4168 100644
--- a/src/crewai_tools/__init__.py
+++ b/src/crewai_tools/__init__.py
@@ -9,6 +9,9 @@ from .tools import (
     DOCXSearchTool,
     EXASearchTool,
     FileReadTool,
+    FirecrawlCrawlWebsiteTool,
+    FirecrawlScrapeWebsiteTool,
+    FirecrawlSearchTool,
     GithubSearchTool,
     JSONSearchTool,
     LlamaIndexTool,
diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py
index f5ac94052..17d289832 100644
--- a/src/crewai_tools/tools/__init__.py
+++ b/src/crewai_tools/tools/__init__.py
@@ -8,6 +8,9 @@ from .directory_search_tool.directory_search_tool import DirectorySearchTool
 from .docx_search_tool.docx_search_tool import DOCXSearchTool
 from .exa_tools.exa_search_tool import EXASearchTool
 from .file_read_tool.file_read_tool import FileReadTool
+from .firecrawl_crawl_website_tool.firecrawl_crawl_website_tool import FirecrawlCrawlWebsiteTool
+from .firecrawl_scrape_website_tool.firecrawl_scrape_website_tool import FirecrawlScrapeWebsiteTool
+from .firecrawl_search_tool.firecrawl_search_tool import FirecrawlSearchTool
 from .github_search_tool.github_search_tool import GithubSearchTool
 from .json_search_tool.json_search_tool import JSONSearchTool
 from .llamaindex_tool.llamaindex_tool import LlamaIndexTool

From 65855cbe56e243bc61b8b48e3f731b5c5c1c3627 Mon Sep 17 00:00:00 2001
From: Jakub Strnad
Date: Mon, 8 Jul 2024 15:24:26 +0200
Subject: [PATCH 110/391] bugfix: ScrapeWebsiteTool encoding

fixed problem with garbage output of ScrapeWebsiteTool on some websites

---
 .../tools/scrape_website_tool/scrape_website_tool.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py
index 148a0b320..e59064151 100644
--- a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py
+++ b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py
@@ -25,8 +25,7 @@ class ScrapeWebsiteTool(BaseTool):
         'Accept-Language': 'en-US,en;q=0.9',
         'Referer': 'https://www.google.com/',
         'Connection': 'keep-alive',
-        'Upgrade-Insecure-Requests': '1',
-        'Accept-Encoding': 'gzip, deflate, br'
+        'Upgrade-Insecure-Requests': '1'
     }

     def __init__(self, website_url: Optional[str] = None, cookies: Optional[dict] = None, **kwargs):

From 6f45c6ed0949fece97b48ecf29df22336ec365e8 Mon Sep 17 00:00:00 2001
From: Naman Garg
Date: Mon, 8 Jul 2024 15:11:04 -0700
Subject: [PATCH 111/391] Updated Readme

---
 src/crewai_tools/tools/multion_tool/README.md | 29 +++++++++++++++++--
 .../tools/multion_tool/multion_tool.py        | 11 ++++++-
 2 files changed, 36 insertions(+), 4 deletions(-)

diff --git a/src/crewai_tools/tools/multion_tool/README.md b/src/crewai_tools/tools/multion_tool/README.md
index 608931166..ea530037f 100644
--- a/src/crewai_tools/tools/multion_tool/README.md
+++ b/src/crewai_tools/tools/multion_tool/README.md
@@ -5,27 +5,50 @@ The MultiOnTool, integrated within the crewai_tools package, empowers CrewAI age

 ## Installation

 Ensure the `crewai[tools]` package is installed in your environment to use the MultiOnTool. If it's not already installed, you can add it using the command below:

+```shell
+pip install 'crewai[tools]'
+```

 ## Example

 The following example demonstrates how to initialize the tool and execute a search with a given query:

 ```python
+from crewai import Agent, Task, Crew
 from crewai_tools import MultiOnTool

 # Initialize the tool from a MultiOn Tool
 multion_tool = MultiOnTool(api_key= "YOUR_MULTION_API_KEY", local=False)

+Browser = Agent(
+    role="Browser Agent",
+    goal="control web browsers using natural language ",
+    backstory="An expert browsing agent.",
+    tools=[multion_tool],
+    verbose=True,
+)
+
+# example task to search and summarize news
+browse = Task(
+    description="Summarize the top 3 trending AI News headlines",
+    expected_output="A summary of the top 3 trending AI News headlines",
+    agent=Browser,
+)
+
+crew = Crew(agents=[Browser], tasks=[browse])
+
+crew.kickoff()
 ```

 ## Arguments

 - `api_key`: Specifies the MultiOn API key. Defaults to the `MULTION_API_KEY` environment variable.
-`local`: Optional. Use the local flag set as "true" to run the agent locally on your browser. Make sure the multion browser extension is installed and API Enabled is checked.
+`local`: Use the local flag set as "true" to run the agent locally on your browser. Make sure the MultiOn browser extension is installed and API Enabled is checked.
+`max_steps`: Optional. Sets the maximum number of steps the MultiOn agent can take for a command.

 ## Steps to Get Started

 To effectively use the `MultiOnTool`, follow these steps:

 1. **Install CrewAI**: Confirm that the `crewai[tools]` package is installed in your Python environment.
-2. **Install and use MultiOn**: Follow MultiOn documentation (https://docs.multion.ai/).
-
+2. **Install and use MultiOn**: Follow MultiOn documentation for installing the MultiOn Browser Extension (https://docs.multion.ai/learn/browser-extension).
+3. **Enable API Usage**: Click on the MultiOn extension in the extensions folder of your browser (not the hovering MultiOn icon on the web page) to open the extension configurations.
Click the API Enabled toggle to enable the API diff --git a/src/crewai_tools/tools/multion_tool/multion_tool.py b/src/crewai_tools/tools/multion_tool/multion_tool.py index 1253627a2..2dc944f23 100644 --- a/src/crewai_tools/tools/multion_tool/multion_tool.py +++ b/src/crewai_tools/tools/multion_tool/multion_tool.py @@ -15,8 +15,15 @@ class MultiOnTool(BaseTool): multion: Optional[Any] = None session_id: Optional[str] = None local: bool = False + max_steps: int = 3 - def __init__(self, api_key: Optional[str] = None, local: bool = False, **kwargs): + def __init__( + self, + api_key: Optional[str] = None, + local: bool = False, + max_steps: int = 3, + **kwargs, + ): super().__init__(**kwargs) try: from multion.client import MultiOn # type: ignore @@ -27,6 +34,7 @@ class MultiOnTool(BaseTool): self.session_id = None self.local = local self.multion = MultiOn(api_key=api_key) + self.max_steps = max_steps def _run( self, @@ -48,6 +56,7 @@ class MultiOnTool(BaseTool): cmd=cmd, session_id=self.session_id, local=self.local, + max_steps=self.max_steps, *args, **kwargs, ) From f447f71a8ee017298acaac590e30b9ca5ae2589e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Sun, 14 Jul 2024 17:59:17 -0300 Subject: [PATCH 112/391] Update serper_dev_tool.py --- .../tools/serper_dev_tool/serper_dev_tool.py | 23 ++++++++----------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py index 77d008be9..651d9c5a6 100644 --- a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py +++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py @@ -27,7 +27,7 @@ class SerperDevTool(BaseTool): location: Optional[str] = None locale: Optional[str] = None n_results: int = Field(default=10, description="Number of search results to return") - save_file: bool = Field(default=False, description="Flag to determine whether to save the results to a file") + save_file: bool = Field(default=False, description="Flag to determine whether to save the results to a file") def _run( self, @@ -35,22 +35,19 @@ class SerperDevTool(BaseTool): ) -> Any: search_query = kwargs.get('search_query') or kwargs.get('query') - save_file = kwargs.get('save_file', self.save_file) - n_results = kwargs.get('n_results', self.n_results) + save_file = kwargs.get('save_file', self.save_file) + n_results = kwargs.get('n_results', self.n_results) - payload = json.dumps( - { - "q": search_query, - "num": n_results, - "gl": self.country, - "location": self.location, - "hl": self.locale, - } - ) + payload = { "q": search_query, "num": n_results } + payload["gl"] = self.country if self.country + payload["location"] = self.country if self.location + payload["hl"] = self.country if self.locale + + payload = json.dumps(payload) headers = { 'X-API-KEY': os.environ['SERPER_API_KEY'], - 'content-type': 'application/json' + 'content-type': 'application/json' } response = requests.request("POST", self.search_url, headers=headers, data=payload) results = response.json() From d5e6b95817755f10725f5059d4dba77918024dda Mon Sep 17 00:00:00 2001 From: Carlos Antunes Date: Mon, 15 Jul 2024 12:41:53 -0300 Subject: [PATCH 113/391] adding the appropriate imports on the __init__.py files --- src/crewai_tools/__init__.py | 3 ++- src/crewai_tools/tools/__init__.py | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index faac5d37d..36d48b4a4 100644 --- 
a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -8,6 +8,7 @@ from .tools import ( DirectoryReadTool, EXASearchTool, FileReadTool, + FileWriterTool, GithubSearchTool, SerperDevTool, TXTSearchTool, @@ -23,4 +24,4 @@ from .tools import ( XMLSearchTool, YoutubeChannelSearchTool, YoutubeVideoSearchTool, -) \ No newline at end of file +) diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 648671d97..e6dd234a5 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -6,6 +6,7 @@ from .directory_read_tool.directory_read_tool import DirectoryReadTool from .docx_search_tool.docx_search_tool import DOCXSearchTool from .exa_tools.exa_search_tool import EXASearchTool from .file_read_tool.file_read_tool import FileReadTool +from .file_writer_tool.file_writer_tool import FileWriterTool from .github_search_tool.github_search_tool import GithubSearchTool from .serper_dev_tool.serper_dev_tool import SerperDevTool from .txt_search_tool.txt_search_tool import TXTSearchTool From 21342fa0f6ceb183dc89376c845ee6c0973ebfb8 Mon Sep 17 00:00:00 2001 From: Carlos Antunes Date: Mon, 15 Jul 2024 13:00:06 -0300 Subject: [PATCH 114/391] adding the proper imports into __init__.py files, and fixing the folder name --- src/crewai_tools/__init__.py | 3 ++- src/crewai_tools/tools/__init__.py | 1 + .../tools/{mysql_seach_tool => mysql_search_tool}/README.md | 0 .../mysql_search_tool.py | 0 4 files changed, 3 insertions(+), 1 deletion(-) rename src/crewai_tools/tools/{mysql_seach_tool => mysql_search_tool}/README.md (100%) rename src/crewai_tools/tools/{mysql_seach_tool => mysql_search_tool}/mysql_search_tool.py (100%) diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index faac5d37d..4988a2b25 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -23,4 +23,5 @@ from .tools import ( XMLSearchTool, YoutubeChannelSearchTool, YoutubeVideoSearchTool, -) \ No newline at end of file + MySQLSearchTool +) diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 648671d97..985537712 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -21,3 +21,4 @@ from .website_search.website_search_tool import WebsiteSearchTool from .xml_search_tool.xml_search_tool import XMLSearchTool from .youtube_channel_search_tool.youtube_channel_search_tool import YoutubeChannelSearchTool from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool +from .mysql_search_tool.mysql_search_tool import MySQLSearchTool diff --git a/src/crewai_tools/tools/mysql_seach_tool/README.md b/src/crewai_tools/tools/mysql_search_tool/README.md similarity index 100% rename from src/crewai_tools/tools/mysql_seach_tool/README.md rename to src/crewai_tools/tools/mysql_search_tool/README.md diff --git a/src/crewai_tools/tools/mysql_seach_tool/mysql_search_tool.py b/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py similarity index 100% rename from src/crewai_tools/tools/mysql_seach_tool/mysql_search_tool.py rename to src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py From 0386120a5ac86368c72293b243d653ef5b941645 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Fri, 19 Jul 2024 00:28:27 -0400 Subject: [PATCH 115/391] fixing serper tool --- .../scrape_website_tool.py | 10 +++-- .../tools/serper_dev_tool/serper_dev_tool.py | 37 +++++++++++-------- 2 files changed, 28 insertions(+), 19 deletions(-) diff 
--git a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py
index e59064151..92f84cba9 100644
--- a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py
+++ b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py
@@ -39,8 +39,8 @@ class ScrapeWebsiteTool(BaseTool):
             self.cookies = {cookies["name"]: os.getenv(cookies["value"])}

     def _run(
-      self,
-      **kwargs: Any,
+        self,
+        **kwargs: Any,
     ) -> Any:
         website_url = kwargs.get('website_url', self.website_url)
         page = requests.get(
@@ -49,9 +49,11 @@ class ScrapeWebsiteTool(BaseTool):
             headers=self.headers,
             cookies=self.cookies if self.cookies else {}
         )
-        parsed = BeautifulSoup(page.content, "html.parser")
+
+        page.encoding = page.apparent_encoding
+        parsed = BeautifulSoup(page.text, "html.parser")
+
         text = parsed.get_text()
         text = '\n'.join([i for i in text.split('\n') if i.strip() != ''])
         text = ' '.join([i for i in text.split(' ') if i.strip() != ''])
         return text
-
diff --git a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py
index 651d9c5a6..f89768064 100644
--- a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py
+++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py
@@ -1,3 +1,4 @@
+from datetime import datetime
 import os
 import json
 import requests
@@ -7,11 +8,11 @@ from pydantic.v1 import BaseModel, Field
 from crewai_tools.tools.base_tool import BaseTool

 def _save_results_to_file(content: str) -> None:
-  """Saves the search results to a file."""
-  filename = f"search_results_{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt"
-  with open(filename, 'w') as file:
-    file.write(content)
-  print(f"Results saved to {filename}")
+    """Saves the search results to a file."""
+    filename = f"search_results_{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt"
+    with open(filename, 'w') as file:
+        file.write(content)
+    print(f"Results saved to {filename}")


 class SerperDevToolSchema(BaseModel):
@@ -23,11 +24,11 @@ class SerperDevTool(BaseTool):
     description: str = "A tool that can be used to search the internet with a search_query."
args_schema: Type[BaseModel] = SerperDevToolSchema search_url: str = "https://google.serper.dev/search" - country: Optional[str] = None - location: Optional[str] = None - locale: Optional[str] = None - n_results: int = Field(default=10, description="Number of search results to return") - save_file: bool = Field(default=False, description="Flag to determine whether to save the results to a file") + country: Optional[str] = '' + location: Optional[str] = '' + locale: Optional[str] = '' + n_results: int = 10 + save_file: bool = False def _run( self, @@ -39,18 +40,24 @@ class SerperDevTool(BaseTool): n_results = kwargs.get('n_results', self.n_results) payload = { "q": search_query, "num": n_results } - payload["gl"] = self.country if self.country - payload["location"] = self.country if self.location - payload["hl"] = self.country if self.locale - + + if self.country != '': + payload["gl"] = self.country + if self.location != '': + payload["location"] = self.location + if self.locale != '': + payload["hl"] = self.locale + payload = json.dumps(payload) headers = { 'X-API-KEY': os.environ['SERPER_API_KEY'], 'content-type': 'application/json' } + response = requests.request("POST", self.search_url, headers=headers, data=payload) results = response.json() + if 'organic' in results: results = results['organic'][:self.n_results] string = [] @@ -67,7 +74,7 @@ class SerperDevTool(BaseTool): content = '\n'.join(string) if save_file: - _save_results_to_file(content) + _save_results_to_file(content) return f"\nSearch results: {content}\n" else: return results From 25343727fd4c06eda96bbb5c7d173730063cdae6 Mon Sep 17 00:00:00 2001 From: "ai.flyingwheel" Date: Fri, 19 Jul 2024 23:11:10 +0800 Subject: [PATCH 116/391] fixing serply_api_tool --- .../tools/serply_api_tool/serply_job_search_tool.py | 2 +- .../tools/serply_api_tool/serply_scholar_search_tool.py | 4 ++-- .../tools/serply_api_tool/serply_webpage_to_markdown_tool.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py index 358e312c7..a69ff3de6 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py @@ -7,7 +7,7 @@ from crewai_tools.tools.rag.rag_tool import RagTool class SerplyJobSearchToolSchema(BaseModel): - """Input for Serply Scholar Search.""" + """Input for Job Search.""" search_query: str = Field(..., description="Mandatory search query you want to use to fetch jobs postings.") diff --git a/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py index 62c3bef7f..a37c36e5f 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py @@ -13,7 +13,7 @@ class SerplyScholarSearchToolSchema(BaseModel): class SerplyScholarSearchTool(BaseTool): name: str = "Scholar Search" - description: str = "A tool to perform News article search with a search_query." + description: str = "A tool to perform scholarly literature search with a search_query." 
args_schema: Type[BaseModel] = SerplyScholarSearchToolSchema search_url: str = "https://api.serply.io/v1/scholar/" hl: Optional[str] = "us" @@ -29,7 +29,7 @@ class SerplyScholarSearchTool(BaseTool): """ param: hl (str): host Language code to display results in (reference https://developers.google.com/custom-search/docs/xml_results?hl=en#wsInterfaceLanguages) - proxy_location: (str): Where to get news, specifically for a specific country results. + proxy_location: (str): Specify the proxy location for the search, specifically for a specific country results. ['US', 'CA', 'IE', 'GB', 'FR', 'DE', 'SE', 'IN', 'JP', 'KR', 'SG', 'AU', 'BR'] (defaults to US) """ super().__init__(**kwargs) diff --git a/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py index 27ffc54ce..5049826c5 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py @@ -6,7 +6,7 @@ from crewai_tools.tools.rag.rag_tool import RagTool class SerplyWebpageToMarkdownToolSchema(BaseModel): - """Input for Serply Scholar Search.""" + """Input for Serply Search.""" url: str = Field(..., description="Mandatory url you want to use to fetch and convert to markdown") @@ -24,7 +24,7 @@ class SerplyWebpageToMarkdownTool(RagTool): **kwargs ): """ - proxy_location: (str): Where to get news, specifically for a specific country results. + proxy_location: (str): Where to perform the search, specifically for a specific country results. ['US', 'CA', 'IE', 'GB', 'FR', 'DE', 'SE', 'IN', 'JP', 'KR', 'SG', 'AU', 'BR'] (defaults to US) """ super().__init__(**kwargs) From 2df29f3ddec395116720fa809a9f5f0f4dd2d3b8 Mon Sep 17 00:00:00 2001 From: Eduardo Chiarotti Date: Wed, 24 Jul 2024 21:37:22 -0300 Subject: [PATCH 117/391] feat: Add nl2sql tool to run and execute sql queries in databases --- src/crewai_tools/tools/nl2sql/README.md | 0 src/crewai_tools/tools/nl2sql/nl2sql_tool.py | 74 ++++++++++++++++++++ 2 files changed, 74 insertions(+) create mode 100644 src/crewai_tools/tools/nl2sql/README.md create mode 100644 src/crewai_tools/tools/nl2sql/nl2sql_tool.py diff --git a/src/crewai_tools/tools/nl2sql/README.md b/src/crewai_tools/tools/nl2sql/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/src/crewai_tools/tools/nl2sql/nl2sql_tool.py b/src/crewai_tools/tools/nl2sql/nl2sql_tool.py new file mode 100644 index 000000000..aec3f0105 --- /dev/null +++ b/src/crewai_tools/tools/nl2sql/nl2sql_tool.py @@ -0,0 +1,74 @@ +from typing import Any, Union + +from crewai_tools import BaseTool +from pydantic import Field +from sqlalchemy import create_engine, text +from sqlalchemy.orm import sessionmaker + + +class NL2SQL(BaseTool): + name: str = "NL2SQL" + description: str = "Converts natural language to SQL queries and executes them." 
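+    # db_uri is a SQLAlchemy-style connection string; as an illustrative
+    # placeholder: "postgresql+psycopg2://user:password@host:5432/dbname".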
+    db_uri: str = Field(
+        title="Database URI",
+        description="The URI of the database to connect to.",
+    )
+    tables: list = []
+    columns: dict = {}
+
+    def model_post_init(self, __context: Any) -> None:
+        data = {}
+        tables = self._fetch_available_tables()
+
+        for table in tables:
+            table_columns = self._fetch_all_available_columns(table["table_name"])
+            data[f'{table["table_name"]}_columns'] = table_columns
+
+        self.tables = tables
+        self.columns = data
+
+    def _fetch_available_tables(self):
+        return self.execute_sql(
+            "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public';"
+        )
+
+    def _fetch_all_available_columns(self, table_name: str):
+        return self.execute_sql(
+            f"SELECT column_name, data_type FROM information_schema.columns WHERE table_name = '{table_name}';"
+        )
+
+    def _run(self, sql_query: str):
+        try:
+            data = self.execute_sql(sql_query)
+        except Exception:
+            data = (
+                f"Based on these tables {self.tables} and columns {self.columns}, "
+                "you can create SQL queries to retrieve data from the database."
+                f"Get the original request {sql_query} and try to create a SQL query that retrieves the requested data."
+            )
+
+        return data
+
+    def execute_sql(self, sql_query: str) -> Union[list, str]:
+        engine = create_engine(self.db_uri)
+        Session = sessionmaker(bind=engine)
+        session = Session()
+
+        try:
+            result = session.execute(text(sql_query))
+            session.commit()
+
+            if result.returns_rows:
+                columns = result.keys()
+                data = [dict(zip(columns, row)) for row in result.fetchall()]
+                return data
+            else:
+                return f"Query {sql_query} executed successfully"
+
+        except Exception as e:
+            session.rollback()
+            print(f"SQL execution error: {e}")
+            raise e
+
+        finally:
+            session.close()

From 6fd02cdf82382c651b013d1a7938df413fb8ffc9 Mon Sep 17 00:00:00 2001
From: Eduardo Chiarotti
Date: Wed, 24 Jul 2024 21:38:52 -0300
Subject: [PATCH 118/391] feat: remove unnecessary code

---
 src/crewai_tools/tools/nl2sql/nl2sql_tool.py | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/src/crewai_tools/tools/nl2sql/nl2sql_tool.py b/src/crewai_tools/tools/nl2sql/nl2sql_tool.py
index aec3f0105..c33411ca8 100644
--- a/src/crewai_tools/tools/nl2sql/nl2sql_tool.py
+++ b/src/crewai_tools/tools/nl2sql/nl2sql_tool.py
@@ -40,11 +40,11 @@ class NL2SQL(BaseTool):
     def _run(self, sql_query: str):
         try:
             data = self.execute_sql(sql_query)
-        except Exception:
+        except Exception as exc:
             data = (
                 f"Based on these tables {self.tables} and columns {self.columns}, "
                 "you can create SQL queries to retrieve data from the database."
-                f"Get the original request {sql_query} and try to create a SQL query that retrieves the requested data."
+                f"Get the original request {sql_query} and the error {exc} and create the correct SQL query."
             )

         return data
@@ -53,7 +53,6 @@ class NL2SQL(BaseTool):
         engine = create_engine(self.db_uri)
         Session = sessionmaker(bind=engine)
         session = Session()
-
         try:
             result = session.execute(text(sql_query))
             session.commit()
@@ -64,11 +63,8 @@ class NL2SQL(BaseTool):
                 return data
             else:
                 return f"Query {sql_query} executed successfully"
-
         except Exception as e:
             session.rollback()
-            print(f"SQL execution error: {e}")
             raise e
-
         finally:
             session.close()

From c372641be811070589da47af400c38af2008f818 Mon Sep 17 00:00:00 2001
From: Eduardo Chiarotti
Date: Wed, 24 Jul 2024 21:39:49 -0300
Subject: [PATCH 119/391] feat: format code

---
 src/crewai_tools/tools/nl2sql/nl2sql_tool.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/crewai_tools/tools/nl2sql/nl2sql_tool.py b/src/crewai_tools/tools/nl2sql/nl2sql_tool.py
index c33411ca8..da70db8ac 100644
--- a/src/crewai_tools/tools/nl2sql/nl2sql_tool.py
+++ b/src/crewai_tools/tools/nl2sql/nl2sql_tool.py
@@ -63,8 +63,10 @@ class NL2SQL(BaseTool):
                 return data
             else:
                 return f"Query {sql_query} executed successfully"
+
         except Exception as e:
             session.rollback()
             raise e
+
         finally:
             session.close()

From 18a28261a4100fa5ff1ce5328802f914642b8838 Mon Sep 17 00:00:00 2001
From: Eduardo Chiarotti
Date: Thu, 25 Jul 2024 15:25:48 -0300
Subject: [PATCH 120/391] docs: Add documentation for NL2SqlTool

---
 src/crewai_tools/tools/nl2sql/README.md       |  74 ++++++++++++++++++
 .../tools/nl2sql/images/image-2.png           | Bin 0 -> 84676 bytes
 .../tools/nl2sql/images/image-3.png           | Bin 0 -> 83521 bytes
 .../tools/nl2sql/images/image-4.png           | Bin 0 -> 84400 bytes
 .../tools/nl2sql/images/image-5.png           | Bin 0 -> 66131 bytes
 .../tools/nl2sql/images/image-7.png           | Bin 0 -> 24641 bytes
 .../tools/nl2sql/images/image-9.png           | Bin 0 -> 56650 bytes
 src/crewai_tools/tools/nl2sql/nl2sql_tool.py  |   4 +-
 8 files changed, 76 insertions(+), 2 deletions(-)
 create mode 100644 src/crewai_tools/tools/nl2sql/images/image-2.png
 create mode 100644 src/crewai_tools/tools/nl2sql/images/image-3.png
 create mode 100644 src/crewai_tools/tools/nl2sql/images/image-4.png
 create mode 100644 src/crewai_tools/tools/nl2sql/images/image-5.png
 create mode 100644 src/crewai_tools/tools/nl2sql/images/image-7.png
 create mode 100644 src/crewai_tools/tools/nl2sql/images/image-9.png

diff --git a/src/crewai_tools/tools/nl2sql/README.md b/src/crewai_tools/tools/nl2sql/README.md
index e69de29bb..d0bb82271 100644
--- a/src/crewai_tools/tools/nl2sql/README.md
+++ b/src/crewai_tools/tools/nl2sql/README.md
@@ -0,0 +1,74 @@
+# NL2SQL Tool
+
+## Description
+
+This tool is used to convert natural language to SQL queries. When passed to the agent, it will generate queries and then use them to interact with the database.
+
+This enables multiple workflows, like having an Agent access the database, fetch information based on its goal, and then use that information to generate a response, report, or any other output. It also provides the Agent with the ability to update the database based on its goal.
+
+**Attention**: Make sure that the Agent has access to a Read-Replica, or that it is okay for the Agent to run insert/update queries on the database.
+
+## Requirements
+
+- SqlAlchemy
+- Any DB compatible library (e.g. psycopg2, mysql-connector-python)
+
+## Installation
+Install the crewai_tools package
+```shell
+pip install 'crewai[tools]'
+```
+
+## Usage
+
+In order to use the NL2SQLTool, you need to pass the database URI to the tool. The URI should be in the format `dialect+driver://username:password@host:port/database`.
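+
+For illustration, a few common SQLAlchemy-style URIs; the credentials, hosts, and database names below are placeholders, shown only as a sketch:
+
+```python
+# Placeholder values -- substitute your real driver, credentials, and database.
+POSTGRES_URI = "postgresql+psycopg2://user:password@localhost:5432/mydb"
+MYSQL_URI = "mysql+mysqlconnector://user:password@localhost:3306/mydb"
+SQLITE_URI = "sqlite:///local.db"
+```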
+
+
+```python
+from crewai_tools import NL2SQLTool
+
+# psycopg2 was installed to run this example with PostgreSQL
+nl2sql = NL2SQLTool(db_uri="postgresql://example@localhost:5432/test_db")
+
+@agent
+def researcher(self) -> Agent:
+    return Agent(
+        config=self.agents_config["researcher"],
+        allow_delegation=False,
+        tools=[nl2sql]
+    )
+```
+
+## Example
+
+The primary task goal was:
+
+"Retrieve the average, maximum, and minimum monthly revenue for each city, but only include cities that have more than one user. Also, count the number of users in each city and sort the results by the average monthly revenue in descending order"
+
+So the Agent tried to get information from the DB; the first query was wrong, so the Agent tried again, got the correct information, and passed it on to the next agent.
+
+![alt text](images/image-2.png)
+![alt text](images/image-3.png)
+
+
+The second task goal was:
+
+"Review the data and create a detailed report, and then create the table on the database with the fields based on the data provided.
+Include information on the average, maximum, and minimum monthly revenue for each city, but only include cities that have more than one user. Also, count the number of users in each city and sort the results by the average monthly revenue in descending order."
+
+Now things start to get interesting: the Agent generates the SQL queries not only to create the table but also to insert the data into it. And in the end, the Agent still returns the final report, which is exactly what was in the database.
+
+![alt text](images/image-4.png)
+![alt text](images/image-5.png)
+
+![alt text](images/image-9.png)
+![alt text](images/image-7.png)
+
+
+This is a simple example of how the NL2SQLTool can be used to interact with the database and generate reports based on its data.
+
+The Tool opens up endless possibilities for the logic of the Agent and how it can interact with the database.
+
+```
+ DB -> Agent -> ... -> Agent -> DB
+```
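+
+As a rough sketch, the tool can also be exercised directly. This reuses the `test_db` URI from above; the `users` table and the direct `_run` call are purely illustrative:
+
+```python
+from crewai_tools import NL2SQLTool
+
+nl2sql = NL2SQLTool(db_uri="postgresql://example@localhost:5432/test_db")
+
+# _run executes the SQL; on failure it returns schema hints for the agent
+# to retry with, instead of raising.
+print(nl2sql._run("SELECT city, COUNT(*) AS user_count FROM users GROUP BY city;"))
+```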
diff --git a/src/crewai_tools/tools/nl2sql/images/image-2.png b/src/crewai_tools/tools/nl2sql/images/image-2.png
new file mode 100644
index 0000000000000000000000000000000000000000..b3844f0ddc25e4d407143b7d886f4b3c2ccebc55
GIT binary patch
literal 84676
[base85-encoded PNG data omitted]
z08J#ORk+?W&8vA1M?(0*G9$cxX51~;6}CQPq7tZZ_gG0%$E*tpOY3;Ovb@M`6@YtI zai~oikQ~U9tITw&YNhX0Mk(oviv_N(wNQ8Y%+xh<}lywzN~~|nt8z~vgy7nGQVh&dWtw0=CY5Kf7Gb~1#Bn9 zmFwA_oQs*ro3#)ZS;hGn zIQqO{ulR(3dM3m1XGiMtYZLOlQZq9AcP*6{hq`EidJNCzKf}` z{)dIyn7-r_tFVfRE60@YP)O_H=c8Fk)qG8ENKfV;q?BH@w58KjZ3Eh)_!9|Cv6|IV z;tYKou-n*Y^PgetMPup-OAH%7yBO;>ZtR~mKEsa4VXdtC*7(6RY<%0_V)=4EXrJN?^Y z*1TEN{oowdbf%3Xt&UUmDe{7=s7!=B&5v0NlpRBBR@O<}ixbCI#tUpu+sY*N=2Yhn z!HB#({mrtMA52C#xJ>7tdRVkE4pqDXd$b5Fl|0ickvRvrUS}>ut!Mozg+4*<~g1$n<+Pt1@=t zfvY+LQu%h}pL`=y4scRJ90~8m(+Hnd)}rD99O7~MdU?nfc>2T30s_!8SZE13iARQw zrpzi5+zE<7cWk6HfSXTzP@;g(;V$mWq&e{(*Mc&k=O-Mr6aN|TsEU$m>}D;I3^eyj zo%Q?6rb5(e7vdzzynN2jPn-;Dn*9IpD9qe21)TOgEq=#8?t2WKq5#`c0&iZZ-}^og z%6=wY%6Bf<(b7pow2u$rE?}ogmCEqC7yoN#Fk*9{(Qm47euH_!!$@)=^FCYUV7tGT zIA=5EcQa*3=nA3h(7Ei-UV~UUs71knZ$=}WdtUay?}c~>nMN!1s$jp#xiAcEZrv9o zJpD9SSZG8hRL3liZA(dC0qkMrIDse=U>%=ukOc>BZ>}6*S_Q7x%Vn*qCzOxK#69P>ya)?G z`(E#*)18d~el)5-&eg`|J}Mfxy)@F2kwR0{v6+p*+&iSSlKnid-$uxekxbv66^Ifc z>|#{$<0`AR_H7P-ovL^H5VzS`DZ-D8Sb4alR4EZC?2b&@JIG@@ADCT!MPSiLA-z>r zZq=VCQJouxtCZa}aGb*nb3ZLB!X||)1ghFU-hXR#uEf<}c82OaAJZ&6f7Ja}PRl=gQq*`0Q6m5u60x_q=AuYZzQKve_5u zL7ZM8a;{v{l&ri z%Z|g~fH)DgPOIu!Qs~ho6PwwRp7|GW0N>-P_8~VWs8;0YHU=RzI95muR}hb}pOFD< z{6HOD!il{Eq+fZii=p1X}bRfa~N) z(XkF^?D0iJsl(PAIjs$Xn=~QzOTP6g{(+~fZWp*eln!DN(DYc|XLNv$cB7*PQ^VnJ5yF4wMS|nxTI4bMR}+$5k5UyY&L1% z4b5oVl{;AidN3cyx%B2c)N+geXWW=O>Ux;!(oSDVvWq%@@yjDhr4~@Vk?lMV&kvz+ z#!$9wGf%iZw39fGR=TEDOb|W!!9d|{mJ4_hY|*nTJWFX@TRBiOwmA!0Vm=QtIMP zmi8TCvng?5KMGH8&xH!>bt+~wNvO$h#atQ9xD%XQ z%$ew=&P6B)SKV}TMq~zgR2cx&00k+$ues{9W8CSUod3qj4!I?SaP)F#W@-GEH$Wbz zFCsi|iM?v;=zZq>yg2n?>}OTwl_DGNB+0Yh+oe-;Aw4~!Iv}lXzH_UKuZUM=+F;hi z7FZ=Kowp0;(;$#>DXkS=-83ls5}6ap$x`C3?|*%)$~L#%w#sfUShvgjy5nhMGV4z1 zNu;hS!CAk_EF)UuMTh5PCquo+i|C}yKMfF9ZA)kgk(0WOudSu8P%5)iL7W7h)`A7Q zKP9NWv*Sm;ZM;%be#e~OH7$E|vdK;9S#9}ZO=RjQeO7cSl*=pj$N(jVJWnObdNwcY z6ufhx0Rin)o?f2?CN*s(blUF@5MQ1C5D`VCT|0V^=Nvjw+Zx^>>B;JheNh+C87q<2 zkg|%ejVGKsVso?~X5Z{+BPBj27Zi+oVKhS^F-0WTx4+}k{*X{dt=N{yfe$H4=SWf( zdLU(lQ?tyntE3&B#0&U3Dpwc`T~F{)>-nsX&#*%scx(tmj}`+1)Uup})1 z!;%pGvwV@Ou7WH1=oUESDxhMfsvYJKwSCjkiJJbuYWg&LqgRP(BfxPsY61BfuYU@iia2tg?dFE@0mh|teus^w=ki$(F1wVepY?Hiozd1Ue-^O#PZ zkSiJI>T{pAPK8}NPVDH8WvI~%@4;6!o>sqxx4oX7AFbX?{tXka8X=qCe%H6!^jjm4bIcGDkAX4;Q-xh zF~@@xEwoey_`yy>k+Z#&D9oE$hYHQvgC$AIz;m7=W*mOCa3q?RhU{UBCnUmGV{?`(JK?IJ@g(~I4} zq#?KRE%a;XH4iRs#~e9Za?q0i2|M1%RHGt*O@tT2y;T*1R|G5i#GC5}xA(u}itI_$ z8HXFZkjZM4fY5<3xEs>K`xL*vN3D-m&Ii)8l=0?p{U*()(2B)-YH5c%WNy#aO{;#V zP}{mCB#=d28f0gX3ILpjF)F*=>2=GTAB3AwLtpmoSEWB%mC)X{pw1G_TUgvr4aD33 zS`gw1*h$h%N&LFtr{FC~YPaPHNaxJl{Rga=zd=Y;wa25Jd4Fqg|6G1V^Ig|4pc&0z zqf1!ZQFL0bqKfb|!;vOL?%Y=QJK9Uo#?FvYHm(CZ}UGqJr%wR9G z=6+81zJrK?)tlb^>T)u1HLg*h-$91dnIQ_t;^3uGm)60JHKYm0grFch7q^nC81rOp zdM=y6xpznSZP0Bu!c;&{xmuMIE%Bw9Xuh^B`f5L|+jB|~IIutlHdG4*mt=TyMQ9%X znoc;hMUs2FGvaqx*}UVe8PnSV3?(l!=+E$t2F-@o3x>|sy;JM>ZPVZ#=K6i;Z(TQt z`tTu~IZ@V5HL3pd(!J9rpYL0fk5YO6B*zC6-)@={=qEh0o)+Kys^-Tf;d>fcJGUGD zgc)sQR0Zm0z8yYhAf<|`|hS64iEL?Tb#J2lp(p&RY>@IJGGPX{y(&&ne?%bTy~Qp}y4bY#(&)g(r~`Mp1^%f?fQ7)ZVpw6_ z*_Fmiyv6PJZdV$pgO(!^YNIVkOxejH9NYop&;flvr7)|J$7$|#`azM4Ha4kDOT6=^ zDQ2W`AKaF?qtI-c4W^e$)isry@JPI{%*~-+q8*5To*z~WY_>6j7WejjGWwf&{%e40 zyIM%ZXeCSYHX4B~rAB;*4au02YE z!|LxvelpmK0QmHfB#n5Dyv?OFN2yz^NBQtgD@cUCJGdY6x?&Vr1GWK zKvQb5pEco3(qnt;-nW|UrX}>}MG8liQ+Yb?ZP`On|Nca?5UFB56`3~9&QBt>$Q~zE z3g&|{Cd?7ZXuibH#hRE$bx&MGO;{><92GfQOctm)YE5r+^3a=e@om4#UxaLph|~`+ zIJH@H{?J3q3p+~$$&n5-J9Yd#Zf`euS8@Xsad>3;0r`fPZJiJF88*&6kV=%Hj! 
z2et?4e8qr_thE?T372n7yXazhZ3K)99$W|so;?`q>(r(K2pp4Ob)&!A7$m-ror{Pr z^zwZlOAE30c_|*P5-<~a6y#bTn@+39t2_sSx5#jbb6wl7F0p`l_HpnYz&HI7}J%u)McM4rJkOc55p-QG*jRvV&amzyvg)5#g6PE14;;b zIe}WGr+?LvQA{>vKkMB7^?qk*P%6-fX$rFv+2D$D^^?nThf@(&W`P*t_wZpao~V&c zuRyVvKpi%pjG^f-ai>4|f*jVZ{E|{6>@=3Lp|hK?oO2guoCLK2o2e56@yEF?(msp5 z8$kUorAATLBdX$d)8kcu5}y9x`tQ(vzXd{r@<3Z$*lD7|&)lDr-Zzn3IA6J=*cdy? zYlR!>uIeVBj&lqmWix6JySIm&%@Ym1K{tjv1207?&N2s~E&YND1{J|gs`G=XN)!Zj z8QDiw9k9)t-i0&PgUAq#C56;2(1tCppUvdA%wMKo@Zm-F(!1_0v%N~5R>_klqR+2Y zV!DZy*}lf<-<<_%z1OjSe-)y3OOpyExv60jC(^=!U9sPAKcgvJ$t$Yu+@CwET_8cs zm(ur|f0gjR&-b#j1Kv15MUU~{N&AilxIf*eT6DXok4LXUf$tN@&d%*jTUh^nAKq#0da;xVV31O9wV^t!OXKT;G}j+9z9EEJ(Os;!C>RCwsI3jQ6f=dd2}OAEx*m z9NKv2@6$kwkzZ9Dx?q&(y8+HFwWl!zX9fh=x%MBHoDBR}H$oRyARd~>#SQ0qy5zXjB|Gc15!mvIIp)Vh zfW?75F_g8*y}`GRyq>%2nC>Jmr)s%dYiLwxJs2AB1!sQC?f-3?KU}CN(sN6Q%`iBG zE7$#+Uxak{e+j`b=(vvV*cY9Ohy&@l(YUKh76LZ^s%-lQ_r#F#9GQK`wKrS2!U+22nV zo;P93h^xzjdc6-1Wh}&vyQZKyq2gLw#YzF@MO^bt*GcZ^_>;kH!%%2X-3BR_GAf)UR=xtDoylxKV$qTU1>j_1xN>jiTlxOLq^MgFIk*!{b>cQ;t$p1PNo4pv2 z(-Q1L7-GwzAigGy3L%TPb$N7Q*juJNK`hJWqy+0WB%)KPWHKp@Dr3B+t zKBR>N0O$Da1bp}A1}U1&CW=9cXytI{Lagu4IE@x8zE)=6(S*pvT~r?UGnUdm6m>@U zNG9MUCbZJli9779;hjLyf(iM{D)(j|cCQHr^g`CqGEp4{4YHBhW+nf@1cNEz?)(fC zKZ;UNhTl*t4*W3bn5e22y!5HWcfBZo;KwCFh<>(-k+Vl>N7>{mic2mD3w+n#|84U8;Zm?J1K$?bsgM zQeLOsro!7(gUw?lQQV5x&c81^mISE2O#I9=6oMqD0ZD!NnbLaPN7nkOe4?j7 zh-5I<5$h=fVE-<7xkx2^G{;QbXcC}%XR7zJb0{`?n3dH}&)UP+?z2LS`Ng@rB$xh$Wrm3k~!I^{BQb)^dr383eQC?WqYTCLJ)mK(2stHMK;JQEOHY#5>SlJTrd#L><^D_2S|JE| zeWHo`u1(+-8ox5WPUk_h1)*5XT<{*Np<0Zkjf+Y^QXE3qIL#lFI$x8ur;WLjdGg>G z=Q3JaEfF6(1NSphgt$}Y4j%3vxd^?^Ypjww8hja5CioB_!NQ4QJTZfz{*%$1R*`-- z-UuBGBsH7i<`Nwbco4QqtuF+kn;8A5dw}3hXls`S3avrq2>iB`*pG|c85IOr-my2F z{GxMIjV53AbjWJ~(a@zt(FEs#E@tBF+5LcZ-d~;rPca{hIbY>XIy*R|cIwCP z<~1Y3s0i%0tz}4jJp-_PNl>3Bvdc&P?!YCdi{bDk21p*LhIgTT{PDFRM~+XTUA8@O zeA-Z${u1L3(ajvW>P08ORv5u$2LX+c8c~?$|^_o zgmvK(Pf2@N|8_T`wbtj~?mlrTszQsfpA1$IOIEtQ_p^`j{r%%z5Khl$6qU+QXKCh` z<%i{8jtjJG67o+Llb4s0qWQE={CLoPO-<45iDgJz=q)$xZcQ?o1u5sidL!emKrG1M zhrMoQa~IQ`zm3RBt?=f=oX{QMHeAY6&G{=rtv$0x3OQt%jG zo-S%9{DNZD$wWJ>DjJgzSacB---_;aB-_>U@02G*82`Y8%dnnp@yj&@FJCxX1C>ZK zNJehVR7A+m{7XRXi@zO}nhs0~u9q-Vx+&^Qr5_r3($4hyuh?Pe_B0Lya<&RL-l3Ih zFg3VWvCkKTPktR)Y{8_{ED-nedIqU+cG6~imJZ&(V>|(sbN}8}?8x!oiLRT_sJRD2 zC-f^^8=hqDEM}KPho5&4)Knfl;3FI2GLI6u8{Mr8ETSu9lS+6fdpgSu7*O0_-zSS( zPiBvXZ9mSAGIqTn>l@e`zLD1JqTxTYVP55RDHk4vGd~m#7M_uQmfCHOFwIaW*dL+T zYCOXD6bC0$x@RZd{2|#XXrmp*yTWI3+wU1NBaTIYrW^aCq&3kZq|?GA+S76h_m18e)LTxcEULTA!>HfP?ztF>ej z(U6JYlzLNX-5?@G+NTJ%1ZM20y!VE-;r?^=LICH&9idJS06=+_pUKmi4Z>AU}lOSrsNnzVT+ruv%L?0fu&t{#9!#FRM7z4Kum4n z*DGTmPcs-JiZi=Moy0(0s%K!lSq?wuVd?i*KD07U982p2`_UU6*B?0-u;uB#O=w0b zx^L(J&0q@fv!pY#d7}n7fxD9rExaDqA4oVoi&*t;fkn?k&I%?)ejaQMxtWvI9Vpdv zyPCy!_HgZ~0|+6riT-z=!CY37Lz3~_`}h^Ma4*dvLMGbUh_D<&7(Q)6*~Ak9xRpYC((!o>HUC-Z%HyuX+$$kH3SC z;_Q9;YA4L|IAYt<@oD+>STARHYgos!JR8qH#3%>wSX_&p@Y zL+N~hE}4tmkE?N*iS6CX)E{^JN?}V~$Klj@lzk-JYRjk}eS{S}IIRxNih)zs%y8%{ z@LZ);LCKrQM3+;B&UzY?kK$L|0GPBc{A{lFe@ubZrd+MMN5S+qfc(7e*wmT&^A-v| zJzEfUtoV)3=)A~v?_3;E59IIH)FE{P0rgWil?V{0D5b!v*; zPe&rv*gmH)c5Ct7w334}DC9JlLv4#M+Iz|)0NDU9nRCqsStcKVh#5J|kjbpb5m)hB ze(Wx@ZN7n5{zZa=gC_McBIq0EB-{Zt^^l%Y8#x$C= z9+E)tl5#n@JKc`2`9-X7oSZVNceHfNO^DekUUNiv5wO<$2jR^G>aKOqQ1?sdndetw z&XBS0M1_W)9m&GRGYao=*|rZ!uYGp{ELsEiKJ3@`r+Q}v2=0oAC7`8Er5xvt*WqGc z`Y4H>ElRv(Gb2;7S9=f>ve_?tD6}fuJv_0LhX`N230XshwN|7l|FJlaklY?XM{C+* z4oRB47ZiYO&pdfHDdF@s?AB(lUlR13MWD+1p({ovApkymO>ZDR< zrr#HtxWX5L7mfpu2%MYL9^kCK) zEg{4dsTGqHbhS=?^>0mRd0ZA)og@xo?@i}w^tf&$ww5XK7AP6fc%YNIG9`3?f^C~O 
zs==n&(m1cPBIkcBk_{f~CO&*W0OzLKvxXQ!mf!96y<8nxq!7J0iONiu^$DtXtxzg| zIm$-F2z(2_IKTZAwGn>CvbSZ4Bn(=2rhf3vUi}V|5=}h39sTU(aEltq_fb!83ew~s zua_6G+2Xc08R&RB$-Dd;^P^WSN2PQr!20zojsla3A7Yj2>%jW+Jjd^adPBBbHP!Ex zm|y(CF0ImX77A}+syZL?<$MoVrq?_lswzZ)JlmH5ho$nb!R)9tMZq<4~WiIzc8i3@e@K^u@ero#ak z3;c{K9%;@Om%^IOZDWC0w6!kGz9G838#gtGDSfH}K|lk_GVw23@H%T6s#uRu95RL{ z$ik8(5}@I?LzZxDI-0%YQgg+aJ>JN4$}eUPE*Y+a&rgZ-7pwYH8yy=o{<$c+i4=yj|giPX^-S*t?IzKIr-LA`)X78$i&pVRu+s5a7neT{E zmq^43iHsyaP%=|F+#Q@bZOcjXY?~_gF0n$#azvE#$ebWYhUgjdBjDaWw9pD0no^%i zPC~QgD0<~M_o@8MNkGz^K;&%Xv$KKI|HW059FngP5xWii?Hq4j9ylx0MwjmxnrRJR^13l$;516h9pI&^%7ZJ7G zhLzkZD;ZgiKMQ`Mo5RmIxU(W|?g_%l$ao&T+N4$acT#|=DaRd{)@pPd##As|BjIeR zrxh3W6**_#-m3$#?U=sXbXZD;5x7XhG}mr?N=Ij0XEHRc;UFoF4%>0XRMp9TX~|{F zcFNvzj&;w%X-DTn)=;-pkk+ST2DiCtb|JZFA@7>f>e`abr*U;|1TRCTPap=`TQ)(9EdYU5pmC+Zc~wueoMky;##AsTVx)m>v*Ch+@oj<4S zzo|4)?i>8l;nv_5xT_hJ#Ydkj`grf-GPr>y9-E=3{Sg@+#15M0F8}1Qs&YG)M01rj zEYn-f(~tJiz_^Yo;aN^M)8aWp`A%S; zdL3H|6;gN_I_7Xnv`SZxCr`y_YZ1#TOWEfJ?3lYe2Z_sqk}tHAJzoFgGGz$Z!Bdru z{>;Aai3Fm!t2nmLw%5V--5wy{RW266u+mjLT4_W?D;eWZX`B*W>JKI zCP(`vesblg^*XgCRocG}JU@_l+@G3dm|FFXx-P$p@tE$^l`xOC&wo+QB71kJ;pNAf8RM zN+MY6w39MJBlVp~%WohDH-UTI<=ST~>)qBY)PUj2hY^6sX$fq`@`JIQLQdNP492u@ zDYsDSQ!^E)o88$DmLS1^RgZF(-b}d_rj?B&_x#z%>OSl+qJsd*x7p_x{~W0O__X3# z^uZIgIKD=ucI*$$=eLy+t+Mx*F;P3SM`<#3$W%3VKEmaKYJJI`#+{_g4&U=)eVXu+ zC^0`6^RP!f3>^YgzBB#$(zJ6(4W*8ClwWu>r8>R$CVPP&2$@3p`_7ZD8yM!Xaud#hh!?5;pOQI*Ccu2 zggG?7^)2oDcIraA;E^`07;JM0A6#l%-m8099R>nsFU_ zpShlsE4_`+L5k(fe)1AFgW7PCkifLC)At~}r*^cXGdVk6%_G7nNo4ru=_IA7R`DE8 zBU%4FSKTNyH^any?RnlPz5nbqSe}Lboaz-yc@D{TQjuX1k#1XMh0~Lnqq= zyUrU;?W~_LzSnLWh-Duw{jXV(JlC9;bYYm`QAl1_Y*2)|t4fRZuKPuv!O~D;CnrZX zx8~(3aUFJuzRIvqaA@7|S(GSAf*G8O41Eu{IGT6Y5+SkLRrMRRhFi7i9pm}>PAqDR zw<>dhz^9uK`hQv@HyWWXe*cMewK#0qXEh^oR-MJR}9q_OCalzT^;5&Uv;gQRuK zWItd&trtr95#-@;O5DtT)c!w6@SanCUXLI0V)ezh2~LRbPPw|gg7hBl_-dgPa1nhS zFesOuRw17df%D;@DY)NldcNl4)YZoE%ITT)r|YFEiN`LUv`ZYYF2D5)5FSnUO1c^L zr6L_tCBel$2(Zu%1WScRBUzJFU*zu9E-lF&t%X{g7S0MpOVTeMADh<4P|tY;Q@4Gs zUI%S1wnJ#{-~C?eJ`-g&Vq4@FVEFmP=7L({KX=J@%7daqf{AesSaphy;oP$X_OcD(na^uC!fP+a z%I|TcR+akFA_?@j3=La|hPMqg_dUHu_{POjx8%(CM$qgdNHd@=q%{7bD#I z9zY$qI2bIPVG1TwD#gm9*_68)!BFr$9C0T!L?y)ikwXD4_;K~Y`xY`86UYL z5!`I8WAujn_G7v3)zXKLX*a1mzpslYn4OH0axERGnj7!7(reNZ7p!O&0v~poiUc2| z+wUr7sr5cj{9tExGQL$(OdseTwid|<8QRRCPbWLt6x;HQU z<7AzN&1xJyia(>xJf<-@T30R%I-IIB4@Ue#Jzr>b@di$AV9%mS{%&Ad*mxL!mfji7jdjpNIz;ne!UOakj;#dm?PmL$qB9Zf`(h{J1Fb!2smo^e(=8zg`cvFe%%}gR5MdwJ39UEvx-HNg!`5G2Y>@ zi>7T?ATta?Xo}t(OYR3(^t!$+I@JAq*$y6r;vbgrt(z}7urOrGayg|6@6$CL3J|;A zI9{gaDS^Xa#&Dz#>!4E|hT7Ug4+gQ57PxtSuprra@hMvair;6=I*)@E3 zNe4(}axZSw@;U0NOg&$8c))tNkQ^ZTTgg#X{e_U)j*%}lYj_E3vpAOy^GPqNhOUwV zd0(-rO2UQIL~#)Ji@&*i50ooPtqdX*CM2NvacSS-X=s}9-{}mRS6}CFC?G{TD#@|?@z<2Ezn|E zChYtIt)3N#?gZ#r2E;x(_j0C)JC~h#-#soTOvLcaq;8}Ezra0sH+~`mvqH8CzYOp*gJ{KzsAfM)vbEVY|Oi3GU($%5ujHDK#DF6 zyO$mL`ybP#;(c0&|LLn1efCJF=*tvXU&Fc%H&U3z>BpaLcaoT7bSv!Er+A~- z#345vYJ^z{`kT~o`a<5e3NP;ozA!nVQHSMx%VhMq>3N|1MB>rrxN?TKyr$Lu2tgQX zz12Ty5Un$Pa`U3uR%PLbbGECR)4PnfeE*3h~J5qTPe+@MnM>WMkwIl?^{ze zA9SOnn}bF<r?>N>2O3hz?=#yDSw{Qr=gMVU3nF%BbFJ>{IAM|Iz8z`B zi!vEw?nsR{kzBaqpyl;K9Z?Y>)fb>VW>FldBP&_&Qd^FtjxiHi{}h zogYrGEEBQJp8bSrZdyZ?FZt&|#4&94^qOlMg<~FkMC!+n4XAH*A^rFlo%_CUN-U1E z%G{1zlhvScC}-P8F7C?DOgIHTU(5|- z4kTf)=;9@Gmgdo-%S4a%^xXx7TPgj3(5W&0);OS{MP6>rwGLM~Tw$&ih8`rtPJC!% zFI|fp>Hb9c<5Mcl&!ZStN8r2;K7xE!AAd`f(&r1XF^Zo?ZOah25ULx+A7utnai&5j zSC(K@o>Q*kZO;^7J~*z~h#X^G4I=^EXwy-rF8+C!!8nhxcxyLs^pQ4`y3NckY|Kp z9_)@|Zdr;=^t>ghYz1{U*V-xwCU}4m8k1Tt2Ppiji?JBEw?9jHTJla?eV}+#?aRLb 
z%1k{uZHp|9dHjtJ+CF@Wbmb=5EtH}>w^CR4*6P^!+%i(VA27Ig8x-ZJQZGTsF!Vw0 zs$76&tm!2?2Exr(seT{tEx~}nybKl!)Jk#fHuf34J=M!BSsgN?MkgC#B2vRAs{&UKn3PVpMsg7qb zItbwC^}*QuaJNEB9}hA3$1V?+1lrLU8uw{OBIl-@@`^4A3n?H6Z$s%#qrOFZ)u?>5 zpeDm`-Cmqa92at)cU&G}udOn7tXdb&oeklmnb@2NJ5y+?=Ih1jB{1=_#J1VZ5G%gZ z5XsiH9X4_OC!oy5dJ}Qy#Nh7_FgW$QsJqL4GMbD~CN)ye@Vze+h&R-6=e3uYeTD*5 zBl#I&Co2WM{kUa^Tcy4dE0s~c(e$N`a2d``+<{#T#zXyKIzEVj{|_bBZ4*e_cu;bmT$3=KA%6M;URz z0a#`NQ%cC>068f{CuTgC7Q0v*=Y`D7?Rr3ty3;?d>kF7z6*)$Pi(Q$} z%Z{KYYQ_Qo%Z?D$qO`kOqzcy{p^$&sfe1NVRWU%~ydN$B;@#cK$HHxZ%paWROmp+A z?jL*XmTP|TEZZkKU4V4zup1N}5KJ^0y@wHna(hV%@2*FOlXVP>e%p)}m*>0!^2gVh z=IGTK9|Ao>%s``&G297C3#Wala?KKn>9@isOt|V}XQkV30~QAg*-*4F{i_SH=FddL zxHq-^T9b_%{J6PRL3O9^pOs5;q;}ff819Ux+v!~(-*Mt8!%*?Mi0TI8f5xTsfl{kp1a?1G|xwu%Gf}}tP0k}_+pW|L2OVwnli(c9OXZR8n|KEmh+$Z!K z70NFC3{E`*Kkh4Ajp%-#nLjhhGT8 zu#wWKJMse?`{+gd9T130fzqqa;}>@DFdv_o#0tCZtaLcp$esX@F@uA2=sK4z_K)(i z$Z4G5o4|9&u%U4XO6^g9`NqFE;M-+~)|Ro(c|3B*j0f-C{O19xF9!&aQytHgl9E9G z8-o=k^xOZ(%OACXKYX&o@!LQCf86AM9u5qC7s0Qn5c}`Fe< zlOVJ5nFi9WWbjpy{|-cbgXY9PQQP7W{TnXiPygkG)#+uNsm>AxP5wEZ(prw}QZxge)ffQMa8%%r0a2>K{-`n=?BXe6oIjay}mt9K|N*q;ch zRnJvja)TS@N{m94Ahuf>0h_8Jh;bOT>yX{MfKkwqq0{AplAx2mT8RBDm8=NL4m2m3 zN?;pF2)VycxgO$&)dDk{BjQu0OU9F&TenG#9hYmP0N%EFM-7gQpH^%@9t0+3hg!&Q zWgzp{7b;e((Y^?7V}e0NFOuYqt>dba!gXL?TZa)g+o)9AaHsaX)@B^B(XU|u6WP7l zBW|u@YMuXPREjxKh(1&f?#?HrKoN4NP$P+|78^(Bc+?gNo|ap_>Fuc_*K9KjG1Zwr zR+^dpPCYZ6OCNX^q!NjJ8<*yVuFs*a>ojPoT_dNd0m~Dc7mz|$YjK}bbzv^$FPZON zzTH9&$p~(<#-wl+B((1@SbWE%JxRI-z;>Jm9&K07QhrEzd#BB{ponWE{#O!kEjZG} zU`*}n_t$bjuPwgm!uCVGlwT+X4u`~(G(4$Kr^TIQ*qbNHO1Uqm6}|1`M@ZAj10*s> zgXc({9##&zD|+ikGVIBcnBQ8J;(iqNi_q!z0gv3(ih|`{nECwzpB+j+_K2zwclxP^_vNZ(Pv?FFWkd)jDe|y!)$o z{{7+DBVgyc1}3%#7e35`U+a!>w^XsDLo{$bu0)o-71otK2R%l=e7R2xE7Y&kt@K;q z8`Sw}KP&O`k$1`f&$PG?E8yB|IfoN*AtvP%!IFu11Uz=ry0ZY9gPm>Y#itKOd%#vWcY(q6<;XVvK| zL~TLV!;GW-Xj_7M&N^7fgglnOhPma$ZR02~C%3En@1s z=!Gj8s`xo;kotfwPxBhnq?T$*YHn`NU zkgvc}oLL6jfDs2-P?MMLCQnrSsve=9N(ac@A%|X~?k%>m4>8$@=f!q<^Y=9)B z)jm|eUIRfl&4)MO==Z3(JJP^Xjnhi51~11p`femshpy&4xDN~7kW_Nz`nsGY%>>6A z@{fX;Hz<@F09;-u-CzAGVb`ADg?6{|SHNp?I`(!Jq(1SA++nTf>eoat!E!6~MElOm zSp4bzej4Aytsw7_v5{%Wl}Who7p;7z$ov|I zMGR3rQoBEhecsFgaKpTQyHp-D8)+e)?BCrk1z4);H>h^iIFL%|6)!3`h+;!||B}7f z(Sp+J>1pKm$cq%TniYrjM$GjoxzRFYG;DU{-{ThZ4B&5eoGx@#D@9wttus!r)D3>Z zywUpHuIoZFDBjfi*?8W6bJXI!qh(GcnA1y0rDb;|TT$Yz(5ubkjGe6|eO_p=*x6lg zxgqvqB1I;H{(a&)fy#AKrLJ#O$gd9${_!}sO)sAzBW_K=%g?@_qLgBnd?ouz1=Sf@ z4*p@UM~{1#tg2bboIf|o^zQQ)hF~H^;!IaH#q{_4IIXWvO?m+l3fO-4^T|_mC0T@y zhe5vaC6A?wmZezD`s8Oh##XWMj_zW1CWn!)jP=^R&>tS4$%jsxkVZBaRkA(XS$|_P zquyDIH;d#mXC-IAQ64#J39v%}!G_o~Rh>dHW|UknY`<8~8KK1m(`Cy0pMZ$iQrLyV z#-U1G%$-|lYu88nX2+_pmYp8Edv#%7B@?Hav7M*#9aGP9xc%t`$Sb}XG+8r>4e_8Zas9A}iYAj-;P=Qxg)(@FQ9GbpAN7Vb2bvR7u5jt18noC3 zXBZtg{xWRsuno?b$eY)hPqZoFzOkig+CofvZ-{L`5QcusnIm;bX7~$t>Ph6j9|5Ai zcZ}Gz|KPzpNbH=;N6{ZcbitbJ!XUNfMw#4v-G=5U@Y}lpR+RrBSjElo2~(fjwdzY6 zHBxvL*aS1NRIS$>NgWLOJy=m2CEU>54*#XO%cfqdDo^=u7Q$}~#!s{{->C{&%8s=p zH~>O6`eBy`fQ>@0vf-soms!m3TAvItUzMBLV`&v)@0*u6zfhUC9K3vB`uZ~jhNBo` zGgA)Gs!)rV5jZIh_Jio6%8lgpzE3r$Pj}b_l z6d^Fnl0N0w$A5y=v}uvg`deR_rE(XVkm^L}6SXNX>9`u@n)GY*>fM#T&qYWPRS z57W$xEV=xr0u-gBWdGoWv1vR5siR&^kb ztBW5?NkFyk$O*}i9kZdJEiPNDX21rw9QU>&xIJ2pUyK-JHS3w99z;}K8h)~D{mxNg z&V5u#=2V__QDPoAwnsy~--OyzCH3@$oGP?li+LHOt2Wkj*eS&_31jA=-EM)w?~Qe%MnpLu8;yk!RoxWdwh-k-u@rH+{`;6xRk|Z>|!l;M!OFhAxi0P4XW2A!pP?2edbC+Okm@aco)_f4>;o~SA zWvx|dPfhue)=n$njoXLjN)>Fa6P}VToX|x)nEiDo$sEp2>; zY2@4X)dbI$Xf6jK&nrh}dBN`DU(#gpsRa!&34a*Rps?ju*yum zWlV0zEBwUMjT3%m^fh!3Ad7E7PO5Q4L-wq#j$7WY1e>Kp=QZ$r$z?Eo7_k3%6@KI< 
z6jZ2ILfTR8@@|+(MX?k0EP8nB=QI$RKn?l)q4Sf76GhU-*Cz_=rmZ5P%wrdikGea5 za^-C~Hi^Uo6Ur26Q99%Z_Ahyp3D6#w6OT5U7*Pth&FHUk!^H12+21MZ(_@&ON~q1s z3LIJ_aiV4C3dYsx5uWa-NSIk;IQq;>9wS5axC2D@T%`0Bkj_qCw{NPK*AlLmm?cN- zoC|KxKC5(;p=anZ6wKj2;F)c?Q_B?26nL4k*Umvy`WflrkpA!CJZ&((*}9KKZ(3EMPz4Qy2-8HH4dkpSLlk9U~Iy})m5Bs*Hy`V)nr-PT$2H( z)tpWK{V6@ZVu5=z`W22cI|>fFP|>6D8T8b>SIyzDNO~hY<{nA0p;?a)$=h7a*7D{c zzV<_ zTlHD2ovZyk(eXL%sk(8D(vmuJ(uOIJ=E&6&<<{e9`^fuR&Aha0otyPIvK^SeE>D)| z*@Mt~Y2I8l2!HCmSwV#lC(%A^EQBrKvT6^pN4+S$)DSw0O9rL@yxxF=MQQ6+7!wIH!+^tVj_ANS*J_*Ib_ zgOj420I|UAuaRmhjxP!v!G53Vu6COKJJX^Y@0e7B4NvqiSKIZ7OZ?NK^SWeYE!E&rNwDlEVVdQi2u8R! zB_u*7jmIIzu*6i2vM{a@&5cYLjI=XKw@%Ph2dh7=h}{MXg%N;|y}>LtI%-0M44+#8 zVXv#}l?AXN`QqHg__wUI7e_nfizOEv<_Uj*Rb5_laD@j;X=o@=Tln#LAY z6ux1V>$XEpykq_yFEGEOpf2xMqKEV3caDO(QVBDMOLV?9dg!lFjifg(zQ^-Lw;Dg4 z%~}bRA%LY4!EzfbfdT}78zDIMJ288e71A+YB$|Eibd!`M7u9Q7h(#ZlKNa@7Z9rJ$ zY*|T6MvoV$Z|M`N@fpWKmdJaNo$Lwsap-*hq>PsR+N9g@RRmXhRYw7yx{9Jz3h!52 zj67N|`Ro;WW<_q?LT_5*fF>dD11qAB)`NWvG9T`4fEezV6kh&86N$Cv46qXlS->1TZXmHX+G<42IF3Tp zCilaOIPD}4*awb!P*fx<1maZJe+t|7TB+2r*_oJY)I0Cyl;H|{e$qoGbdGN>JXUD< zY>1`t1cU%|o(5D5k$`#q8HUiSmV71~*J>+yf;vJ8@4Lv{i4n<2}bC%I@Ty}o9=uX39 zgrv6gy960k?60Q-81!pk=6Br|ROKepqep-2OnRFWo1I4KJg7Kgas^|gsE6gcEMWo?#KD>wE7^T*c52dUM^b&54+=Mo?@y|NBO+>Oj&}lv1J$r5k_|A=Js!6 zw~^^3tDiFaEhSmGYRx9(Qp>Ry9==pDFS5`0!l`g!>zTbOY1N#F2UAr5^(OwQVcz&HSBE9N1!`a`m$r zRew9)0~bhu*?5ANvaLI726z3Xc)QXap$Gfb!d+@}0~rSU;%%`;J=shBVnNS1VjGxh ziwf+C|Hh#Whrq8KKi07HPIV#;*LONZXuX-uPLU<-7M(#x^40ij3#X&_m^qGy&4aSv z$iJA|*HbfeLi`Nm6RS@1tHOISAa*sl4c{4Gmr%Dqv81K%Xn?(+oT#&=y1NJn2R~t= z!}|^C2AaM>`|);vbJiL(prBS2(?Y~!=u?{xK89b5oG&PvgipYtLmiMlXeuB`F0DNr+7!@OpTT8&(|7^CNsuv5R z3exSkSjF$s2R=R!j-1q&VO*79No8v(ZU@b842O`OR8&WK40+-)PTph^tMgs$DH+V> z;U~Jf-|2XdnJW^+B9lE5?eXGQ23*gvGg8;MN;%(N`0;&T{1dA`*-zrZFZWhKG$9uK zbdoXflaFyFn7gXu&!&W`)?UoYi|28Wvli6-FMdm4VPWO!oh^q`<|Dnii_l^?#b0!p zJ_7|`v(P6r>UDM;_>?)MbT^x8RF8eA?1m&$FOaatsk1=KaK4|$It=BZBTqj z=|o7GS7}smz8)+NA;RSoA0T`+h_)yqYCSS{RGKVT%Z}3f&Z7Bt?3u-Q3rR|^jgbM| z?F4KdRO!uEXKYl(xd8ZlYsK5{ z9nzlH!&((dW#b?h*XS`jB0St$jbgKhX2rAU3s&Cx&ucV%8kbvT{5^u)Q}Zkjt1`cH^hX_C5OGQI6fPFeC`3$LM{^izT-nBpoExuUNfP^mR~D3b zFz~sk+!QV>{6{cfC0x$jwaE^Zc(va4TAGihV26%21NeKJrozVD9OJ;AJ6R!&&DyHPr{W6vu4<`Oxa{P-HO*#0~+0s zq<-31;%5i_grW#>YeNlA&@lJs>TUf)xv5BCo+7}xOcwA*$8hwrHtxd>c2a{U=S6$} zp>E+FP_={WHoNkPP(9G}hab)H;0YxdJ08{Zq&pJ=tE4e#t|3Fh3-6m>nEB6(j4&F@ zo~O8*Q=;Cv`TTJX|IE(90hP zjLu1jv(qGMc(lXqhMNZ zY_Eg2W?GENZ!9@=HCYbB>udC8w$&{$@5bcu=rqK-BO(_sDGRc|0$YKaK58^wuTph< z`w?JnkhlThJNDem_?F4`9bVowtKf3nWHAoc^>u;In?Bv>VB8A|XfLu`2W&g@9(pR%}v4As3JX z(W&zIdYif=bghw!5`X;z#>AK zvO>Nv6g4!c$u*>*>~)2?uFX$FPS6Q3pl?(53&$NHHD76W7eZJq-A+m*iW;VFsGt!6 zfBra-cJbXVpk}_IiB9a%^(NQZ<|)VAW6gJb9I%RZeK~Bu5}GF0UX0MwC>mzw2xChA zxS8K|E?ix`ae`$-I$R3K=WjXu5BSE8K2LUm2B8|&JE@gI+0GK>;31c1`2J7(KR(h? 
zBFR)xXMZbo;A zJ>RMXM)V!AI^=D{E)Bz@2hW7}Z+^h0e5vsKmni#i{)>5{E<`I(ImDDkV_o^O5OIqI z{u!fQw$4X_mz#U9Ss+g3`w4S@?!>nO`6ce{z>4U2#?%^S18wHX$2BY-C}DQwP9PKW zLYA7I0Ap`BA)6e2HBXvLL%1+C8Qy{Wb|e8`-N`zsTP85G=$>TCx!>`d(+2*R2`0<9FNg z+0azowfwR~@$uI1mU86j;tv(290r+;+O;SVd*6~S-|ZlD0&7@l%H78nbG7r8`NTPO zpX6Im&tVi5?!d7FF;zB2h7~TC?IR!}#wlzPonT~9`l)qd}Hf}w}A zyemjW&s0XyYDTF)u$2ianm{Fm5QN<5;N&*M>)RaWN?ahk>a@z?BwqbFV~H8- z&7E@l$zM78!;ZZW_B66nZ&3P2VrNF0Nm*0RpG2i$Mg7jdK}Ts4EGGIg!q#$zm23&_Blk`H(Tt09Y&p_6&qyM7be<)IJL2gryR+j zSl$Q+od4ai0mDrYzq4S6ngk=_%kr>(Gp{%Lt@`ZT-3~YJn(aecQQ2U>%l|0oP7_mw1DCzgS5wZ4owsHd|5Ml*hM{ zJepxE8X_lQa!HZ@M6JY*G98H1*kxyJ&cZ^{tj-RVWzA3-7~4l0T98w&?qDW#?Z!3NFs8whOe&P`jP@zK*E_@!y$;gJ>CFK7Ke5)m)wV$pe*ZDu`G@ z$x}kK>^MlRqSpVjS1%CYoa9cZGU@x7oN>;?&xvukh6P2t77AqeKff^+wBn}+2T6PH zS{5x$PFb$HN-_1f*^re_FfHdM>S{lyqNrSPL9yqXI+B>2WRKH@z}X=AdAo123wsXvUXoWPahk!IPuQ zE;5f`piTB(F0kz+7`Mdex~8t5GqaeK?~NnZ8A1^arx7-M>4A7h z64aGKwzmF=SfFtE>GrT^irx3I%qMH%gJWtLqY0>()i0~E-MdjkbY$-9Bs@{ol=mp= znJJyFQG1S=k5X+!mlVQC5W`~C&eT_>+|YLUfV3R813pNDA=1~Y*{DPppcfGm<+f=`WtL-g++lUuI4=AbR}V8(RKsgzI`O{# z=8Oqqs(CG`!GwXz@lLw!FzYp%mp=i)j?0(TY@(Z8ZYg$c(6Kpl!OzK_Va+9`pS3_v z!Gm&9Eh9eqU8=lbOjig6^CaB0w=X5~b%-M_?6?nguI50tW5#;kd)-4ymNe9|yu9q- z-Qo^d`?Ojr!n$|2Zi0$H;}En*Yo~9>XuC5LZKQ<5JcJ$G+WfsHJF0auC?L#SF=#Pk zn|WRaxn7qZZLe*he&QtVX0Sp4&v*$4(-IsZ+@x{cj(;u3>2={O8aXL~8g8T@Ia`+b zR$M;qMJ34_F_@5{uk6<@Jc}eot9cq@-o)YI1X=llHv3h`4i}0dx^4>}rnE%TT`C=2 zTEpHKsd{Ut^%mdvAn9V}6~oI?=ts@vQlVvQZm7`FF|I%bQo7#p)_c7+Z*Uw_V<)*$GF*8IO1w zYF}916n9NiLI*tvK8#$>3M^i>#~4VhWJ9lWAa7Cs!!3tT@ri5^yv-4^ zu=fXVDw_65Xh|SWK!tzOr0{fWIvm>%6=zV?V)amU$4Wg z-lugfS%UGCq_h4ZT^!bmd?N5PblrEbUhbOnccfg?bDgbu9$ut?Bi-=3qb2!@O#qIj zDz=|?Gp?mIG~i%tt@&J_gXV=*(uTN9GV&~wwf+++B1?Rw!#;2(AI)S?g-xGI;AFZc z)Av5(#m?B+aFu;AjVz3Kdc8S!iwS9o*KX%)-^_8|Q@P=7N8709u%o`VdWmw@WZx`z z@sXJ$@sDcGt1m}3qto;Gd$+-{r1y@R6KB~LG|zlqNAyu;Of5oH!72`@YF4t*2;U0e zD3p?m0QRGY>w~Bsi7{$n2`3YQ zx=tOs$>pDt{O@W!!E+6sRTu3@llg;X2WEaW)gItk*ywxu!wQiW!AoR8kKP$}bLIq} zG3F&11<3n%R@5OX1NUmTKc4Eerh3@tb#;AKJM>2z9=(rx&wxj3(zQS3taG)7AmFGPe3|owE?vUR!`Xje!nrZd}pyu81X_4D?JycHCvzp_b1-^V?!uz zuP+ih0ffKQ-_!?BB-Sy<2)0iD3U5%W<{on@LrF6~ zW&;jKds+AXi0Xn8&5$!{@h@Ba6dJbo~?>83)O&$eLY<*?%Fq z6wK&GsQ+}^ylcBpLD7jFIM%?_R+MiNgGnjXy;x|N<~N&=W$ilQvXYkE^c{zWfsI|v?||^l9Y!uZc-Nl?vF6v|EeWTK2Wwxy%QC}?dpZK*U(UY4QkJ;Cv%_QWTN*1 zTBj%iB{!X8^Zn;xE(aWDef7(4Ue7jjr~B8XqB!(W)ngc@_CKvX?7s4f+}0rZp6V=t zcekHLDP5>UaoFJ8C|{vAnEy|k)Ti_MrPavuNE=U4iV@A#f;Pel;gcV|q-@lbM&BN% zkO8NX)xfwSu596kGHNuZ=x|?gc>NDov3isO!Q_wY-v3qeTz) z3gD;J1@R7e?R(+Y#OMk$C27hDe(&y!zSGo1*u<<;_=cuuuB06qEX#w$J_evhba`*f!?)9GVPf zDagQUl6fHfX6;kFQ%&3mz02ZRBIm!z3tpI8*jw2;8NJlD7|pS;Wj~yeOffZcfhu9i z%@(e&ZQ|<6?DvK{ zYLuf2&p`o)FI}s3{42VC<(@D?X+P~R6s>G=Qih_RRi_%PCxRN1xyaLWrCjW?e4_o* zswmbzmI994Ax=Avt5{}}@={LYOnF2vY{{&iY8{6>url*xL7sHp3SG%g{l2VmY-L4O)nGzIu>ROf0-icj_@8)@nc5w|{!{G(eTv;3Qh_Ps zkjA$2tMXy1?yx4SOA+^Cvpi*IS07X64MJf=tGj8mBo#;5=!@z(1)&8l*j5hzEGA57f7q$NXl?UQYAmYv)>$je{$66q3ciI~h0ka_jUM zWL{d)%n>kb-juJR3DY^GDZ!tUI9oz23<8lIY{x`k$*17{y&d*3#q1Y7o!nD*+&%qI z5;_jp`kE1a^YOZLv5O)UrW(S4?X4c+1_<*;_I`yzxZ@<*eo6Hh1uFHPlR^t^u=$S! 
zeG3!5Hi$pjis4hr`2w*f^*XSy{tKC#)i?A8*Pr~-4%n^u`Hr$YI^wf9WhEAQV{m2A zciwt#4YuACR&9zAuXF{+qtW1xKbpNZ!XYWC5Ie7gsJ#fRods^yo|A9%c7=BR;qz7e zz_h*np7w*D2+j3RiKrJFU_;Gbw`@oy%eceAz`@~RzMCVZq{fc}sY0{wmQ9skaVxXW z`O2wN+L;+LkQl6Sg|L6{fALiIX>IOV`wz%~tPqqjq1i{|m~ogYqqVB6Km0#xp#3WA zu?E7|JE@aL^IegHT#LHqNY^qi9J$>(-7+I`UK!}VSl>EL{JiTz&;>RvyJ(a{L}uM|ameJ4JSrE75iP8-|-844@N@IdbI zD|a^V)vy-@#CBu}Uyo!r_gZ{lsh|jS3JKhpyZ<$hdp38jtW(Pcq`6Uz;^zux$C5&| zyDcnVH5cKicfKc_h*@EPGNx}IrfkS{t;KV`>RzRw&8JsNiwO-y1e=?*>g0wb$rV+d ze|F&q8sp29pfj=_`N0M zSAV@xt!zFLEe2v#tBAA80POxl-Q0H8Fd`u`IqtY8bD_bXRy$ z9_bC7T~zpz_G#)X>PnT~8$4x*XarYFAA9F8nM2E2Qha92 zeB7wbU|0@3PP_pAn0h^~)D5tf0#&jvZh$;Re_4k4L|g+9NDrGTQ6vQ;I|9ConwG;1 z*ZMnpM#`Tv5{@A|Aazd}{j!@&L3>2yOL5~miapIjqbP$2dWB<+DHo|8%ztc_flx0T zqwac~*@-BSVqW9uLqtW~^^vJ$r2RJm;TgxMx+u)3S^ybSbG#)^to)pPN~NR%?zv z^@RbeI_a6MdW7GPpAnLIOv5JqSG0k%XwjECBuBC#3#0R!@~JFC?m!I*7-oi2v!6Zu z3sP7#)xsGuVKiA^V7y8GRlxI^2x>~k^>^7Z=`ax>*nEdr;xdwx{KE+Br=e~rOh7(iw zOUUL7IE*!?6^u-}i`D?I$NF0Svh4T30v*mu6xrq|zyNxgEkT3N=jx^U!(wEVSOn zYgEL=_3xWmVtQS4+3}fM(QZ3rbnpTT{C?)8T45PMiDs*AXTsRlT&lI{xUxBEV2uJ; zh8h6N5Z)`Tk^pll`-iZ`?P?E2OoFuioaEo(ziB5|9pU^L$K`F$@x4?m(zIkERoCsN zT=HbTIw0s9Ig*f?tiH#iX)QIp>}$Mq_n)q)D>lelb*Ld6Ht6p6L^gy95Nj}9W`MI$ z1{TVg_AbZjHddVae;eB`=bNicIf5ksic4BiV3R~MOU>H2DQLUpo~$ty_Mwu`+Gtuf zqM}n{s$3(U6DyZ)wvvya_v2;z@Xo*u@<+eia%gz$$ehTft^l1CJE6kgw9)w$FZ^ld zR5kt2IshNg*Kqy<-19a4QtT&vQ=t}_gcf?tw z{`|Y&GUHm}abRm7z1`CUInLJAt>p}u(`=1r_|24(&mimA@%X_`tf_*_$Ho5akj9Z@ zV4LfFhnO`wE;@;H8H(e7!_ylvfU`LT`YjukMa1nt!_$y0$_(^^NRToM9}@z}&6&W! z{&JZ(q4&*I+}ZCUUh0cIjK6!!WdFG!er*6WejFu>V>q{w2pGpx&(@p$ z8u$_322kS6E|9N^k3U9Xlv%DN7P}k0hK8S67Rg-M4)3Z^mj1%tja*wkuSeegj;bB& zv*6U9E-^X73vRiK>z08r$l7r+Nr;d|aFrEQBD(f8D_GHmcd4nfEXtAI8y46;{1^-P z!(RM0SmXu5g7azA-l_p5ncb5oNUUISQI*K64dBD2Og;NeL=nhc{6%zCT&8UD<73P< zhhnV27Dqq+{XX~E)y>w#OPBiG+!sVlFWRcNJ3(@STFiL^dcDgpV9S^L(g)m)O(M4| zQ6VV8k8g^1`Ut^l2iY~61S%VI@h33ew|&)>%cFUwhw)(k3nI?Ol%N0Yuv6cF!oo|G z{Vp8cR$A;?aA9+j-ANY#fd!&}^VxnX@#IB*hz1ifaB@oY&zh0AvqEXAQ|y4ye_rOk zb)k+ry#?Wa3%+82o^gd2NHS?nJ1=&U2>d}mc(*LI7wm#1J#Fz9*XQ5Ah`a&fvG_>T z=vn_ikznAEf4<_$9ndY;EBhNU|NoXhe)SB9DSvwc{=579?|J;6VP+LU1PhgB*%=0A zON8(v`1hVHp!t^{(nXcWtgQb5LS*#X@!Vexm;etDaX{_np4<6O66SWU+Mfs=Fi=8m_t)_3y z0Ryi9{6K(RBHu*hzjCAw}zWtGIz(4)P;)*QLXz=MC2rA`+;3VXH z3%Lfpm<}HsPHBZRN$+pD-A_9)qq24CT}d&DtzUtiy{#rDd5l9u=5CY|2DK%AmSm0? 
zI(}zxT(*L3+jXI6<6}rITa6H0xYIk%7vwjoPPthf8XAuz4WAQ*^GI3f#$R_kHC(oT zC+ZvFF-8azxavav#=>ZWn^+J?%fGOzXxy0EsVhl|_g%Vul$0x2f!DqcXM1_&mfL*M z{e1U>3A_dj-4M~mz^Muu5lYg791;}jH8()=^_Ci(?&q`eA+xnRRi`Z3zU!Ya<8?F} zEO4987WSOZsqrU|j#{D1?w5iKO&p0QEpkWFI+zge&1yOuY;@iKIpn_D_kQ)^Y1CHi U;rSGC)f3_BM!y;K5yjy9T!ecXxMpcP9i05FCQLySoKSNCNC?72!{&?1_p*GAug;41_nC|I?jFn4)h*YhqMC* z2CiixBqT2(Bt$IlXlH6+Z2|^H731=wT}sdZz27hKW1g}Q)R*A659;5zgq8HJLxRi= z8h+ha;wHCi+X)LzE>ELR>3yIdV?b$Od-p{|Y3hg9hN2^n8g2c4NblA5X4;+48l(-O zOmQ08;%KUEJ<~mn4H?4(NLqHGC#^`U3(LQ(=sMN7 z-MQrQy#1cEWX))FCPf7D0$W(5?V*B6c?xPg^qLla$oGh`n;m+aI@OG0)o0}I*PTb& zM`z3__Q>_T8pmZ?Y>llnye4$Zo=E_cjBr*Tm%Ckd_V49gM-$C5|3{@4HD1Ui_o|Xr0H}U9#qT`k>MTqTLC2c+jNJ;<>lU!#Ihy z4$au4FZ)puC;U-vH+O5qq=U7For68%mz%YkwTMI&zK~lnDiwZk-dkfW@O>~J&}bW) zs7sj2$biv+j^BepfTMyzf{wsJFR)MxFsMJrU|l0R2)gN}dy%t%7~=MrZtJ`!~qd14_uM-yUp z1||k35`H*hVq#uLV^eNLVbQ;fgTC>Rm^(Y$b2Bo!xw$d8u`<{>nlUnSad9y+u`sf* z(1Wg^ck-}xHgKo6bt3(%l7H$EHgPg?w6J%!u(KupU9W+mor^Oc3CZt<{`L3QeVVvi z{7*}^PJed`)IrAIXBe3om>B=>iaA@D{y!A^ede!XfBN-TbG*MR zD(Keunb}!+|1|TzocW&{{i~#klZm5{oeij;uN|23U|eg(}ZKO8UPzhr|S zPW9Qx7Yyt(n1t{bWq0u7beObRVXQ8qmwo${sXToAb~tf6Md29TJgUHOII3{OY6D1t zyb>znKq@NYEOC?sk}Smp-F%|-+&kTlJ5xKyrFQzEV~W)6wexM)SZR-|i8^WBwYb5!0J~N+y8#Q#Net$ z+m`?N{r^uLV6at!1Zt4~#Tfjn4sm~s!zSGSY8?Nr<1<8={J`9QITwG`fvO+EUGeU} zozLHO`~ah!bzuANWdfYSA4)wtsucV`6QzF*wf3 zuxM8>*L?IHM2(>y${$&%>}o5QicP!YjiDuhN1wohpVG)7R{Hv#$?wXL)9au+R-2;cvgruc#?4z1vE=3d}nw8JkR z#~lf@a7e3gGiZXA$T2r)Rkec8t|F{jn0J*5)$e#|Cv3Q?Bs}qaUJDR;J7vyOzvyielkF!p!Lcm zGO!G%w(G)aVDp;oi2r>~NWZp*)0dTEA^Vqx+Px~88V7tZQ~Nt+CIA=4Xd+(A+#wW) z?P@SeL_ft7P^oqocn*3lL5cYTy)B=WGa6O4iX;lz*-Q3J*fWM#`m70g*&#T~ZBpAq z@iSn9k=9Qx|-ji}|d}T$JwW zj9sv9A0LJ`>iGE|Q0mmSp5PLL5;2H^uhbZNss#Ey$I{M<)#$u08HhLL1k>r%Rt|)! zEZ4a`-X~2CuMh|fJ1@atGPapdplpB9_3smmizpP6SFbs=B*s-~(0)0bM}-YIrBF$iT#9C+ra6Lg-25#5vV zFYJ%trZe+gTRoCom(7THMP(m($r1X$I)&y~M!7u5xni<}csC|YwS+m*Vz2WU zgu#s5NFNH_ew~GglJYb|!!F!!Qv^o`*$nUNJ`wQVhfjxR&|b#_2kakn!Q#{G7Z&E30Mu^|0yZ z4V_6xc!o(qJ`r>4+XOLPC;`9L)bz!1vipVH68`18*1bU!V+eQQx)SLPxAq`t-kXv+ zsr5SHTykHZRX>^LLIB5Nuxd;P=>j|X%$N`~nTF+O)s=aPLxNV8au7orX(wk3JA!*+ z?z&=XFA~iTl;0`a*VfdR&W_3ikmywX6H~=-qBBVQVD3d_7tINIHp%MM;BbjQ}$q zqE|dFu@VcmjpjxGPC3_T*c{O`re@Bd$y!jOzg9VPHEHB1kX&!^s^f+nNH1UIRE@by z^kLYUYyeOzz)lyn)WTYHqjfGEiKO|+#kx27?1@Z&{ygghG20!@&a0#FOl)=;r}ue$ zluQ3wFRpGRg_Pw>ciKapkzIx~4huP#4Ezjh#_RMLs=H~Op`^5AI|~|alsQZ&W12MT z#3(F}i4ryPfJ1v0lNNGvs?W;x$(VI#?aMuXa@(HSzG6MSJkcks7hX=En^^v#s>iWJ z%Ca8Ey`hYMWGe1oGL@#5(k}J4Oidl9@1DD{g5+sH5|fv`WXk1ov4}Bd>gIzF{-v17 z=sP1q0w|9f;V>xvu8_-t|VWD;-%90`%4*A_I>*6rQ#yn$87NUKmtz2NmG zj-E#N1|LZKOf2$rL*c{YOyIH5D0h$4vd7D^6q;;eoJ(&*+#=MdL!Vb&tT}jDn=9`% zpQBIwhGw4e3?Wd8X9Q_KJrujq4|Bj4$rmD@O?0yPa#-tBX+ec`+V@n$xx5Z<9N{h~ z=o#PhqKS7v4JsaS&=zba`od%>c3+-<(k!PX)PZu`g6P|DeaXHG2FBTKsykBHCw<`j zs>R&NVTX<0i_Ci}Ch}4eLL=zBC!YLp%vr>fQ!iI)>WZ;$aTTNwG_=bm=XA6}vJDh5 zPZ@)|>v@suv5NGYTuSV~k*SRDVoVkvhlT*_m?OmODsS1t)_|JdA?NAUW|T z0BsUDg~x|I5}JA9KGnsUkXk4`F7DR>oAcPdP2?Fv@Us7GeHt>b{!~l2rSXWY)t*f~ z49A4EcJeR zoVr3YX7~pmNogwQ$*B*vZpH=S%H<-1dPu!i4+U~f;mCXDsSuHLi3mnkPl6K=sSC&2%~OnGtk!JseckHMj`YLcBKzD&c@jOkny5e{;C9RnVdqIo z$9|6Q<|kS7;)D4%sG&uoi}^0qfi-lrDyLbg*EG1&H@lBs=hh%; zcRl!XkjLY5t`+oN%sWXZ(2+O*>Lf#G-izEL!Zdsft*|~|`8EQyyI&X8E?wrhf`P9eZ zSz&}@^Hq*0=F6(D3J&&3<=8l~u}?=(YMg3RGZw?9A)uuxQgCmW8NJA63%o%5DDk{a?}J~#wpB7KhFF2CrzL+|mU`L2+h|3;JLjeqmf0KI%Fi(~p@*xb{C&`nRmM#xiUV0Do^Fs<;W zi@eH;ZU`d=TCq@a+li10$fkCo@zOK40raWSHq9LS9(ALrJLge3<-LzI+~z)x)x_`h zb%TqjYn274c1kpJYcayRpG+G|If24VmqmpHo&hV%N{vu20mqv#dpn`)pK|w_}gvnKD(u)d+N#ZVz&$%*~xtfnw@GSjEmt>yG}P-*LKj-p-v^Hql} 
zu*L6<#=F+?D}c+R%4&n=+vrXQx$@o5bpKbWMuPIZ1cFI8@EsGAp$ELUC6 z{!RW(0ve7`D-OHQ6>3r;JLDQ)l*!VjeGrrfunN%B-oDM>X=8pe&e(jkiPBPyAsxZD z8^whZT5gMkvu@thRrb}6kQDYT?9n7zFA`w2t>(NrsGiJo_Nk_9<|mX9Tz_+c@_qmc zH$^@9N~tk6;L`a#h&e1?g>r15c~@6d+I8CMKV4xg=-05hA3Q z)S(YkD8Ae3*xg6kXUXQU{<7QBIyXn{N%S0|l^a8Qp=+}kiN<=st!(kJ!7SHj#i>(u zUS?A^1WyM4UEMx><)=ex-J%iieYe4$sq>GTH5Ucqv`^4Awx60@{EEVc3yKxfK_5k(u!0NJry zm=c81z1-=t{3YrEwEyXk`&GJd2OHidRT5RvW=K?1IyM$p@uD(>PD z{12H#*&0jlI2-rD2|7G+3$_=f00=Xz;wH)+dJS%onDa={;nw&t{x~Z>2!eBK6kD2* z)Uyf0Xz=-0=#VwWG1_SaMzHjCr?^#cjvFNNF-`+n!(uYO*Bgim12kg>hjF=m7eGH> zNT2iVHvM$Eq$xAjS|h%Rhro6rFkT>Qhb{GylVrwKF|S?eFNNYInk&czZ{@9ErG|KR z_URYqC*HzK?Mg*tWBjG0^F?!Sou%XF&o!zqyVWrRqO~pq$DZG$Gv~kd^RY|gzxp4c zO=*y1D-3a8I!<3s63H&bUM*sOvns08YA-0cv{j4BEyMcZ-nj0uFR~A4O5*%7 z@rdj_5B_`2f=%3Qwtz3O%NIAyvqJ`5w-G#y+i|#3BY)d=?o?@)6?;m0O?zLxE#QFX zoj5vlH(NM4bRKxf!AmDmZv#n2b%~{LeEO|8jGwmd zXV(lmxC^e@&Zi@?L{>L4Up=t(0{NBBU_GdORYr^&q7+Sgr;KfZPns5Rnt9mbyCl1D za1T;KWST;)sRSC~F;R5d2RiSL;RmOZxGkLTV#bT@y^5z;&-a2D$rK85uXX)4xbE9# z-Ug3mUS5XjibC|7LYb#4~Vuu zK>(B+52MY8o!w9=&=pgndQ3l$RAxJi0*aSjKPLn%c`GlcNdhSLOikc#`9pHkV@J0W zW#)w$1LtjSWJ3ZZ{mNMch`I7v2p%kCi4zX?lgrrMW_-wUrW>dURMeOaIy^s=9}>2^ zYqH^VUSzzyvSo!nM{Rp`)$ovI%wl}~nP5j(5kqnvh325i*$$i$ z;}6EU-|Xj9hVrfMR+4A1b9gZsV{F{v^gAK3=5K@a%nh`MTrs?VRdmrms7TOkYFo-> z^Q$7H{j}y2>6;_X=gkRWt4BlU8GVTDf&0 z3-X*E&z|?1F-Q%w;|_qV;KrI{E}-#}F_+@D3`}z6?K3}iZu8)FWI(O+&MhnO$$^^3Uun)VPde~#p|Lo-KtNyKq z<#mpOta6Ch{bZC!(2`_@jFbxiUXN7^mX`nE(6}-L;X)#T%{!r){G{oA!K!v8{-B5| zFHWh`9WgWC@sjU`+L@&->rqxNUl*p8!uT3TXqUaH&5HFm2Gztd4i7OZi}U@E8izg| zW_*coNS|C@7mj&TX@>0}D zg(bN&O%+dduWBye2jdrVj!u+!kMBK8`JC6T)QQ;DI~A7pp$!pe_+nEVcafRZPi5U7 zs4z~hYl^Do2RIHp9gY$gU4dguL_$-qbHpQ3eZ-duij(hQoQsc{s!Rm3xcA0lrcGNN zCc?R|2k*@Sbp2zFbZMt7KN6HC4p&31omMFxhLziRm=5_sCu2XMga^vQ6qOe4<8 z))7DyS0+aah@6e=uXfbkEvGXbU!qQ3YM>v9neG>*X*|5b@2kIT&X~S)=4JOpmGB%j zBK3!Z)J2vklXCqYX}TaJ-ZFhZ_122qO83&qV~36Ziw!<^eEeddNSY-ry=IAxJwuS3 z+%a$Q^7DabO_9NyNxOf1Io4;4AFg*m6{Io%0@utbn?eTtri>8*AeKhB!HF(!uYlo@L+{mSKfX*xmDTfRW!#BZk! z!Dr+ifwrGEyuyOsf?@#)i@E=3bXWhis4T`_FFX94>TxJ0R&k}cHz3Dy0qaaN|G~#2 zTm%bMYiH_)D;29NwJE*m**ETX-S?po*_%fAL6TJ&=%>kw#c%VLUFCm^PNOll)Fv+V zc(5D|Ct!2Y1l9<9v-_QCav=S2LfTxbm>OLay5RxvDqWLa!f##Wn_I)D+>o6(xApdl z`Pw|Vwfj{L9-dmH3T-VEPT#c^o}=>1Sa$hJO}>*<)!K{QE7rM4Xuwv8g>iqGt=8D! z`>RI!fJ}~81|+|plc7V*(vJ-U--lE_d&3F>dY4WHdk~rhsu7;#ry}r&s!?Ssrr$n~ z@ADzXM`0|K$B)r9s+#L4Y9?B-mqau~GWcjr-J}8yCj95NJcX1M0p7E%j$j(d*58Jm zDJ!)$#ZPF+>i zj6u`l4O!%M9E=#b@g#U;PHV_TgwavS1^q^{=7@YZtJ}6WGsQ{YtU&V zjj8Hg;Pc<}TU)VM$+4g7xG{gYr%)E&HPEyth6Y zq`4xPh##Bf2p@6cmkSJtJ~1y0$Ui6Sso=`wcLPyXxejF+Y6l|*^LCBarho=IIY~S` zlV%maa_@?}_^<&{Ig+(#4{Io0j9O>UcjtyD~P^l7lz0ck{JY?{2l% zGcz6byE-lQ*8cu_7_wSyxRl0V$qAipmPRW5(Gibfh2-}rjoV81$$`#ihSD3BGU4|5nLVzQ*&rZ5Jt!?CvpS&Q)acRN|3kov;ewpa#zGAP z193=3ote!T(5~nSBX3dt_ye=&F@ysXz2BCyd~zz-Wqb34;p@4pp*28gB$Pk%9Y z?muQKU157u6>Q5^a4rc`G;w7S>lJCIU$caJ zR0r$m?{c8}ta0?$5=9cJXK^eFXjL;L!7BuYBm1N?1Rf#to)@D<4%|s^*s6C zkTF6_(diiD`vPSc@h?uWrN-Brz5<*C;H}n@86UMDOVU@i2u2JBpQjShQ$eO~RmrH^ z_xxiX^oa-W+6$MyDAz+;yIkPO&)*AAyfCAMwqYtcA5O}Vf+VF!rL9G(k%~*JU8nX! 
z-ru_K!uMagPc!ov7cXuv?Rqq>o?9?|v1F?Uer9cco+I|FH1GBw0+<|C`Z|RG56kuq z{_**q(0l$B@|$=nq3431AQ`*S3-A8DOXaU%n{$u>46=alE6gD^+un6(PJy-DpSM#Flzp3?>&3rA~15M@Vjn33QFT8oM5>!hD#VHHu~ki?aXg2P=Ppq1YaSmZ(%!q8Z6NEG47_&BZMd`}doYgPxn0<#0 zv3xqB$xBCgR6zj4xF`2+EGF3U?-Z1U%hA5IhPVh8SM2el1n?MBkMF6I>lbY99%ovb zZL#5Y1{tHa8Poiaij)i8LZ^BXRmR16R8EjbS>(sWYO4RBBwk^6;n4MvT1go*>4u5h z53D>^vasiKerMuC@w0BHhmLTo=S#_bdmtSAP6c_|!i1YLph2AJo4|ryvoQim*bfn( zdriuT$;LRc)(sy=TF_so6N@x``Y89OCq`#K)LV}fr*?Rqr`7?njTTPCZ+mmI)Cg) zFii$lJ!H-(pOruhuM7vd><~{!6bn{&T}jLV*GTd@#8I)%=-_D0+nFX_=8L``ep%FL zK(0)(%{MiZj%@*vZ8eW^v$O=Ali4O=387`?-=r*PHCW-HqUabf!KuNnH(i)lJ%t6C z3;d&K0{*3Fp4ERnb8}UOl9kJWtV9W?8(&lPR=2(oB93Rc2zig5Z%&u@+r~@Y)oiTn zn0-1#YuyAW*HQeYdg1*X$F3mNLXt6k17X~rdm>Eq_^d$XCP$6^Fc{i%xZ7y{z4&%x z?=w$O@H9?3X>o@c%M%h;yvbFPYL(T!X!9<}L;kHAXsnIu(3UQRD!v)4u%vue94PLl zgI;^1X)+Fk9UEl*YKBibK;ty>x02~Ujuv?s#hVv-#ut?qgSzc%m3(%k61=a=yn&E;cwP(O}R(O1S}}Mm(c< zZ*PvG>%VXRx+OKK-R2%y-)B>M` zy()}ONzXP-uaAHlzBfw%l9WN^x+C?ga9+fZ!n*Zu&IfFMMSP;m#g=~hi2;`Uu@Y3v zX_XBGW!8%QJLj}Slj8G`W=Iw!rA$A37|9I>@Ks$mr-w6um8#g+MGZDSxHP^KHYzI9 z?^V(YOmv2wyo7>kQFo>EhTUdqU*XS__E_=@_B!=mz=2qLlnCl?&H!7 zyG!Nw&nfunI!3T#38`xCU5E-++DeaG6LzcPA%|V!M5QeAao)T@_&J>#4uU_6C8c&+ z)rp5caV+k%epEn}wQ1zNG$j)$2_qw>yXdX2Tg_`l2)yhqaa%VZzzFf;?iTlTb6#$*ZWk=O=n)o136NDPnXt05v-biE_D3s_ z{RbGHbiF@#-uFo8HTF(lfgsV7`Wq7DS_F-liKn{IBNsjsM&{;4o|}?}h;?T{?VG*p z{0ZHHmsbDSVmm+3hd+~aS8U?#Qn)E!m`YQz9fE6YejhIIlq>(S6lU@Gvp2TP^r-%@ zB{?G^#tgy7J=ZtE&4xvmsRiW=?d%Z5T8~XcL3uho$Pm#sE*ShlGg~e+qBY$oiiM$7 zGHg`axkOr%5+2Jg^V5H`_}S<+1F9F$LEF&i1>cEIOpCtLp?Ry*b#Dr3EE?Aly>Lui z)-&Z=do{IU$EjnrBFpweDoD#Z~VQLodatw>YRr1kE^|93Uxak-wSkQ zyjk>$!%tgUs?za1^ut(|g_?w%_zvAd%q;n6qIlG-2pmJW6~B&# z15{3XVT$Z|+I!Sq@jdjPF4}*JBg#9>WU}_!P})j}srJ3DwBr)uoatRmt7Uj<`21~u zEjiVoi$*Nx@h24(AQZ(P)F6n~;Pb|rM@leuYRuE@P7f&92t0mzekeZ`o5_jYy06IW z>TOzOYd$#g4aiV>1dqfLO^aJjvlM8-qW7W~%STtFA`@u|0lcEZktx4Ql*Vb=u4d}P z(xK8v?Dbj&CFc+ZluH@l97O>#qlM>lr(`r03V(r2+2h6_{ zP#L*VfJ%?(?NH+WW1EdmTrCVfcd zX!)y#w9Xw1e+iBdFk3Y=r~1P=DwM*Ej`7>p5WIVmx;iFlh6^)p$Z}cxJTBqX@wvvc zm2g!35?r&O6h~=^eo4-Leh za^iO*T#Nb(QzNP4As`BZ7J2SUHl98|GP2`TrwGtc={=eTte#qIsX2V^8f`yE7y*U! z#K<>h>hD#Z#!e+~Jgvqf9R0faK#lhzNzVPk<&m*=gySHK@0YhC8$Wiun9OL9M0VJ? zes2M0+nwALmq=F#tel(p3333`+(7Ui0YCwBSx@7kc1XDB45}&8{PL-$Tl|! 
zKX*81eY}~@QbyZ^WEDR|$Ob!(UQ%vNIbHPlN3ZCr5H&On-FQ>3D*14YK7xc|Y%n*o z5znWPa)v{xQi%C%ZiL3G@>>Xc{PH( zJJ=cxKaa!$M%6R}#^Xp9_oLo|C@r1+JtkxVy=p|E!wr{i@DEtKgYtFYNb@*jKrx1m zZzlX@g}bYggd!qFP@=t&KU38x*&lGCX>N$B4OV;+p#SqNMLS1oySLJDCr<+&0r zi)Iz5&sur`8_m>qJ#)b)E7uf^CdvA~c$k&oq5Ny_z223f-q6QIpEv{!^-617z@=`@ z&ea32@9pnCAGCapgZpLU)B|4yvMe)?a(Fg+x1=1w=9#LDO) z(E^J3iP}4*=H?g0R51$YHzD6Nb=s8;vZnQpc419@v*ZR+1(F+N4sg;VhT1p?mnquv z(HgoEfxQo2=XFOy0mt`KU^X)4ZpcY?WdwxWsrO$3DyuX15~txPeIC%=F8wjz7U(0` z{8A;6Bao#JzneqW-5im^Y8mK#hHkrgLhFQEAjzWSHgd(DZxDE1d-5{Z8@fx{bR8;C zB#eUWLP#6?yuKHBF_4v0;r+4xWS^l^g3D$csHRSYr7{q^XZ}%HGlFuY?+(%PeIsQj z_;rtMKkK$|Cy-Q1O0M+#jUsWzl?3XZpT{^%mlOZ?jp(k27I~D;6^+kOIy-P?1oOK9 z2wR@2svxz?ulfjL1>S!RIBehi?Mj<#G5VyNTD{s0@GJf%2;iK)X}H(pCd4BT+wdFv z9vQFo7<{*WJ(Z4P!rhv|rol*TatM8I30|kQ?$wEam86v09TwJfV>Mrg)Ap{>F$%rp zPC8)iqZfTLG^NUd&S-ee!8faxa*zpM{8lS`l`{(k zI){1q><*wy7B-XbJmY2nTp*Q1(}I%WG2WUs+>u0!zNY%at5AT=9=|2)Im>%hFVP6~%qL$pJbx@>oJxYdeDfRF)?Y6U5-dx^`02iDlgLKi0Q|*YUUZfny9=3r z2(~)mt)Xoiz1s&0#Dupy;O*PED6#S9ewDn7|61AV3zxU?QRlRbAtLaSGg&X;Qr=he zXn+1S!r*hnLU1e=bSTSZpqP~nsk_*7 zg@_J&=ks?4A1AeZRWm@eALZLsF2(dO+f3L#@SmfuVj}rwmaS!Yr+rCpR@>|ro5~ti zuvG4#P{F0LvSYEQPu{qm8@KDzGGg!*$nnLpgk4q`TzqPV-{6s^luzMb`^I!C(n< zuOKJf1EPW~0Pzh|&B^gbQawUk8j|@B$rub2X1e8>K2_h3f{=&B)?u*5W7I|-*I)2H zy$wb|3TsPlH+$+-6ZvVkHeBd$EyTk=J%dHq(=J9eC9qMiV0Ax`QLi()TYNRmeoUf| z*tjgm(|isABz7&)0{bq0N36P3=-=`()xCJhgO=A0uTOrakJ{)yIj&~z`J1ehdCGjr zmpfF=kNEYeBE(g)8jhvV%6E6Fb&R#_&x2d*22H3Gmou1o<99~~tesqmdq zm^gcbbbO8XCyysrpW!`ye2*E&!;qOx^t+f*H_-Shw zu6pHdS1ZK6e_a?|ESfkG4OGB-;j;7n7y8{xoi0}j?{!r=atjE$)&!+oI?_~p+77)a zYR`W7B#i7o3>N`&{LG5(I6kk!9|RKA8gmEucIusYhDvi2>AAfiEVNyH$v*Zlz(4)9 zd$!ujuT!|H)NXD1;(C)Sx~u3>C}xpb{`*mYYm#+YAtS=kqeOW2(>Nm=J&JT%l>? z>|&i zyRf6#`R>Y3&V~8RY{_+LZh9K_sld03b-kZ@XDPK7;MX92+_`D)0vht9u)>X|jAI)V zarXLz_uDCUYm~X}6`p9^;fuj$5;;^fd;fu^<_9S`iC%^3vCkHG_ks&6P-R%?0IxhZ z5C8r)h%x*vPrYD$A}y+z@ayzJ{R8!?Bs6VT3FjH6vS3POWt}fCn zMAQ|=>`9x3?Hm3gR3-xsG9#NiE?jPY81((+O6pZ3M|0zOHfP~9oW@sLYX*cIkC_32AI#KVlX3825*S};P|uRzu{Pj z@x+XuN0%^MPx08>WiRx1kg_fC4}xt|wgRRe0?gVz4|LbrNCNh?rH3zgZlUwgMZ*sX zO6o%x-;hqcLoN>#+abLdpV?CKZ=VETC<9*OysXCc@7=qpHi?+*pz2Cz5kOJX2rfpY z5pP?8rxV)R#;;ClTPtiZwh}&|wPMysnq@C1V5LqlmY+SJ+2B#qWVpS6@+3aMqpGT7 zM+0d7gr#YJxf?EuG0l+Q7aee!RljsU5;p&HSLAOlQvcn!DKS8ys*~}PyCj*z*0jYU zrv8Xp;n;)nu&pcIM=PLGCLuhRJ-oG}+*Tr!2L**;bAU;#m?pW@S^a4ou+tGeO=sIaSqqn+fkS zZcwB} zPw(Tim`%$l#1T8ERjKq?52GN7VXKv(dy!v4 znF8+w%F-bz`RD*Y`e2&dzb*{AUk==KcGoIBc6YOitp=`$LX+8@hO#+j-Ceu+qu-bx zp&aCy(0P=->2%OmJ1qqPXkU$Drl(j|G!xL74);S(=2oF3!s>&7Tv04IY5P zM6^Lp`5x3&DLrZ>rcQzKCz0g4L<{YPuEQK@!2TI6>l);8xZGL8z}eRffwqsb9<2g2 zE4mK{Li?YxvG95P1Tz!PXXdTr*p2hawzPpumE_VUSC}!-g)NXcDZSA@$f*hY;k4Kd3bEy|yy; z{d|+spw!_z!$#R=KR1Uw^tyk^mUJ@!%Fi*EfB`g6e3XS58&Oz$I`3E*C|655){YTI*KET6^ycL5xE1lkJQU9!Z$swiu8a>^3(Sne zxH1Yb3C12CNshzzLHUBtQS)w8*kBOO?Pb$Oe}Q^oWt^3e;H}EEf!(!o;9(*WY6OsB zaPn`IkXb5=mv*^4bdhdT?6=!pz4c~|lIz68nLTgU@8mKg9D$Z0XBeFQ&o2bNRBU=e zLOwPVXL<|s)~^efG6n?N@LBttT$vAMW*{f~)(OfKXnC7608+vA;^THNA-CS3FjgaA zn`B~Nm=iyB>Y<$^J|3S2js>7(;wHlbhi~t<-R+~ZdGkXU_jL`2{HxhKC2pCu+NgS@LAkcPvP)tjFiQ(Rt_hQ zpa97&-uB+p2uYW;VTjJQ>L6r!y9J$}Gwc^`%5?q^Tfl92D5rLScN1(<{AXqW+gP&LZ$A z14?6yGml%vZkAJ*CGOgGNvtTk@R%V^gxw1tgt!oDd>?64GkSh9e9-j46N;id`5k3N zS{#U=lG5^vAJh>Z^tif3lhlducK&m~bC@W`+fd(8>q zi0lFCu3wticE}v=A18>qs|38Ye*SQ{Ur6^?n1579r!?y?+nPP$Y20w`kuvtdUe+vo zZ_6QmbN?$O&Yr~pK<|T1`%JU@k?Z@c!g62}v8{^ODQ_YWM7uWy>^Al!e2)q^IeQ_G ztzat?QKz3Bx}1u*2hkf@b3+LBNagbil$O7W9dl2Db8AJzDg$J5A(l~gPD&Y>NW`dm z<@s1=C}T~&<+#gP*ta^rkl=GVjml_rDY2`}XB;H+soDsbUO5=LQ#Nq5J}E56-m}|& 
zl*RP%ob^EHcg7{NRSDpr1-Gdslr>ZH(|6$qsl8GYc;E}$&qg@N+L!u5I?+9t3S13W{9yq@hDvvqEH0_0aOw;f;s3tJGU`{RfD5k7rC*uByCeb6hjqWHv?}|Nyx$~#w$~cajgsw=E_lX2|Iyd5j~>^ zN7s=*%TPmFsoFo`-D7G`aL&GpjdIyia}A#(a`V?J^EGs@uqHs_?05Otf*D@@OKo|R zw~}SPbkNF$pBJdqOdIB)e2RRtWSTLB=H&DMLWBev?||z>X;Hjv`c?5-Ks?BwW@IV? zxJ`^s04hHUV|bK}hYpA)w8mnK?Y^Eb?yubZ_{D>_IACO?FB%|-O@KGD>6$VZq5Kt; zJoBsmf`LTi@kcEhE-v%dvHQGnI~dqJJd+|GWN5Wxl<_TJ+#%1-QaWYZKYIam)gdZlV=8)^O9D$+tYr zEAxQ1xHf4*Xgw3=v1Jr^Mgl?IJcco%^5J&^1$-l20LTg3aA`aYezg7odASAkdcKq^ zk3FEKNWOXNgr(xiAQ}GgGcrBi6XVy#qMjn@JIYT4H}2uoJXrCtOaZN_pS!J_4bHme zwsJe8_o9PAnS0Q5_Mm`U=-^y<5KBOSaLW>4EC@=}9NH1ppreS6oCn{lp5ksuV?nD5 z2aAqWjgiqk`1xt7#>ypbjoF6`-U>=Bdnw8^ zTu(@f1=LDw+PN8w??Hq7)>^Srbs^Kg6EM*?5mz*?#Abg~;wr&jZbY2XnTvcp{q;&_ zu_{A}i(UyX1Zao3+nO&dMr){ew0uBUEoe=(b%bLa3Gb-=XmeQCGiudn=T*z1^ZrPP zj%L=Sa%o6D+*l_D;}9WR@-{%xG1aR`oAkHi$o>>WRB+e^H6_)Dufe2Do1wqn0V5|L ziERajI!c_t6^R2z%pb}@;p5qM+q(8^Mu1iHZ$S~GP|007!o)06vehzt8ih~Bo7h6+ zQbSJsLjI@_7ikj+^hhI6X8pj1on@vDPrX>sA{TZYA-L4XUF!< zH#~~6d+@J}k1QLvgF|`ZQ7EiFho9s>4cFV)5ocFucfvhgbce03$`O+{HUke`LKwNp z>VJvrZTOrYQJ!BsPt^N<15B*0V>Si&TMUS=fqM7J^$qma zA11{7JIh*;2Aoi?ydC7K(5GLQ&$Hd*Q=+=u{!~T@>d3C|Y>U@VP%qT88K7JO8a&H! zv@q^sIv@d2uQ07sBt$kM15y9l4ueU||F(@WrI0F~Q64v^W+(6pw5$THvki`7-t^B7 zdiPMbC;z#*|JUsf-lKre!@oxnhmYI5lg8{pRGRjd>xIgw?Q5I|rC>9`{Qudxf;!#b z%IGX!Heh7pKi&f5@1%54{&mLZWpp`z@c&O6U=T|~89MHTBx^I<-wtbVdTgTObNi7< z$3l&>s|jOLWpH5T)CzwoK)^A0-RrZzecbIr;ez^SCxCx-l^6#_KcwZudfERuDSS<< zi|6nwHZ$b_)Tt$e2g|NM5Z5_Wxfsk8WSXZ&pTb#r(C#t9p3rc(#cdu&S}{3yV>dYf z69FE2F!@ec7#3;Gj!2>}TvKXHfQh)Ud2fweVyk{S`{xx0PM@*G9wK_&z2TqmTe1X! zM~#up@?l2r-Y1Z=7_kqgaKWs!xu79sMsU!jrg#U!miG8>gE2uqVJDZbL)Y$pwXNs= zKRA2Kpt!zmi#JMw1P|^S+=EMScPCizK;y2#-913#?(XjHZoyp}*SE?4oO{o^AMSni zzND*C6t(wejb3ZcIeufee=@TnMhj(GBL6+?-^uiv%+dk*4|}THwUXUPE>ZeGU2#;n zI1Z=&#tQ=s(#S5c#34hZeIsqk>i}^MNV%R11E9QvGsB*q+20p_JNx$2=A-`uC~x== z@_u~^cN{A2M6(drzjLFChXe=iaD)kH+h$e>cVrjF6F#j|^cD*(40md{r(;MoWt(m$ zC*F4+^?sCt?|df@^_qONP`(HEk36I2rc|4WO_1zRcSnh=3ULn6g~<&y^>Y0i;J9p7 z=SRKEjXAJM1#dBe^=|d?zH-e``5iST?pvAZV>o~Zd3#&AGhI>V4B?Q};KW`@ge}`)c6DJ>S6|Z4nM_%wS2>)QCM3|Ca3OwxhB2S7FTcjqx$I${3{2F^berhVPz+hpe)$>IUnkH$*( zg2yVQ(QCU~o5>OQp6dn5R>5y;tH%Pt+XmPj{xsVDLAMf_9eR{?KH4j^E9E%OQck#G zf8sk=lErZ>_HH;tHb&c&E%=+)rwKW1vd|9EDDP+%%HdWL9~p|ctCTopz^mYA_<_s} zeS+DoBy`=qVrQkQH7LnFZ_5}xMxw7;e84< zQsSDoVQJ9QpZ|>?M;g6-bqGF5`2T>8J1ov6;LN-US;2nv-rAHd$TMs6gP2=^9Own(Q=z2Q}tRk2nFf7^YBL^UmN=2+eQej*W*X|u|AIX$mTvU=Rps1`6g@j> z;!nL%KP-oUu_(mL^a3NfE_4-75Nh$cg>2lJj(Wcj;BeF<${sl+D>^7{E3Gx~0(k(m zP|2lkFL?kr^;$wyc<IWpPRUi%0zRl@*Bp|1Zfn+YQ8-GlFi8!waB!Nmout4 zhWq)5#wC~O9p^d{!p_{f63t_Wb(6pIF3&s23taMrl1bYA99DM=1c2a6w6#6%u0505 z8T@KkN3Modq5y(-IUs4wmVt}YVkzG!j2Fvh4voR~C8qFIRb#D5R0QA?%svp$?|TW)GbQ)GKoe;)pu(snj0YCSpi~62>>Tv&#|vE zxWspm0>3f};~T6nW>h~lt3aS1LBxI|MQl;Zz^{QH zieKgPlFx@!#;S%M#SpV(qS5d~4t!UO!FbBP9GoX6$RD+&-*Lt{icRcLG@gW~6ot;n zrCL%q{n4MNrv6G^Oq{lW_Rm`3cvs_`(ltEeWn-vI_$TYGXY}%?5&^1Bc^TDI{^#Yy z^4;Jf3uVbp&?^MKY_F{~lPYx;u7;1Bf|Ik}io78ODa{ zmZi9MSA;KA&&wz~WP!|E$vOy{rYa8~@>d(2XDIDgIFYks{dGuE>1 z1ESD1(5g4|U{2fgB5OAT#agJ7ZA6abtjir|+1VJHW4^e^${9`O#Tq0doB*+e6`r)phPQKA>5AwErxUJY354!{d z@C*@r|J};DX5nQ#^YaUYKHzq|=(yBkgAJcPEz+!@+h+AD!P5=y;!^3~ggLdKy&^@0ONLi zyaLk8jGlcnw2XN@R&a18+^cZCG!-@!7MfeCiPm^K=AZfAW?N-&DnnIH&BlbH^jsBt zg4Ue)(gm2DS|HI}0GIV%4Bd(ie-$)7{@D<|n^C{e`o_ERp zp>nHGk{}yVHbo^HmFK%O*`IfB9==-~)B60NjT1}{Yx&h+^WhI%i{vclf;63rUkKk~ zLrSBSkO#E!J(o35(tC#hxdOgk280eXU*R6B$k4S?(Jr4tw}er)+RMyc~R z1FA^6mlITbIpnPDU=EMX?(zpR`(KiKKy}-oYFG{@wnpl#FEMiXGjUz3a+LzbcnOM& z?$UYwdcpuwRNeEWh`wyCwi477c^sXra8)J%edr=kS~wN*B4J3o 
zudy3-o)Dc>YV9YYiqGtD*0GyxCAh31)7BIRFMgp_=_#(NH0TQw*G_6S7y6k48hlW6 zr&?A}|3WRlbT*iqM}kBz-ROevHM-z|NS_QRTh6Dp=i7es`Dl}+vg?fZ!`F6igMESJ zx%~)25XTpb#xZZ@>-2FH&zgm@Ri#(s(>Aa7X+sX{7`P$YurzK&{ zhLfknJsY^E*R_n=H>-@8Q#++F0Gp=mrZ>Y=SdtGrB~!H0idTSM@{Vrl88t-6w_$9A zQ7TZ6qfApztR#RsfAm@M=+`y znPUKE*gv*hMN-3qR{JaDoaFcCjIW%+RQu0HhitgJ6|8_@2JM5GWG6F4ZIt!Ph@cEC zb_4FNe$&?#&55_4?73HV9e)awY3nNW^iNBrrY+lJ4 z=;W+MHVqf~=T(8!dWZVRHeNCiIAhPtz;Ayg*3i_yhaBX{s^spvouZp-ij;J@RO?>n zd#f+t7E~c>tSvXV!CJQ}D=X9$qHBDLYkA20IQy6UR+Q`yKtPyy45B-3l-_-RVGwKE z2vy?MCwaVh2GrxwyREj1WW6@1yd~n=}1yuRU;IYd^6gmFQ_k z(7Nv$uDKa(e>mM+FWoXZy8fStHkaD^3!=^ZVFg~hii{^B{5!{MtuFM*%>o1ysAoLO z>{21cQyl#L1{r-{Gs}Y~*-BA^^fnQ(lqOp!XBijRm+pGDi4f{4^!Zgkh2F;ruEPWvSZyMTp{qa*cw;dBl&cK2nrFkIUm0`z`gjIH}J_}p}(DV&v|+ID=R*g zY5$AD0EMzk!k0t4-(fPe zJz7}`@_Fdg=`+-e$#Sl@&C&oK(URzDGD}v4QieK47&h{vTf*83Xs1`PHJY|j_Ibi; zi;UoUQ{uCDI(~PJFVTzLroZeTUq6NLwtDg~twhJico@_JdOvgu<0~9-P_#h)D}R7E zV%TA#Lh)6>tW`Rf3q+Bgf5B0&dk-PW12)a#+!0nlL!x#@Yza7b8MPe{vhlE4atmI4 z0Jlo3!k)(T_gPtMgv*TRWk%j34bUrJ6H=b!Y_f^tFI))~nt{!~&&s3;Y;9W}ZGY;! z{7F&k00q_!#anzFLKQ8ks57LUO>0%ZY6jTkkjAq~+7WMue53`|#CSRqRRd(&_^(PmiVQh1Je zG)M33#zS?=Hc02uD`0-)Wt0q2BsUL&EVqZ|FXa6!8S%fJYSi)`|{N=NZ zsGKhgO?7`M-d+j7h7(>3b%M$w-YRy+nI3m$vO4zHQ-CK#rt? z{}6FT^nmT==r3(Y--*9?xKdTn;yY%T6NR%Sq2&sv{~*cvN_kIlT*E33QYhb6-sV-Y z16#8J6IBw}KZqCSC}tC<)~tfe9I0v`&w&!|2Qh40DV?OB|JZt>B7KYd;Pk4Hj`l*s z4+S^04v(WAJ62L!jzsBMa`M0kOMY7YFY{00^K^E+A=PL*N>b<;%Am~SG3VYoD zfW7Oy&_X%FPF@dQIj&|XrnVU_cSut$yAgn{47ad@)3QD`ZwMx%Ntu^gJuL{$xjKHh z5H?tas?$7u%-(MUx)0TsPL(32?y+p@n*`jNO`V9Kt%VEiI#L7?x6pw=U(LTlC&BTIw=+{L(RVBfkI#d-k^ppR}IiskwA{Ds~Q%`=DB1#s1ZTL6fpw1hrp7(Uw|LLu(mHe4Z z9XQc7P=jpkt$D*!XKM*AudDBLD(ihwKBI_yeXcWRzlDC7{`EpbZpc&Eza)47Y_A?g z9ad78k4vZUZ_^-d#Z?Z8S=4+dru}Tt&9Fg|j?YDO(eqz(!zJ8_LG4Uo<9e|??!w`v z%Q1UdTHkUmFny8IO6r!YR`Sc>Ra!IS7Xx^4HoV52Vv%l-;4=S9eg^~p$S(!9Muu6# zqH|nYOxV4ocDr6{#e>zC!%6-!9#wcD)CH}gRS^dd^O<^VP{(@NlvEv@xsmY1Lhu+Y z)cyRJi!e!Xqkrajgrix=QJ2VeaxltdT%V7k-jY;*jIRA{Y2=_db^QIPUTqs82SK6x z1oG;7%Ai2ZPs@IcHT#VjnsT>!V0P`7@mTmREfaqd_nF{pFy6|Hv%`FzKR%_N4A@^ShzdTIt*`(*CTP=H*DXfkv@0VfE z=`!hRRibS6`RKR!;tUa$BV}4FkM-ila%&W0RLppTfcA)B=JZs(#uI%M^^n$cQrV`6 zXKLx}C;R4S)`!UxbT($k=CW;J@n+&vQ-$WicQzr8}>fLq^qWsU%naocd!my&v5SvVo- zvAl%`^JJw^W-fB5abl8%|C{#?-qrXh^e@_*PlMC-dFo60gOr;^?pbq_xdI+F4=~ap z%VS(Ulbn`x%d7+2aE86+O*TRXl%PWL;Xw3M=?NZV%(OPi5_vR#-&r( zy87*RnLQW4@5voV0t}{+_k<9HcUp83bXBIis$dyax9)!Hh5~XZ^mO&7} zl+I{dL$Gzz!vEZcY&sPoU>B_hm}NB)FSjcM9uS--*J{U>3#NvRvqueZPwx;vdTxu_ zmljLZhddN4;fv26;wbytu`Prn8K%SNWj*F?PhYM03r)Q>3ga_--bb9T#nJD+7OD2H z_9-oE9p`nBYXf33Vwhw#BG!f;3NFs{y9chss4GN)aySc;jVlAlwGJo=Fp0FSqDc@Nm%t)5_jY$)x38Z;yF zE+>AtQqvjlpwn++)O2O0$Z608Ro=JZ7BC5pVte=luy?ymUGr)!Rc^5B59`k#Z{rn% z9g2|`ZH_wweUMl$QxoezyTWV$^x*3MRPA4b6_|tLM{OQ*%q7}IuCUr^Ma56fM!dB9 zG>x+W1GQ0;V`UTu-3`N0`!4YrK*uvMszP-Da9lm|BFGwaET!m|1tW~M+c)kbVsX_d zmA$wPwKYiYFmUMb+SoEwk znE%;biA?GXEB5sJ+RJ4)*71;YEyY(ycf$}O98HS^`ov6>znHdaxPa;}!_8r!lxdQf&uQ55(Bu^t zg%xW2;Q~>d_GFl2qBFP$oe<8agPfzqvx&V36&wZC(^%~#8cKH}GN&1bB2D8;YW zvCU9S<#!5rDLl$t7aq20AnG{h^`t%IznDbB&fu)~P9~hi)8d;t@*} za;=ht7l3xpwl9{zZ^Q}ZM%?j>rj>kP>osm4%(?5i-8C&7w8&W<@8z*yL;c?1u9i$0 z-};}NcQ3CA=>@=ex1YG>@K<+S!w8>@U_*Hv!$h@CrJ(U)+ORe zb?2NKAlVfXhXM6ptn-Q^n400NOkn2$-PyUit2Q#4gGA#LBa&k@K4r%XdYKvK-+@WZ*W^k-!=?FHXn1d*stikHf90~tIAMf4He6*A%YdzT*craXE`cOd$9)nCWuEEyWHGKGB}yE% zDpc_=*`4e72W`yH`|VAHikm2`?zi65trXb+WFQWAX#bK%KirZc`7&WbMIjGb=*~Xt zTaYEl@~r4vKZh(4&?FTKX3sxOU2#^ak(?*Mi>2y|%vid*S?1HewksaTgQCG89Z?aM zkQT~>fo@eSL;Y}*E7*ZnsQ=AG&g(q9>8r&c(XRaTvSMiy+TjU6iu*>Z(B4Sp{9J=s zxC9(@o=+||-4UU-FA|Kr9WVVW!3%4w%f)%=OUC7Pu{v94kkx0XEfh5LFU^E=@J*Mj 
ztj9}H^Y0&B2-}f&)s|+Wm+IeTmrBgi25ubINmZ@@YwNDA;0b81<_mvvYT~*f-2)h> zH{8ZzA{rslftJ>*ybLfwPmHKEucbKL8_)&cruJPI^nZph{OV-MOtT8`vgBCq% zgVl7vmXZ`&N@ZWApzVasow~H+YCr4{az5p6Q(@-Jp3O@d7Fuyg*{aMl>oU-(mr0Iv z(hCGu=h6SUjL5p|7t_nkwUt%}ib|=ixjyHn0mlNO;954r-X7n@XD^;QFhuPhfq)3) zysP;P9eEgX2}#?SZEiBqov%LuVac-wd%LE?LESwAIo++;f zu?{8lnEBq(;he}-E?X@tgk;kZl)vyhF*OTL8)lX%n(j66gdt+Q`9>p7rnCX|c~{RUOBY$}4tJ+vKWN9;;e3RPDF_a!&xq{u5b<5~`L+(Y5F@GC2v z?-#f8l7+$OijH{~XXnd|M?^JPs}j4*A$B&h^No#cO0w4_BKX4H7Dlal$~>IC?};Il zmFJ7ND`pGm1Szi2RN6%kyMFz zvXsRwVD#Wk2wL%rs@F>=YLU%p&uunuoK2k>wu99s@Rvv@ed>TF7_@bEz@XVs+p58x z$E4q*G%go!z?AVIZ5DHf7}|h_$>|ad(td{zjRIE+5UIkk+J%exxsq~fMAZr(kZaO& zfL)C8vnA+*_}MjHcqYEYvfTq^#M1PEO83cUMO6X^nzg2ul!6*iqzHud}{Y0|Ke!gCk2R=6Xc`qF^knCR-J=%w({4WXzS z2;s*!ysA}4qa@lz{jPYWYZXM|z1kb=sjMPy%xafYoE#!?l1XWA@F_NtGF|85!pKxz z6@gL2SX8#uWhi8PcKgp`?x}>Wwe@SJ8`LAI_LA~VUL17ZJ2w*7sftwPHgd8SjB|7b zMynTw;K>7s1A9_nc8>)D6V&FXr9&lRnDwnwA@eM&V&b%?nB;@o5_d7oU+;eCTr|Z3 zCPqfUuO-%X&k!Ekgty*on814gFyRW$v6j-Pp9GAwuV$^r_4n@X( zP8dm$@$LK!gd;2dt@~9>VC~4=@SIc;2RNc%r}Bu8glQn9Hm<1$UwsCTeBcCgfWlBq zs_i^Nwpe2Ci8mI`uuHD=MCOZzykizO{ZEgq?p5w&%xRAEE}!T+|FAx=X|0O9TipHv zA|8cB9Vt@S+sNkET-tjpR>A4~^$gd#z0W1;y9&3iQEMh;<`cLuXN?XFl!2|x9d1(9 ztzYk71d+@=)RlS`F(Nic5wTr7mMnX8z1dQTgNMM6MkL zY}8i4Di<}~{sygZHg+@9ARh~UqS5~g(;`6Iu@a~-iF@0b4Sh6b zb8d~?U^XrwQi86lcyXA6k)00z5LOs%40T@-7}mZr)WLsSX}7HHSOG6)PBoGgm04*@ zoz9c%vASdmuH^=zQEQXVF?K!JGNFihKtThB+!E~{0^}*=nG5}JS>hP5Fm54i zx%O)FP|hY(YKUWDD;+2CNPhtzT<+aMAalFYK(5^@M>9!wz08&?P7nODb;@@!%HzI5)nXrNRaS}`cCS>IeWICN9X4Oh7xayD zbvzlxyovaJ>7-$8yMROS-Si;Jzf~Lfil=Lq)t1;h>cyAQhR6j8Xe7;GuD~=*AAUrL zH+glv*+(9gp~<*?_50(nu$}K}4c+IRU70da*aZD~6W`hpM=Xz4j~MC7K;2FPOl(s2 z3|QHXD}&DP?bn>?#x^Yr_FwM0EN2ZeZy`3# z)uWf@K(OsMa*r^svd0^(BOV~sLj$6*CYD4N`nmt7NmMtu*VO*ki-CH(ekXJOLbjob z%`yDs4w~zGji;#nU4wSoPK^5xUKiq9L4zR}HLGCNQSOt_u1|IeHPG7ZIIWH_1)qMx z0I}aw6~=03gu|(5vCJ3dbm~{?VF+7QL3XPw> zKXgY&kV~M(bR8iiuW!~&?^p(Z9~I&S!0Z0sye?W&o5XbnZ1u@?ZUAdt@ZYSp_Ak4_ zc`;aY4_Yf-1=6rM*#Ito+u+~zl|oKZ)mDjj9YMI7S)h6`%+aF3t^(|uhT|`fZhur-ONKN-OQcyl8w34@<)|~LaAd@sz1fq>B5%)jm9fX67DTkit8{ygVf9~*J ztZ+`aScucfqY4SWk0BHK#)sOT8W0qe*M^TMYX-dCi{K0JcCQ$sG}~;I&soLH zc8TZZlar5{rqw8;phE7oZdvr8uelSHjL_rZV)!xz3u44rnk$)B%<3`k32bz=f-Ag@ z5H!qV-8GAf1>tb*7rb;Qh<;CQ@RvafM1R`kJXRmO(?Qr@`DCn?Ptr;*)M!-YUvMr< zPd6#D0{`9B)0y>yblLs}M_(1oo0_;gELvoForUH(rjPAXXRl$#<_oS$2AX$uurjPc z1;YdpqS(R~A_%=rcjwNS3J2lm> z+q5k`J!!4g)ASw4#7!~(E^s{$T}rl3)=f5X&zSx&D-S9wXxo%bI1s4wISzWOuSItn zLw~;BG1vNojrx`HLoTSGwrT|hJo+az*z$L=+=q39bjnV~*9o4x%3$71ZvfT48RU#e z6$k@S@zb~X7U-x-ymv~ z>}!)O$X9}h-hL?R71-fg>(hIxA&v8Z41~9Z+cLpTt4Hk5=4MQ(YYPfjgx<4itVyn` zMvg}kOOauWXYf8OKr|dHtWhpw<2m($6y1-FoV@bn)2=erp*(UlQsCpRH>)RUYDuQ_iF#r-jfY_$aE7|Z>AXM zaIvMXHl%tKVrz7kIP{3Oq8WdlM9|_b=;qj%7CYTdsKNSe_BZLXs6OR{Ssv!^gQcYI z56IIh;Tcbuoz&*C)(mNnKl?5C=zR|;hT|` zDPAXus~^qf+!Hc_mYeM`N5eqpZd0~W+hq-^jq~nH`zT6qdNxVPqqh zH2d;R1;Vi%{+YwKnUkrhV)(5)Q|^(9U`h5fxX~p4w3G~M4Fe!OVCHE6u}`L!=AAe> ztp|DU`)H?Faf_@M2-!5{QES`AvUHWfWN~d5CB-cFXOhD5(9R#-jPPLpKzgQ>V4+cS z2BZmgT6V6$v%^3bgm2&rHCWF9Y
mokaBRcn6GcpE7lcTv# zzfr}G`O~zzBHE~&YSZu9M^?75A*U9@mALnL!ZS9F=7NygHTD=}<-aVoy(rusX^T~g z`;+mRYUKOrSSLDnScI6OPhIbXhNwP6;(XeN?>feHle$6?wbgCuKa%0Ey6U-nHCZ$t z&zU&V=Zw8Y9r}%bU(<6co>@YFeQV((E_qwBTGXy3Pbx*9rop5U=9sWA`e{2C1l8_# zJNPP9fFuOfK{kV`tn<$wH81Q5=&G~)bfJ9}n+7omZzHZ&G_BShEy^T;i zZI`$EwVPqpkvea`iN7Yr8jy!?)e+$gJwMCi^J6qy?*5NgVrKU6W3@*#+?&)Yyf5qW zSaeed0gsN%HuKxwi^Y~%P?tCb0;CYFNuG?S&CwWGKVJ4%1KSjY53P?jIkQl?VmOhq zQ=?4Piyc&-8u>TmFLm}SQw3;aHupv)55D5fT;6|HlB)8qm@}HJ&HKeX4pIw}@xxgD zN)?4-z~K`dQy^|Nu{4u6l9UhvyP?Vl*9)r+)?My-bXxpTXMLX| zsg)OQ_m&lS121JjXzQ=FZaS{fa38W7PjP8hk6LFM!O?=?)

sCkMaGpo(tFx)`qEmOh!`BHrJ7~j?g*mWNr zIka<_yh`j+{ift3XQR{vx3R;%l0(7@;v9SbzQVTnC~~6q z1KmyOYHc@+QBZa4FHeD58Vr5H3^sP!grUMIYeDrS*qqjx`4a=457#pzWFSr)w~Xi` z#Txc)n^%N1>hb=8r<0nq5<|^(Orl%cMKrDakRYjd8&gB(8~SX$hb9L`-zKd;3Eh!= zlm3m{YyYkOTj-|V*wqqEaN5aRZb1&CNQNKFJmCRLX(&DKuwsi2VuwO@S}vU95J;h> z{FZZplz~BI_JsaQ>nE5pL+z?8VyZNWFp9d5t{9ka`o~B}Bl49lcoLfJB4Yy~Z++8j)Z`5(+)2pJ2;(+yo^Z!ebj*uj<|!x9+d23N?Ctf% z$&ipS>W1m*!umi5QIh9r7kblZqGmZmDto@)o)VHSaG%)clZEx;@Q?}Orh0J=GmR}t zU*|g{G0b!DsxZRj7(C>Zrg4c@b74XQqq($PG*uye^4&?@qut;0G?SD%kUoVzhXDqDYO^GRxUB`g%E!`}S3 ztyR8}WnWOS@WuXG+9P_LeV_XNjvTU`tV?gZw$s6Fx#B-oVXXk9O5sk?sB1n1qYsRJ zKb&=6t?oG|WXUTfuH2fd>GUBXUA+@10Eq}OtPO~gS?PbT2-c_5;g)|+fVI%YZAGRb zlx>j=M3&mqa2+?yy9m;4W!ZVFNbb(;=h66+hEObuq{G7uXO)bpU@ONJi^!&&*JkaG zboa737d&uH2ArW0e>yDZS%g#<<+m@=vFuVKvpe0w9R>(&nPv^&N{W|q#zkyyoq(}n z6Xif(6OG85f8@W%YW+dUxU)hxgBKoa0Y}Gj2$vb>YQP_7g#*af5J+J7XH)){e3ud+ z^(_C$7ae{ixzHCN_{xnm%Fd8!Rn*CgYnIeX=aVK6JA<#XdcI+U-F?lax;RE=`~AYOX&f(m z)&l=3+t0L<*dupJ*9=#t0m^$Q3NOx`{uWqp1|F1QOG zjPOZ2czyn0)j2xmYKBe~NCm%ST!CD>mQZ1ic9FzAb>Je)+;Al8t|M?(yvv@NkqHK8 zCl)PXrQMY~-k+Q+!QB^5PyX2}b?Lxwob4#)tnK1usAyt+CZ;;9epRRT+KfX#r2&WZ zGhK;s>B9|ARpQv&Fikd0;xG9Q$vz=o_mA|V@;bP1wh#u8O#>)tAYB5x8@h*WUYbs; zaZ_l)vr;7D)`lX4PdM-VEq2@7XN9Jf!4D{K@LFuI5j^ggU0dDZlg}eZM3Eb_`g9Wg zOCLLXw9^)+X4ldXP^NGXjk444Bq^cH(}GkP(Mz4~2yIO!906z>^Su{nF-GF#^zIntolZ_hnF?9xJXTb)5MTN*g&VrAdxoDz;&KE|KNu}`@Y)lfoV2_)X|2>*$i zCqq(+)Vm3ilZV}MF?nv#!^Gi1k5&s?QNTUEL z=@N#-?-+}B(=f`&mLY09m~@nA%_fwSqH#r>J3CN9M0mhZo8U~IOz8u{XIQr$<0cK=w;Fg+b`7YHLS4GH3C}Q zKe9tE0X(L^tkm0Y7F!vQ+U>^^EvBucF9Ju7DR(@^>b+_hYtW z2CM$O=CH&b_sFi-q7)Ph7R&w%8j6{mU?@(JiN_ocpcK8HQNlT@lWGb7U6R6Hgr$XU z8g!t1Z&fIxcor}SvgTn0b$%y+Hq%!$o6xib&lHR=Xe60cYx=omV9g_CAZ$cNIHf+hYOeQehK5Fh+y^6Or=mPBH2t z-pSa0Q(W1h$Vw9HL#09tD0Ptzb&xNKnTUrtr~U+)GXaLWv9jdl1fN5F(+X5@5W zF9kVDgspqoMUsMR_@E&2eD>u7u1|Uka7b#K2P$d-s0eoM9D9*i-8h&P!Hb?omKpY& zOI9NLxs$i=CTmi1q;_0kHrnlw;c(dqPFXb?^hGh!kOa#^yS>I_ zx0qABMAW!TH(pdMp*CR*Fgspq{$!Maapwv-lB<2|y5d}RYJTqIqFOdNIeO~{+iN2o z(-4P0eX7;VvqM%u$?XGYJoVgVn77nzgC>~o(*K9Kfa8>$WrVdw3`d;Up@BLAo^ zq+u6I#msMtYRP*7J*(=2cVp_p8?j$3(KQdXz8f8OBJtyFETl-ak zms(BUo+giO77F$<-11DtpC{8j0nh>XM7Fq+a|iYVXuPQfDumVC8 zV1U6p(Q#-kdf+5!&sWF#=`bt3;zSxdXEq+!q%q-d3otEe+_-e2xSnE#Iqm(0J4X>F zoFR*DAGeHl%nHQz?&wdt#WHY8ADJvCbhc@`U?$=J0|EcRa8Mwd8V9DIZYCN+gmm{;%pC5}p=np#w!;@x<6|^rjPVnVn zC#q?zFiP!mjZTz(R&QVzFy4An|KI=|8!a1Fp>sf-Qc2R9&?>4ZaJKiRs}*}A6s{Zh zF7QH&Io9(j!M6r^5D|=fcKK{f$mW@p@8>JD!w1~ot}E>a+`RN}LS7LgY9sZfGLRo)Rw>gpt#VSk9S+(nOkzHTQ*;_BO%NytzO9K!>e?)p#} z-lPi?IOU;iUR)rlPCI-hk@=0I{BVtKdzWLBp_@aTg-1L4k;iG@?d8ol57idYuS%68 z?+HKy%pH%%O$anxK|}(Y@xMVn9^ZLMl)^Fh*ZBiincj>LTfQf4y-dDByKp0|A-Z;9K0=Q%u%*~*v@ zri2fiZ4KlPNuq1>ewjR?e6wcEYkn12;)VG-@vMR54G;gjsEC9`90>?KEsQSlG)s7!3fli}x&ezZ4J9V7-5 z`m!W0?iyKI%~qeTHhSJ-3;#mWd57`(?+4L)&n~W=$y+GkpKrmPjDMF^D-v+yE7u$4 z<9O0IK`%e-*#(3G|M$C#^x`Xmv&*(Y^Ndr%^Q}}F?c8aV2|@j> zneF`*3qX>f?xpd)upP`fqa5O5ObA7Ca4awHl-RtIN@~|ih;L;x$bi?ZhG=BX;x0o) z^Xf_ODi(fPYI1|+`ObxAzG9aHI+*BQ1D+3oJ3$2SrRSLb4P7+qolegkd)eh~Ng1*h zgM}D_wnAW?J%j^Jo2|$GdAlchboe$b$XBzr-?i6#f$=+uUK>g4z74p=G7V<4KaE+R zoY?Gdi?-0A!(P_#?pJ}AZxl3kJ6Q~`dH~5N!liDc3NSumT(#tgGVKXEF}WU-!M*%3m@&) z2&@52@t@MSNg$>vnnU*Y1Dh1^kA|N-`#l~}$qoAnphHY~aj@b`$UADD_MMjc77>Y8 z=;);V&K-4d;!U3I=u~T5LO%+Bb`x5ykD#UiJKi(Yo-FxtbSAa8!;=4Ka;q{EaC|PG zP5ZGj;ivg4;1xa>~uwr_j-(u@P8Hrc|E6@h7^gFb51av^;j~&yx4;x?(!0FgUCpXZTN|s-)HTX0 zRI8$FdMxwD`wYP94s@Q(QukNi$>k1|eZsXcD|Rii;7c&1c_`{0V`_zj(LyuUW%SVQ)WOYZ-}0V)1~u z=YHg9G+Vp{LVqxTSPvX3B=d_vGCj*s{?F+So#thSt4j3t-7%_zh=&ra^z_-|w4g-! 
z3H#zKrBw}Zw)_{!Kg}miE(DEDo$K$V!xS~;t3$q?Ph&ej1rHRt#DX%@N5h&wdEBwX z7I_+uG@`|-*czeb@{C}_Nm&9?baVU|HM(1xK>5uLv*cq5Hu77jn^oWAz)26fl3BdD z9EC`}ZsZlGrNC(V)Nb7k>x!J4NX{!@Vc2d{0*fJ6v%+dNSa`SZ&Dv%ibw#He0HY1_ z=I~?v`&M?rV}En5Tx+Cv7|xcq?i#4YWOXUpT+!Gvt|0l}yum!FX}VGOQD*_jvVkGw zzZv1A=;PhmFuIj1`?aJCocT9{3K(s$PSMUvj@)6^VQ`TQ(|{@jw9Z+ZK9I~j)WK^V z)h*3%)LH;qYM&JGwC^Z_y5ycNm{TGTIe_Z#dp#n`Ln81o+TR@1{VzIEr0@SzC(5D6 z2X~AEjkV9eWRar)flB4VI|JQKs3sI+z4Y0l_ru~`ZcET;gIg%OENqbNeYI8DA6=Wf z_@GK(chRFBHY4u>dO8~UN>O8Psmnw=cm*zl{RNsbFn&HPq)TokgH;*>zuFD>%S|@f zvZ|bc^_si_Vc{)BV9}UFRgp)`)hRKQ&msUdN=myAnqA9s&hpI^o^yd$0N$1sNzb&@ zKb_8YS!LIVPGjHnb)(7>EpdFkW!o*e1_z>|*tz)<=f&;y!vjPFZLz+p73rWOWWl!6ot=G9SIzg}qe~rEIWveV!q{nK!KISPI06>SUrMzm#Mr}HxuTDx1w<%_Ik8nA{Mv`0+HoPi>XHWpyyP3e%y@% zUIrX>mU(p*B!aM%MI`%B0kCjAfOULrfBH>d;d`PQ_1}~!&Ks5<^nEbnj{$D_U%?Yj z12&QfoUvsc@WdGy&@{eEEA`h^;cg@AR6N&Rcse~T*L~1;v@&o{!9{hnSI6?s{M;Mv zQGd6<{UM3G^<&3wVS7Tt6Ve(jSaz9ybl3x;XhHiD)1Ts-cV?pL8Hi46_Q4&p_H29b z&>esXPs{9R(r2$uI7z>VOdlBM^-?3pUkir>pFf@Ux5Iw8bp9fc@Us>s_-^=CX{uttFsOk2aYhThlC_Ehr%vkd08)!%(t)uyo!lym&%jbs2!{t(Z1 z&m)qW1!(oX;n4J~0~%{N__w&u|N2lUk&7XB7VlL}y{v8q(YF7Ye7n_~cJ|or)Y&yC zeRG3GD)EUuD1--(__rP$exG;QIr0c>xmRI@D$~$2(^J8cxum>hI_csK`l|iXezCao za5*$k*nNEau)y!H;EeoGvJdk>`(C}D_MDvXpEkmQ2f_zWTt-H{yu1FVL+srU0xN(2 z>yCkhZw(FZ*+&_l;~@wokP-Kw{e-mrf8m9>;=S(_4Y23<*PWC4Gg#!GMaC7-?wAfI z$H}Mv?7=_1@5-eudyc--%d)?COaJ)n$-96(r@<%jL*Y7*dea9ZB-YGOiRSueNE)1dut6VFwrf!w~ zg_dq(W+g8kpME@dTJ`MVqkHx4eM*1APS5(Pr~yU*T41j zhi8vGN*If$yJZ*u8*lzUA75AWmUuy)VC1JSR$13Ek(B{A$Lm55VU~y1n{FLAw(sBk zPL5ulO`7X&EKsVP&zN0T4DNXG#y;guVTHMcy6b2KHjzDO0ChHM3%?~gfHr<WbyRz zl%V8^wS!*NEXgqp;wICyNJ2R>Y5azE(n`pi;C$+v6MiJs{N6JuFasJA@*=>eOYO(F z^Q>&i^^4ETNfL6+wMemsx=(k@c-;a68`KiGqCIWI45%^Z{aOmo%%B@nZyL9tmNS(+ zVr4D`T-mnkU%oxs_c_l#aAm05;n>_0*E0cR^Q4;r4`zy}AEp-$CD%K{1&Ys=cry&w z$bxdC=X^mp-zIIhE3b!CN}ei%RhV`Gizpu+`>3NNc~pSLXM-6z|f*7%gCy)-e>O^99whF^lHTdO} zn&{hYm%sN(yQZuppXilS3el@oR(-c!a)<%Ka)UWhB4?%VCso{+d`{-PPZ{yALXRuK zU+n*XoPA|bTW!1UTcH9i6ffRVJV>FqR*)jW6I@zc0>z!wDDDKe;_d_plH%?fw8cGm zfDkx&zy0mK&&)Z0zV}ClnapB_wRqO`Tyo#fjY|IWqFYnH|K1S4lg?b0+q>Bc>8w<% zaYmEpGq?WpC;ZnfHq%mbk=p>il}s)Y%Gp0@GuGD(yc# zL@$vgh1-pMR{)_#hUz&1vGY9RHfJLKrLD^Q^uPK{n$zr^*C9?6M?Xv^dI#aAo$i<8 z7UX_ZsQK8@Ut&6#OaK$DY;NcM&Ao1_k@IPtyV+0CJ4z5Pr~H<|_l9NrX2os@xnCe6 zDb~gT<8*=49E0WxXV%t0sDK9LABX8Ol;WH)l~!>W#YQ3r7gz5tD$Z68Mz}U(HO%Km zvCCrER%W-MYNSS_5Sg6^Y~I<3Q*J~M`jDx`T}u17HQX)aYb2Rkm=w06ZcyqS%9B#f zJHP)=wh`JdK~UAb*NK^%ibk%=CL-HoTPeO%-L!T}nhq?)hVBM3i2dPr{Twrj^cpSO zZg1%nT2;>^PR{^-y_HAi8}XVL?QZ9>t*O!Zp3&D!&r3Ilrsd-_Dq_b!a_;SmC}<=w z0{wbjLq8eauYG3Js>%$vN+|8lh}}}oM(5<`j|kZZ1G!s-95f?&TbeWcXT|3&K3mnQ zS}GQWT8OE7cX zQEoG)cEae_=XOX5Zj%ikrP0&hEDK-^Tl<1O=8CIF@~m+d7M&GbRS^8=Yq22dZb{tk zz4Vy~0Us|Vs|=hOC#mE`$rm zjVOX$E_XdHD1zQJ)M~rlo>~?4+5ql_M~^naMESAu)SY34HO_q?+SiO_($XVWDaTkd zg3Jji9#44}78E*N#vg||tDe&}x|fz-hQ4ge`kvqKUn+^Kx6!TBu2XQyo~u-I=^LSV zLc+kTPhES>Zn3HKZ7jkqnE+AYw%I*(zuf0UROB-L>%vK{Wt!u${_x`xS=;tqrc3ij z3z+Tq+vMu-*kk;pyR0Ejk3-?vUtU}`91g)koDs$f&IMev*%sl?{sAd?%=$a&THvow ze|Y@nXHus3^Tf!fM`sMA`rQi$r93jo_(P4Njj!jvIM6<9@|;#lzEX`?VH1S$MB&cM zZtXfxYPDTnHiKB8%;nrqcXZ=o5BkJXwyrD5S)OXOEK9DTv*>)rDKR9@D;=6mv9Srx z%^fkRF~K)OKXsTAcp7qgg>IKY#4db#S>f(Q8Z3~an>456{W%?XT4PGFg1cA3QDAUHUhJq;S=s-2orN$+i4g3N zZ!~%t?EQ05+KCcCwWgAT`5o_Cmsi38x$Gat2Z_K7XUfKrG%?=DANRPgf(5d!{4j5L zaVuwA+D#%^zh~Ukl}#x96IVS@DWmP>1@(X;dp*j%L&`9Q|9A)b>kF;&FWufUc7snl zQcdAR_VY3cqu1X83uDO9kS^?>}D(=;#^HeRkFXzYg?OV%<*&DUw zn{M`Zm?{{yZBt!XRuxaZD&JA$1nxxF*DTadR-Y$G4X?}~ATN|gDPB}>I0$pVtPd3T z@GzpFzHnKa=8A8z&dGO7R05!m4As*SPF!j2ZiuLb$I8vFVuuQ8?@N}NuQz=+K1n!4 
z@&YzI-QrgkB99FRL)wG;r*e^21gc}Io^B24ashwB7zcefbSiyNeX{cnXSC~+R-FTHKT7=)7=s(V zux(TFRbp#Sdna+`Ikn+#7r&(BzMp+(R7Lz0Gi@^ltxDq_^Erp1r?`1F^}6S&D&C@w znBdyMU(+a}P~F7|9BFGCyP2?RW?s+66Gv4kA-3f_9mT-UW2XCAReere4xc2B+J;<* zkqQVxcg_8b@>CT(ait5}-VF_CiZs?$5@EvpG#mW1b1Bv0$!<6@y5KlFi-oNIzD(WQ zZy-)8oJfVruogEAW$kj3srhL8|1+-p$P z8XqQZL*F7t-P=1Pmc^4{Z}CqpV>VxI!?3KE1yw5kkhO^b_tr!79`ud?xKOZXp7H zsQZ+rno7p{IXx_vt@WDpRZCJ$i`_#|8ZRJ-ADK8R0^)DyF6zw7ymx2Nt2HHBaJlr< z*NiAek$0J^cYns}C~^=c%n^mYNRqulM;i0_C`(dzI2XxzuVSoq(NEpK%`GHYE`E30 zZ?W;2|Nfuv4S0e1`t$3wU;Yh^5Pgvx(gathVZKZEJI#k9^d2`^2A5+M$1lpH-Ka~* z-cTUoY8$;DZ;uovSd4buSZejQe4F@b#+Bv;A-Ozj2G#g}&Q?~ZeZoN`>IMSf~YUu`VZ+x6+AnQvxiYo}gPp;ypQiY|l^4GnNwN7Ro0lRs= zH)xkBH>s`PG6`d1A}7o^ks-^@7Ru0O_vD&xo~Qpgo59PSpf}HIViul)zZaM4Wn#}! zop=!M8LWBm~5?7`ZIbS$EcIEt4f$5~Gc!fyT$=S4tukw!W7dx$e zUap1aC5@t3Pn-fgTL)~c6gb0Y&vQsk2jfJSWL;=YOZYH&3xWdEO6x5sZTj|Y3@qg5 zQ~O$~SlIt$p3p2#fhuBM!c#7H=)#yRkx~G_RH?~ac#+{Gl>_fj2;G_bD=@J(vyzRH zzQDL8gto|NL*yFAZZ_{H^~nh1Q2^?u$oQI;nsAaO_%v=#?K*Bs)@aLpbFstbI6wrG zOXNSE054kt4mPNYTPeyM%Ag#Ci=hV;8>^WdXIp)KT+2`P@?fnFMA6eRn;LT)u2T2A@iCq`#>ek^V0I>of#I~CgeE3URjGu>SK!_b{Z!40 z*zaDUSA-z0>a`Nz{7~ICyCt6R+3GPc18_?%GP|U{&`dK!1}cxR+vSx18=tAM>p{Oq)34MbrwzveK;O~6&`7bD-c z9HO(q-7R;4PD5Tjd!w*^EChGGWUHlf!S0jGM0IF=oMnU&8~o?&2ERt-p2gwkjzodW zYDIciV>c2I4zEFZc*&(5byn481OXP?s%_o6T(~e9JBggGchWZX`S5U%K0#sUwvy-e zl^tgIBWn$eM|!~DB0_c#6+?L6KN~Ro`&V0 z9iP{6M(7jAnf$Os4FmXM5w&(2X1wE+4iHknEJ5xqgHF5lQ}G`wOdlaFJUeG2Lr*U5AGi8< z-c8P(%mxp5-L%}X53Ue~7}tfusrE`mNi!g0^}MzFac>jm#XjT3%YGMhbu3s=WV>%# zD(D~GgEr}x8~_TW(HsqY%o>FfgQcfBr|;N;x#<_hj>gqPyoD%ek{|!)GWf~;FY5ar zS9+Kf-()I1T&?t3RCHXc_Y$_5{9Ja^|I&l$A7N~P((PoX$)W)&uP?8cA2i)0Fj)^1 z4|ndSr&%oAu$8&4KKjr9QuWvezl`jGm$jqdfAUKGb?e3LPtC?7U6EMk}QQS31+TWdj zk5f#2ou2sfYp=TCr-f^+Hnv3-pjcbUC)mS6Z1F7r{G_dF5r%YdY!|pR%zFPo85F4k;5*q z`@p?BUh{G(1yFq5tq}3=8&wau8v*AJc${n={!YKv{bbTNF!WED5c?iy(9ddBNUV#|3$m-Hm4EZxX2Z1hgf@;; z91C#TeNtAk(H{<|wp1i-zW!~m|I#np#eUOf%N4isZsC|rw5PAYK!j@iGL_*3uUBhC zM;rt9mXtzCMi{cCvt#_O*_9JF)Y`qfN5;o_GxckIK>&{p6UU-m?K|9E?=5INQ8aAW$c@4 zcVPzT+)0LJzE7}i`Saj;rnpQs_mgK2AF#k)3!pRd3f;E$MdUS#2PpEx%jaK(7KFfd zBYsG)2B4@LhP$ZI$%?O##`v!4$J>nWBGp6#Y4hZbDHDddbCC0*lTv(NDu$U-4!>~q znA1CVsa0(*&tb(By^O!@ORTtXt;*eE7LgQ)i;#M;@X&F|@U6!|LnceMRMEi8Y3qc2 zp!ErC!KNqC)pqpx5pl`b#=zWLM=61fb+x%n;)c2I?hM7jSwUCKw=ZK-*b98sm`Glg zV)hY47fFmekGR%ODW{2+(-Iz;ZA#k~gg;Zyy-Rk%{ZQh^PlBW)8FOQ0xT`WgbZ&rGw8wZuEK%P8Ah21@Y>+82 zjI%91`|{hRo9*bUC8O8f2jJdnWcvC5tS51Mly4|6j?Hb7(|gau#b(>eZr<JK4(}l1a#YRe6|WyQOr<}k_cP=qJh?R)R%fF3wtlcj zdK3$|7)m$yxENO6T}ji`{#26pT0ChQvFB4!v@$wnGB+T_Ni{PiiGSxtP1ui% zWax2j%vjGk^ya%R*Gwjyi3!ssP7-tcyTN2Eq|?B!yVB)^G)TFZH}?Yr;$U4mZcMVO z%J`3>jdp@bGNy2W3uW9(p6I6yI0+ z^r#l-Uj4;eb|cR*(`jzzQz340AMO?#JT0L&TGM?<>M{35n88OR_rw8c$lB;o@}0W4 zJcguw;YUr57Vp+uzD2QbE$3?R6i{ILpGy&qyK{8v6-3x8MfB|VK1>m@yaBREgvO`I>%^v_x&Y&inh};bpS%Td|omOQ9jd>uge3Y zSwZ)n(0q9<#Qen9L3vW&;^21hAK4eYJ8%12mSiKZ%(EOx6ZabPwi4TFWZt7=!1B*q zIQbH(#Qw}j#EoVLlY?)FtuS7FZ_De zEV>|S(5J#UA-fa_Px2Y_dS+PtAQO4!?|3rJ7%Ce;tN=~xkzPELKsY4ISC+NMH{DN5 zCuWr(SmjuZzqv<~HOd~jXDHvQ8Y47T!Vu7tH^w3GaKz2jMZ|bN3*u0p^{Z=AVk^UA z;M0xeT`(ay4vt9O87a0YC@Rr=8lhM)5c8z>Oa%CLGL`v5WK}43>&8f;GhsRe6<`6ri)A*we>d@$W4cU(A%O$|hhyb5y665< z-zpb4KHkSfvD?3r^39}IEoCUro5`|@;S6_4FI(mdWoAf7zQc$9>e7@x8SXp$+4F^t zYbB9VufB;}3TdDukg)6B!N0@BO1bKc*K)RST>N0QmkQ~BFUviFUp^R^ot|OkHZzs?FtRYxKWGdk_r`*Z zUJI4;F#fs2lek9`B3cBM1YfvVqc*LAx0*s#W<5+pV-c?V8w+*bo{y2QU{ykmp552p zdJn$+idKCMb?CNL9{y0e6(fPgs-jx#k|Q`(uy-{2h?H!rFNd%v7sx}IV%A0dv(G<6 zdqr7hsDF4VO!q6Tista}iC!b2>8o5b5Gx%;@|(1GfDL>1;+Lyo#1qjlGLn434M!>u zq*!0RDf?-m{RinTiNIr>cc{SFzb@?}rSTuRthd7G)>Z^P5Ldf3uN6fg74C8T-)8}w 
z*-`zy3eo?y3P=8Imlqo_-IU7=;B7vSRT66G+B?6`8{^LvWNtb3#(ywa%fB{EOv>59EzctmD@k zc;?|R+{JtC&gv!E`!(T~Ph7v{_h}jw>L`GFtWnPcY^#%Q!VU9JuXO0!og?cn>_(n_ zRh#$UnS8gsV0Xspyw;>iq|$kq5ZtDPa_ESJ-+&UVuYdiw%y$-(Sj z5b7>3?Tot4QBX3)lZ|2N zmPq%*^XD8N`l1De%@WL|mA~#0R%dK3DQug*^4D?`p}{E8#-7d0%M=;I4QT3&o$E0C zXYKja)UlPSF@f)}*U}O}{J=;_C<_AIc*IO;)yHrn$o}TcWaMgYen= z2DoMpsm#0h4Iey7VU-egR&X0wrco&>avflC)nqK!&>xa|=*NbXk&)h_$0YH_SGkU2 z5`62TrJ*(r{<9D7cWVpVd`;RW`u)A`M7c8keyeNnOyov`>9GSFr(0i)Nca** zlSdzH<$d#>HU+Ca#`(}dgi>Z#_mecNGwK5`&m z#&eOnhIRYlY6LX3W4~5H9JJLz*lP#gQ=e4Y2G8RU!yF3=n(TPp>orP{@TGmcu@TC%;wN|V|dWLwv)rlSGqBU z(%NkT#_z{l#^Xe&pEwWaYks&vJ@twi=H+1AwcKx4G9n0t| z&IYrN{J0bbuQzCsrpF7IGn?(W#v{Z<*skKf z6a(DlYFImuv{{%nGRLMA`A(dr7W<6ANFHa6E#EmgGb6IT+5t=ya-rbp)OG)+m`4e! zOWLd#+ewe48yJlI(XZV(S4!P=HY~>WIwh>ea!A0%?^u}p%9=c$q`CQ#)Hy7lohH#C zvl|zm{$V6%%|&-@G;FavBiBXroa)0qYKe^7DNNS+EUlj?w}4-HjlECE0UivcO$N5y2xjuh?&yk?bt#E_x=LWGK_m zsl4G!JXEV}7`YOFP9=iZto7n*BTJ{YrZRQp*bP=_u*Cvo4SO9S-(*_sK397d4frzf z1(6Lu;hPu!1`}o`ny~K$yFv_9ywZ#PLh!P^e&NPKAImKw^_y? zonl)xC}rjBI42M#pA`4v5Qt_{G;_&N5+6q>(vm-vu0!InEos^Z_ebpH{JV>8>#L8f zRg8T04ugGf>4as|U*(~bh0O?U0&SopFqqu8m@Xx?jT>`N|;NdJ&VNAlND$XU1? zzdZiO!XuoUEKRXEW*SfYw*A%TUNj?x(~$A*b7AC^eB(Tqb|=$$OT=rm?dQooHPGVy z6}$1Xj%_>pzn!V9r{c>oy!Pt0e15%4M6OZ^x~ElQQ_p`JnH$}HB!ZeCvLRD`ePR|S z7iRgd>ZCM|=a8`)0XmUzykCWa;8G)NTOUgFzUOwr-*&j2P!TyYED)BKj7^PL%NgIH z9;ib{fn7K3Ui9G$n3`(hAJe}&GP*4i)FPnA=Iu%#p*Wq+9uP5Y7|~bh$x^DpV1lax zRLEOu)p3KrYcLlRx#d0;TMQ}vX@pJ87iI58JU$$=H?A`v78{e}{w^oS#^vr1Ug8PI z0YS!9KsB|Wc8p?E7X^GuQq)U}YBENwe}?pNtnFW6AiGY75k<&W-3<+;R;y}XY;ec7 zWFfu=#qvK7!;T%c_qblYZ=wKys>keU`E`e!w;QNe-FA>Xjz5>-y||1C)fRgh&_9wPhsU6pw_EUqVNB+A)^05 zeJ!|8_o&1=X&;!W1g-G$%PD_wEi9HSG!(LJi@l1L{03MzYY2<=#QO;~a5A_<43hKC z0;uHbUe;aSc=o{bWYuh5^fvh7F9T+SeyY21u7o8$!v=;Yi!K#Le{6kk3wDKG{><|(!w=AJ8C20j{L1k{=$6I;N>J~ zsD0Oc;gzUN<^@Z5A|PIPt-SH9g&8u%E*3M&*oj)F1=&ox3-X!AcnGk*lJ3`fV^wT0 zD!$QW>^e@~zPW6I`FZOq!YF+v=ixdl5!7|Gd-3wn{5vH5Z!8)kPT5Gck)b8w8^0c0 zZ!m42#m_(9wF-VoB7q*0)3OC2VPTS7IyQ^fQ zZ-kghH`+}=&GnQi!map&glf}PA^B!1og{%o@4-{!AU7O&cO(Aa&OJ5%gPsH+j7i;U zhV=icp|`2t!o9Gu-`7e(DC)71`8sS=vig!b6-#16YgdVflJSJKLo6(0Ci+TDQ@ z7g6ly7%eL5E(BfxIHbi|hv;f!uGrOGUiGthz`q5GWf!?Q^rtHO^Z@K!9H3mpt< zMmn-t-rWBfSKIyeEoHL^<3n66Qi!sS{i)8`X^Zk1$uzbQ0)>b}c_1}3>`LNVZ=m6a zLS55~M&AzczY@UZeO(}WiLd+;29CoXpUroBQ?TFEze>9}W^`S7)qcvQtylko?tv!r zk{%HlOD$jQGQ((emswf%@f-^q`jt>_$nbS#9Isiip^|2b7ghK~W#yHC`{5pLvaFgi z)My91Yl?9#j93e{cZU|5i!|C8Qn~^2)lB<38{}X8Y4O2I&sGhl%utiVVpP>$c*Sop zYt8S^Uv#}6;}Lz|Y&$L8CB;T4W~TS%;32$P8l&p!0|jA+D)6J5fqtz-T|jtNe}3K2qgLt?lS%LR4;HV!R`4%WrjbC%c^2Xc%w{ zMyEHoq+Om+cEZx_CKBo0YZ{!&Iiu4s{l?OE z?hM{t;3?R{+K+4v7e$dEpOg~ z&&2obp(rvmhT31T*fRzkJb0{-3t1Y}N{%xt7E~7Dim-?5GIQ774m z+szZexJHXd&j5u(mkk)hnBY@Rv|QY-eP1O_Bg?Vg$D_uA`rdOt`z}Y}6=R!No4=V; z=a8k(l^#tCf7^E*0NgoD`ulq(<*5j1k_d@c?X^FiBirkt=SyZP88X5vcT${!T7D^P z`_1*|_vD4y8~3kx11|(<_B3EGo8>{`wR_#*#lOosYj?u7kP{_d`9ej?dozSEvPw3Q zZT56#T*>h9Ryp!&O2bs$$$qS$<`PVC?xqMK^=VCGWGDTQvhj7I{bXTaU5tGV54|y6 z@+w#n{FjgIQmp){GFaOrcAG-Ww5tO*%HlNBbfXsA(Md|H*FhExz2>{+M@lDfmxC}Cva~!q>ykrb#CTbSS2|kRYqMkd_DiZh!4a%%IN9s5pk4nsjhZE{@d# znYeKn%QJ>C?2H$X>u_F=omTxhXDo>-m6szzfpWG57JF%T2M+AI_)a@to%v1YpQFj4rSI!!(Zn5{UD*^Xk#0XBq-nC4%{`7BN^wh-1LkY-a z;={uH)BJQg&(BxJSfyZx(%kxo+lg?ZzrysmlCFt$OK|t*wuI+P#I*-PUH-QWC_<=idwt8I?k$t}N1K1b7g zpMKGs_^dO9gRIL9_3;I#2#r(^O_jY1@i82N?anG7&8ThuT}{goNKdK*0{aFZaFfXEH%% zYBk85A{+lO6Is18ZgbP`F$VuI{bKp64!2M61CNHX?J8DkQke0*2bcE)2x6zk=N+dd zoK`ai-F~yJzh*AC8%LaeP!3gsQ^${#JiLajZl|_PvU{(chpRmvSCjbQG6ge`6qx^El2)# z4d|1NbX|6B3`~i9o-H(GF!AlSt+;fBmnG`#`J4C;XP}#$W$*+Nb(U>F#9Wr7O;cE6 
zt=80c^Zi&{BRNm@uvIHG^)MA8_Ftx#HWmo~UnHwj^kHck#J$+Em#h25ZRYRvPCY%7 zBshu_wZ=0en#W7>RnOJ#^T!3VDQd6ip3xh;)1~E?*&$^g+XXBk z_=>)6^ctP}^JIMEYf*M@W73|BYM;QK0tlx97sjOwy0reN9jul{4U>Ifuw0bmzSum( z)4~BG_1OTPN4kg0@c{ME4%EQL0Nhm=PMhjPKoau%|KMAyl@} zoG$&bC8|MY?4JQOJ}4Ja$t~Rd0p?c0cfJJO4(o`uiW-pUe8GlE-D`&Q{esh{V7pJG zYfDoG9dl&rCYMo9@PjS(lqP(umLc zR>FBM7b!tK#HCC^dn>HC91-x*$M4enh6}&>8CEM^1nPQe#!>3nC{=BzZc*WCrz0OU z_O(=IB6)oRej+1YK0IF3O~dZ|=A+A#Y89-4=3u7tj;HZBuRGsH|L)%&mnz=lvWtAn z5;t%?TNAm|Z;WzHx&hj!k&L4^z0=92Hk3LfUYCF2;q>_vuY@eTcieg%E%)yN_Hp(H zT{st-y$x!>0`TntGxe)5L=$f2c1H0VJ%-ah^v0&f{HJ2@V|?%$4T$4-jf}XYKB~{ z%A>qcUG`$Ghx+)V7pQzd<-mon-?Hl%^9cl@s|Yxu4R?#zl(TYpl#l*}g9AoHMtIZpp~L^%K6#J&|(*Vm=WmE9njwP z2+2PY+5?s?oQSZ1Y;ahdqUR)JJQ`z4r$OgYJLotp%eA|Ftb#6KO z*~&%OD?7GBGUSdaW{wKlnS~pB-|*}|=h7;PaYoqe=b6E8vhDNnd26hdbmw`pP%EHn}&pMEr?jea(xP8Iy)L zt+iExV3C84w1AAhh|X7eQBCa;B-&Cc>3Y4p?^G`9;PJT4sn1!O`d5_eOL3CL+sgRs>(i-RGOl0*OE8R5;v^%Gy+C) z8TA16(Ks=>YsH4tiFz)nwAo!|sydASI@dzF*eY+Bsdw7a*HoO?rUT7JGpnoK$D>w7 zZSSVWS}diRi67Yuj=A&EYYpFbP*3!_n?lb~~S)WJu z2db|jfXiuJ@q4bU5-Ea3l77X87lmeWDa+NCSx5W}$3Re2)f{>cqWO=b*)Q{LbG!DXVnR` z6tC1jLd%Ot`+c=b;?v!qdc25(h^yKUJGe^N?OmsrriOk^>sc;bye*6xPT%pu4+Ej& zZ`gPVj%d0;4K*fSNw;pee_~YjZ~_n|i#SFYx(K6_^UOj_CE8Bzs>Y@=AIe*a(sM6jQ89Vs$%$wHC7(e{ntVWCylt*|xNpIZqw^uFC zjLSVLvY%^GltN$!HTDhB8zmGxE@kgjZP3a^hWPnV_uPxxJ@fC-@vAe-lL#NBdK#P& z=a}CDh#F&N)N6OHsH)7yyOj}pjhNB=VV>tsOSEwbo`)_ak*JdztTT)lfr(Z)Az<-6wa$zVU&jjSUKi>*zjcK%+V0*=BX+4^(l4$39wbFxE^=BlO$0uOvQS zOEQ+&yZtC<$jpDLdA?|eish7YOyG^g8ZLm1_Mf5;QRAXY7*VYpRy6zrwli`??v87+3*!!m|`3O6t!Paq2ci8D^iU#6psd1*qy=C zv}{&$jkX3d6u^o7gjb99m4|_75pfL(Yrx>z>yj43wL*CBGSzS*^X~cUT19x?aEbZp zb2eNGM23rqxFaS=pJ(YirSm0;%Qs<2hXSpSxehfB3#b&pn+vZaqoxd!M@G_`x4 ziImiU8=BXwguN{NtxH~`%6Q0H-0aXYQG^xJvgqQ$SSea=Ef3hSn&aJT%rR}?W&fA zQCcptYE{ScVl4cJi1o#aM=E`JiL6dwW5xYmH^dTWI;Tn}XR}L8J1x}O+VRp%R^qC= zKLG)%;ct-GP3FU;8Ae!WTa&|-@5l46xhQQmAb zIbqg2C<*l}i&}D!6I&!^ad3NzcHQ3di_{f1BhU2!x^xGBlmk%}hE&y&<(62CcNa@q zM)U)+%`vRQq5h;es;uSCYB}g0&a$nt%8`4syx$nIS>BEm;qsm=MS@RbN_%}O(p3W@ zSFiPJTg|U$EmI5n`0-Lt9$ISh3NPRUxl%&G{E$nLlj#s+yhG4UUFk4v7wP%K{i*@9 zu=ur?z?Jp!p#{giC|5`Ime?6Ft_S#@Ad4e~i=S3j=d`@>NY@<`XH7jhby5QhS>rMb z6R>`-90T|S^d&~V`58D)`rA&SG~4?o3;Sr^_`3^_ONJW>2gFG zzW^TzWJsk3z0*@)ef4clq@Mg(|^z)PoGY8O@~#i7>}QA1|%-Zc+Xb+ zS#PqIPhcMGi|{uL7!8ztA9o}X7E@#Scr#8}yx0ie$YhvN6+d{)UO+)^>H8b}>scnC z@qJZ5tcW@=n#2U5MJ*Ge&Q&X#Eg(t^@kq$0XWl>Tn$2M>bs#M8q{7ukP!@fL_D zHKm}Wm6x{^KvM1YK>QtFJfWKs9`>GGH$_ip#q4aeOiTTkqz7xOd zBD7{anO~G^v7J_Zgh+bogk3~p7`egSxMd<;8U@80EoMfR ziR_#7xCteuGszo|o&a%CH{g*;Ld^=&tBt>*hNLGKq8ed4+j_ z#P~EBx+D;C-IMbQJHt*5iyduxX7ELJW@FJQCmA|5CcgNAzv~iXGp_l{2}?=;ZTILS zY1<>0Moh2`pt@`Q$)OV|-Q4jy1zS8X`8Qr9Pzr}#)8gD_>~fntpPHl$9&<))mQx1z zz{O7vuW3nxC$GENQuidd5rq9Md^t8u3a(PV?3FGPPYwh_-U@CRrtgxxC>(sj=gPHz zOPIQxrPRW2iO}1vsR=^8_YPS?#;dyr?0_Y(75K(!MTADo9!0;2mA6SR9$D;$=TZ5Z zap?Wz+_tdc<*-=R;9T0`Ym5lK;?hscFIS1Z?5NVs+qTiK`~AD%gT7RESku+Vk6`En zKsN{JT1(dE{Ra0YebwM1H7$5dCiXdXKws%>5s-3eT?AxvziP69-#+@Iax3HIRe;5+ z?u{lpPu+@%$(ti~?rU+}?P-O@lFml9FEt?PFuTY&zVK6&9&(U!;IO(jGOP=~yf zO;4~nNXgyyG+$gV5*90KF4WKAevuP;`Kf?uC$GUgDiXMj8DE+C0ok$i-U|W=8l_*a zY0uW%=KJ?NMlX5%YG(Qt&zdmIwWB%>EFf?Nu+M&AKOf3~zV8Wamo=PjMa=DDM&`Q; zPXwMDR#+yiuo*@W=qLxLJa(GKN*mN64huhU(VQ|(rnf{O;_Qd_V@^qy5LjW6H_uG* zV-it4EO8D-<&wn_!fsj?GY;)jgFn8sR7uaU_d8}O+i2_QM{E_pZkHVXAMhCJJV}Cu zNq5vhtM?QF+Vkd(%G}g65+`BTGXCDt*)@fpsCYAR_voXTQ*I5&D|`-zsiZ-oB7A^z z|3fueJEi9CI3T`W;LVjMoG6!HajGUjsBho4N_Kn@qOyf+H@GWOy!ZT9WQ5ugEsZ2P8u?ctd1Nkv8kc?OrX<8 z)k1qq_8CONC6M&fqQPFz`1g7r@L@aPjcu6?NGyJOs+7a7l5=;)q}F7KZTXr;w6Fa4 z8D>c(TXEFq;%r0p2yYctkwx>et#k;wqsx@Xg&*5M82Q$Jdp-606WI|gRQEH#l>2X+ 
zcyUgXg#7;^7hG7LuN&Wn_uMoH&L6?y-nR@G#x2gM?a~&CJf|PM${Xn?9%rigW2J}h z=X4Oz5KOT1cAFRvOx?`WqttSFB8=x4{U>;i5yav`?;J!qDchmBHnf*pBj!k*=ALT> z@@%sMq#jpZvt~iZyE*U^b|Xt8{shaVmE&WrlGb$o&y$iE_xYsRR4D+;;^c&TVU2HT zUpKXGZ^c5zdwjgTFa?~q>_iG+Rb3W?(7fveY?c?J=kHzGF`rCVq7P(Gra`BSzATt& zaY*ciU5ZndF?i&nQ->q9ZTV_{e{^xyf8LsShxHy$vu^Lpa&}@uX`f?O-!MsW-AujLK`nTfV%;gdrVtw?dw6fj*>FhkC z;r`bx9!Zc8BoQRh5+z#nZj^`-B}%kG7%dUeBYKOFAT!!u8KR8djowF%C{ZVRCpx2d zh8g!K=d5+^i~HtWuPtw^_5JN3VUITRrz-YZDcTQ5(n_n3Id(-0Go?ek#hUUK zxvrg@&B*rZ^+gVU*PPlZdAOu+*a^pMe}JY^BAhhAnv=U zH6yR(aVTJyyx!$ffsaG8`2d_$rshRyYrM}8hg9qIa%?V-DUB(U+h(1|F{VoOX6k4P}-o#15Y89h6CFGx_qTRImWWEVdCz*>I1bt{3q zI_8x$h8ba1qBhN)31^)&tM9^Q=TuUVc8BVT)06D+V3~>9Sibs5Ygz`bYWN!cR;)ge zvsk5NJ&5xxwE&vPUau#(;oI0+Mmq(T+BWg*oqMfW6iZ=Li%E9}tsp|U+rXl_ikXIl z{!}6IF2H3OQzxs5uHfo;w~>y`sDc5Wi;&v?Hh!(WT_D-V0H58?L$mq08`rTq%}u~? zVRY(bdKs%R3zkf%^8m)TMO$Ine_Q}P@B7>nWE8FicPV>b^~TZhhnmI#l52c3QY^6H z#Zs-WuwTy*9k2Zxp83nl$?IQrk1t>h^Q8Mi4iT?cU2ir81AP_Ma#^DVz7MJx<+PL% z_Ikbq;v+==v0>-WCt;c7UB7^_Gq{!4Sn(rK-A=>a++&TH20O#!k+~GzymTZtsK;4; zKA|yjTzVxl$pfdQ!VJ08aw`EQDFUbk-w}>ILkp>GmN(X(+qz}kQo)GoOw^L7;h(ZF z-ZWBn35RgeBdY93Ja}5}K|_6`c9)@mpoI)d;T<=lpGFYFM@#!=ugFe`QlKxL7;(LQ zW+Y+&sDpBpAPQo&Mx#C&w|}P>+lWl9{Qg(z=>GoduP(VeBd6UVaGa+;&E}}npE4O$ zHT&y`jY;HfQNgD-j&Q(Zw1Cnf%PD4r&1Xf`JMLxUurZHP|7=r#qGF%e}Q- z0pLlX2w;l~FTzHxI(_3-szTyKotGI*Jj`OtB~FUsG@W=0I$LJhC$755K+|#6Ys|%S zwK9&$ue0XjqLtH_90he7 z^8RAad@<-vf?Lj?4v_dAaJCW6hu!GgU0YH6xmt{G{cReY7y=}Kj%RXB?T7Q4s(h(N zCNo+t4Vj5YlO?m&1r|xUaYkDA@s%l*mk%9|Mr+2{Meo3IwOD2#+qZtFi4d74a^4co zW`Z?ow4&`LWLreom{`v#TsuKRI;mz+Ih`o0>v<2Z*FOkVod5;V%*D(Hg7H9Gy^%@) z_tlWvq`# z(sS+1bTb|v$r+iM9dRW>VMXB=PUjDFn`4V9H^K~t&er)Gvs^c5^&m;Hz*z!~#cv;n zd3g9;V@6#obE9x9}j;o0?`3`KiEI(F?&bp^xc~T(a+Ap`&wfl#ZNT!I1)?Lm5mby|h zf;0Ep^p`BMNZAG_fjVo7)bk~reH!=E6tX0P5!gk|xWfcn=4pzsa&0l2$BHqKxL(do z6ArKhr5X9n^iZB6RS@aDs#ek9bK<4JmDu05ehEsu(tf(h@}u0qPDbFenvrb(9hlW_ z>pxDHC|s0)xcKmFbJ&blPq&OX(U$MkE5H0SBKD^HuIl1ApyS(fp|eNmt?l1#kRjzK z)gJBl%sWWESk+Su|1-3$E`X5Tz}|Kq4cG6mzg*4d?hT1W0>*1lrx*|=>OJXwH^s9l zW*^blbwKDJ45Dx}Tw&%BwTH~=RlpIXmwxiyPo!}61ycho$Zr2U@)i&wit2mIpxvaw zo3f`}RdvFNoF3>td8Hy|bbMRI%{#NF1gO@{^!kvwMM07~%6@TjR;QeE6`FUuuvlHB z$L-fQ5q8tq$Q>V+L_KHywEbIg#&h@B$d(M0a5zZZssC{Hda>3cGSSu>tSeyZa-%vg_2RPX&?g z)|e3RVPK%=OO}24^gnfBG(Qt|PmEMNLnwcS;`NwtVp_)$AMNwHa7}a?PRkhZ4~=c5 zHv7Zu&wGiT5?ZFeSuGDiS%v#KU3x~trUkx>zN2up!+tnZ;fofarq}0iWWlmMF+fTnd>OQ!i=nX7E6l@yx|A?{k1?3A zeCXT6T*79&QO+6pe1pIAQOyO^(e4n?CmE-+ip$Ttz{}T$E$cImiG^k+f6VGim1}22 z`IxXdcSV2-peA6Z2WJxpyH@9HA$@h7`|sGu4GAut`Ug84R7vu>!5l50Q*H&mv!-9{_5MaLli{BY{o$rREZdT% zFKBrodW2s&Uer5{(z>rSwH{I1I;)b62~7tylfUOO`lQVQmkFytSo{S+jDzd>%ARX+ zjdkwUD_ZIkx_18sf{3#LKoG3~O32(wc6|NUVH*oqV=U{4KGpDv;s10kz!qz`pX^p%%TlML9Zs@rd5KA1>rVcBHbmdn#HyRfNgtu4~o8Ji>K*?Jo zyr;jtHrwoC48Q7psjZL>{pvA6;xx1cI*o-Y(>^yB#_~`ORT#4dOkS?n-kk--(Z86DUJPV>{hS49Z zCsZ;((@ffN^2DH0>9b>_>^fTiObsz>HY6YGxKu-z5{eif$GfHX#E9(t*fw_gMR8dL z0x9mxx4Mv7vi(jBBbnzejel>sIf6+%a;(A->x@(Z?oSo?Z;)EZ-~j~I6O9_AWhP!X zoJs%L)lT5P_yas4n5!n%g&|M-=zF&`4r=d6Ho?!vQUgLeC7e1@>TF##2w0H-)NpFA zFY&Jmig9dd&SseWyDQH#@;vVbG5I)jz&XfcnbGgBs`$qI``vg7yc-u|Ny4*o!EC3l zLnwHm6>;4}y+pH{!=*`#eRQdNVg$ERW>_dn?{dU)2PbbDwj`Y&aVH!&goDGlVAxp; znkN_^XiT>S{yS3<-vXnr9S?coASee-UeFe0LFDJ&U&vSldq^y>xjSW&>o0KUoQie3|Y?%~`uZ)28MP81*7)a3nR1UPjRnICBQGjOY zf~%^Em56?g+fH6Xz1Ql1iy?krMyISV_Uj0q$1rAP_Z)8KrJ3`e(ukh%W&Q{h_K0zj zF>zLR?|WYnI5X6Joa`sHzORB#2cjW4z%qV=l!m=+T0;-+)05P#s+9iy?!DUBMh{O( zy!nQ{tfhbx|Gr4wGlaJ)4E2aEIs?5(xSeDpID*pq4qW|8=YCgPR}mC#D8gx86~5%S@3=9P6p>Xr!m#)2d+?) 
z+9gIho)q^KvBuDV^47TZoe=$Q_xL&~v^Hd^;RF0aN*$x>tdT{6dr2c#1O3@Gx+@E) z6r56e(&xKba8{#e^xE3|O?`;C5ee7Q2e;%eQ%#-NJ$EM4W?C(|OvUs5mnt7BlcboY zibB!Mkqe!Rz!s|ItxxKN?S7A{3$w-GWqz>bS@O30=~tQ*M1bW^k~NUDq2m%Z#6+2$ zwQO(zZV+P=(6`8H5ZlyB0&;^0u?@HGY>SS07@XN9t02697Z4_}yY{=G7Q%#-Zr>43 zyA(NYxk?I<>^~<@fPk}5{Od~I)9Wezr`=V0qq$Rh@|iuBsqw z`SdV$z3k--JdXY5mT&xkvP)v_nblSPy~Oiby~cU4|10SStIjN?PkZ7+sV!g32ZF4A zhq(7e^hu}`ek z*iNQmEA|}Ffl!`q(cKO8!PV*&d_-9sZl&4|7^>RuX~%isz6=Y}KFG0g)}AkANp?c? zg3uKzZ2b$jxfEuoHnI6Bk8=g))WXp>Tj zvKziFO|fSH8H~gj9!GS4{t9j#D_QMNm>?K&;?{4Ae|iK5NCU~e{l87uH*bRc>_Hks z-DLDHfPl^GBAU=*R(CaZMpNb@!bDYs>5i-acH$38LeQ(>f#cRHhgczTo)=w@M1!C4 z+Kh{(qGQL}LsU9qA8@(sG|O|(ltzd>G#+w22`eQsD$!5cu-#GJ^Q533W!ff>JJFmQLv;c}M3dsX)F z@yef?Enc&|Sh}$PG}z@_8R9g-4qa={395xD@?6^P@98vaTG8*OzR?mnX%5x z)>a-d*YXCpMK9}dwE3a}O|><@-*=vQSupBI5AiEsecE#G>nLPEZX0Q+H_ohe`pb1f zC_@nD8ihZvhH*5#^dKkl$3B1b?>*0m00xDf31-V+rg_)NnIk3l@?gE4jUmXWenC7B zGk53@R=<^-2jRkuU|j;{FmjFn~25 zs<2lWO^4NxOxD-dv`|fmS?l_4dCk3j=%tZe&cp2G=ROxl)7YKsQYN#zp@u$ei$*E{ zg)!ul#wJxcM*Xp)S~Hx(6U41S(~jZq64b!w7-%&ajW~&Q&pAkO+;fZ{=JI9PPJz_t zPim3P-%O|2z0`pXF5xB z6tvbGd8}sAiLQ>;3g7fi2t@~;^5zTm^^0Ve%w2v}^L1Wlpoh>pWH8H0dwO`-@-X#y zQywnfpfKR!GL=m9y{lk;60B{sam z_w7lq1T3w_3ps9kjTe$Ys=t67_9ht2l6k zk5lYq^Cl%6^1@dW{$6=| z>b2_Vfj55<_N|L*761yO=8l-2e7QC(hbyGT-Yh^nc+J4bs4QYz%JqWVT^Q;rZo5~o zv>b)&$Y$|QO6#}5@~`ay_#=srH^+`1>x~~X)gI(K_7<#ff7vw}w|)1EYjdsIG3=wp zx3)j@K|TgsERkh-R2O$9Wfylw8ANZq{G;6T;?Rv!jwjl|n=74-gSKj0(pTF4hIyOD zp2X6*#z6>2koGkQ*;L!YMq4Y*?C^c|_XtDZD zyN+9u2`Bq@&XeSq4`dQ@Ev7c*9Pr7FUK}0MN`F>4QsX^9IQkG;+)bB_g<~Zoz39bO zgr4&eq=(DJ7;zw58urn0Rqo0aVg?nkyl!!S;Hf2a>6C@dQwcR-Nntk^Yl9^l239x| zyJUM>a%=gFTyixLg|i3%gYpfHFpbU!2`^hsuhoHws)Nj+o-RE|< zIRT2*`rfY?KL8kWYG;9z_hSqp3-|vl324jZp1)N%?Q$(StlH@Lnt@I0%mi4LFqzH` zaJ1{CN=D_Ol#J3~Q-FF#oo1HvChv|p0O9(UBI(8|>@9kTG^nfpRc$)x*6NB?i1SfI zdWmE$Z$8wb9{EUXik5UHFQ9>s*wagnFzaT0q_u;{uiL}?>xSo%OGb&x;As@SrWK-3@r4eM;O>#K zb5`INP9^8nTv~AMP!YQmKd0(gz%!rg`TG)?=p)Db+uxzitHr$@ zkHv1pC|$w3dcN(F3n!p`#7g^8o5BIG$+PApfIZ>-P7GhE{xENxLX&B`PsRr~b=PYh z#@xeWtO2)jNK!on)(imZ*}7x-E;mqeCaaE#p{5Uxcn84HyhSe1hzfPJYgPwvgL06| zpGQ3R3JckYHS|fZTJSoI-LFDFA&Q+MHp+dJ8IlbiZIT)-Os_g-nu^j+EZ1i5-rDrG z_C0blC@Hvczt8{5jguRxRJ(J{8UZ)W&STA*gCBbzJPsz3f=fTWK2c!}4x~S3(Ar9( zBImqEc2)2O1I4i>W3s?Mfu6mS;_3J^s)4>1Z6FgW-bqS!1Hh#AvVRacaKsh6S5zuF z(zgI;Fww;3{*fj|`^~&@eU8^uS0ANP=dR%=3;jTmVKl>OR2S%Pv89y6*RS7|zoO~} zONC4S|4?t?WLG;FXbPXP{@2$Z{>8+6M06dvMsg+h*WLgAegyCr{;WQFwX*u2(h%6!v4yWOP4T6hEJk(*Hw&bZxCkPSePJTg*l&FfaYWo>e{(NVU>Lp9Jjtx<2LN&FV<;j6GtM{sRhUye+|SJ^aYF!hMua0<)@#H26^7kHu@g0VGM_c2+aq_MAfSNyx{-) g+=Rc4(w~cKyWS8r<&1!*SAZWCMGbJ#bF+Z|0cWkQ*8l(j literal 0 HcmV?d00001 diff --git a/src/crewai_tools/tools/nl2sql/images/image-5.png b/src/crewai_tools/tools/nl2sql/images/image-5.png new file mode 100644 index 0000000000000000000000000000000000000000..b7d6013dabb306a84987746604fced2606cc3bc3 GIT binary patch literal 66131 zcmeFZdpy(o{{XI(uA&s>t_!CNa=&hFmBgroa_wT6*_QidMk10zlsj{mdziV7O67hX zhOt#{F(Wn`=JK1)Ip5FueLsD^k5B*p9zPzom%ZMv_v`t7US7}p!mk_abMF`2&%wdL zZE*F{O%9HIBkb{;yNG$K|r)rl>?dVT}o$g^jE;|b*;H(js^m<16j`rH|N_WZ}#wu~?TsG~E zMt-^JreSfu^81q}+W6NLCW^!f69wg`@C|h&U-5vq)y3Riu5GLSo)_WTLw}s~{L+WM z4^`N-HjzVCh%jp1p)tb9N9ct{3w0kglWhM$mX)#NIh&r#TG@|l<&7Th3OS{bTkdOD zO^Ck!L0{Q%*5ARsyi1@mE$2Jt$BTziX#+Pun0(l@k3~bib&g^;U%xkAFqn>ikcqm< z8Isu4uBmY8@_5H!LZE|gWg-N7eeV8SozM%pM#j&wT3+(oibr+r4fra)BR?8PA#1q! 
zTq}Q`+G5(xjiBy$7Tkjx`r!{=?K){~bFQ-|K$-GJ^#CR7y?R|lkE_cagzH0Yon=xv zDI(4c@PwiNhZ%5(Cd1xp$9E1C8{7B6w+$RXAdYkF@m`LdJC1O0vPV1EzZ_u@j$J>; z92}DDcMcBDm{5*A?Dr$=zw7Bc|DN49lFs?}nB&d%g*v8s1_tbR(+6%~Fx=e<;nBZC z`5gzxP9WsgZI9a^BlQOen9MzU#C@=g5A5Og5)Mrtb@mVj_P8hR19O49tNQ>>{=7n+ zJ>LFU_N4gFOFW=}lea{lHdwa`xE65<+9A)Ly)YN1z%FD{jOS7+#cK3yQ-1Cu!yPx{=CV$`O64?EL8|0w} z1OXS{zVE&J2u}~d$&=d~{m;*zaDsgx|Jn)e{?}u%A5eCCMpjPdqU^7hc|aWg!?Nv} zKbQS{u0OZa+`gImb%+nx<<2DtjEz+`Xh1n-Rn4E<{M*dG0R3~NnLF4`4*_E@^Z@>g zEPpNj=j8uh_;btK|JqVsNnZY+d;VkUA4|8NLEXd+!rt!QHWGnynzH})*`e%IypOz}8W(d9`We!YI;vD$za3fSK8(tIM%t7q9kz{Ss8P zv$H}5EU|O1_#=-0{^j(py_oS!|NS=GFs1YF*hzJMfA83Tzs)vGyGC3O{T6KE*CRNB zyehxE-TzydU<0SR=Ql9NzWJNWkF@J7AKrfQ8<=B1N|09y=kKAr|D<-hjkIdf?-BQr zwno?QpcHcsbH5{fJGRA0_;T!T(!|dfx2rYVc2Z z4lHxbj3g?*YSr`ZMD#Bg7rKFp0iKod%X|Azze8q7CNc-zx`Nk55JfackioBTqF<^V zyk{iU!z=KFW*Jo1Nq+|DJM}(Czceo2*A&`)19fpWe@WQ13krDMRcrr-E_gdZwNUv2 z`3zpn?gr(A^31D~C*0jior%Ig|J!NRjv23BTbNtLckc#K>lpyxDkirmS32EhD>AK5 z&+pJ5uiz-60R`KtpZ6L8i%sk<*$4M;Q96Cv2bK>Lxf6M!|OJDA-uH7+dj|A+=gbA@iqC91q z6+gbugG;cgeEr*6tAdKNx(iK^{tRdpZe1ug+r9ho$GLJftJ#AX4a&27ZhgBsVGImG zK9lcg}1D4^SLLq?{_}%H;LvTf+e?o=xHro1nOk~8RJ2=AL%Su7xDWI`yxHH3l&~Z8X5oa< zOx=d=)HPL}Z~ZlLLM#2!@ldmdqH*D<4w(NzdD`bh=7tsRs;1aS$R6UaPWlObar1N= zaz<)4biu0(dh3p_Y8kAMztg@Y#2(>Qo9<)FkZriXu(B7C07aH42EiVffEbS|p@ucq zfW@*?Bblyw%s83L`Qz$7x>bMlz`qR1^wzdpi>+E3 z#xz~q)sAq%0prHaFq6@Z8shSIbKc#tiHU8;Bw?H$BVp95T&{qND!i&O$#z#q%QK?h$2?44+0d|z*5WX~lT zDg^GwDXf0>m`C+eYIg5o=Vwl`fQ>62*Ukt0@G)&{j5{57Cbw{~#IoAvoKdNlzTr|? z*JMJd51ZWfTgp);YOBg`J?Xy7kf`neDt6Dh{0J+DX6SwlGhd=95G}v0#@~ie=nty*nn5|GEY8PSh{7VRW5#k#=2`owcl1 z_0p#|nC|r-D;QCk0Jr6Kw;sBm1O9UXDFq{SPzCbv4xy_U-12e0`~e<#*7g=nz+Ji{ z55ft0WXBA6(w7NVIBQd5rJNARV&t`zvEUXxx+554F1lHg><6mAOPEoE7uE8NvWEZ4 z+qoed)OUQBh-GWE!B|e^aPv39HNK`SFK6<#q-4^HB2~F5M^y6_TMA(&hWOA!4yxxt z>h>-=1DGy^Wranphf8s6I0i)s&`4Z111OrA51gS~5k0_4AZf{m?G*JgnmsU<5qlHg zUa{-yXEk6a2&HaTSvE?48c!X9yk3qcJo#*m-{CGw9iHC0?DnQ|K@N1QZ`Gt5-Ed0X zuo(`s&zs44NSg(e57*b8#lYu*-;BcIVheQ$#>L=wLnVFKw>d@f3yaIk{mE8NNb5q? zX8kHLRheVE3TcI)hmffG-I+Med1XLzE_v4AN$)$X`7J{+uc@J>AT zWg_Gc6=^BL13^M3)__d8j2bZ%z|-;@1kSac0}}?_?pW2EwxuG2$4E@2iIr+_$nECN zEWJX3qo)x$$5`icit6PVsIKA%qYyEQ_52GnutZ5C3d2au{Ash;tkry zxzi@rQ0T-1-k4@mGSM4;swvN-sxI2wj(i#~CS%+|QRs$G+a9|w;>Nw$IZm>x{HD=a z5z0C`36z>yvzbcF1EwkXcX2Wkq1_}-w|DesMSAhv@&r4%DB$^yO0T>-Y0|4{8nscX zgsQdbt(8F#Ix5U~UV`Pd5Pyb-J)H(+9W>bx&s3elok=$RUQXD=-n?e$B~@-m+Zf0D z1#ApgDrA?>CTbeXz8#*8ZgxCYAfRXLuf{F|PqbsQx7P3}%e# zemG7yGAiycQ{tX$Vw-c%EIDkdP}}ZhZn@OD&c>FP z7sLzKbu^^0@?Sbs_qN_FE4?X-g9d5RzTB)kw_+9++G!O0+4Jy5-^QaETo)BpIzgqU z?Hw0Ix8^ZlBGwl>F3FXydp;ROHFb2HIC?XDEFZPcx z6P!i362(77jcN8C_f!ZQdsc


;CPR)pd0t8Q zg6SSZ#T-2Tnj|zD37TA@Q5;&VO5;Dj@$5_v!8S8joVY{52fLa36gp@t_=wClm?Ca+ z6jEZwSUX&NkuMk{byZ5b^ug1)P%nHzh%la4MVgX;_iLXdr`SI}f*0`^3*ftpB!epK z*T&C#D62jZMe{<*n(?EGH@a90a^cvSGD7-8q*mV(rLk$GSDJzh5GFcuwgoKVOg5KnRSd_ zY!$RGx@KNVs`ab@_p_0_9i+fU*+NU3Tw@X)T6scW<24c zzJskwlS?V8n-jKf?OBdyg&$I;2isCDaJOJJb>j0ZDS{?^vo=?6gwJnlUrpp%8ezm1 z&GeF4sHt@sTV@kY(*jZ>0f(c;jkc-v{0u0wXn0!0oRq0Ev zZJk~j@^{_h>Zj(}0F4y23AyciG-LK=53bHj^8QFu6S&r1Al3#!^pNYn)O^g9Sa{My zd9$#%Pr6y~BZ~VhTY}ZdZr`wK?sd_$cg&>Yet@3ZwR!=0BEKUL1D7Ba@IqmFEx6Oh zM5=NIm&CNwcXATj4s+a3rbI4(Ira;mHQjaJ>A=nT8&Vs~bwV5#)`r(xuRFG!m;S)E zvsQr)-~0Fq8jkn)g2r>{(a8Z?O2yA71YbtxhUJ$6{BsYUNE96!XQ)kWr}OqV6!~X4X0xeNUj(8q8$#**Q>Qi z=;f)*kN)I_CvF2G&5{jIoe+M9Sl=z09ml%7zeb0j6-u^focj)aEL7=O;AMwtEz=ZN8#@M-Q> zCMf3ZR8V$L?eMy*onHGsxs`X#VXe%xP3=l?jbi9@+U_L~-(9t@A8lO=(>&{X7%QS; z(*8qo`L=}~q-}I$z89H@C8uV_4#a5OdR1b*0iWH7sc{94VmhK92!2x@-X)3}f2mNx zH4Fh*CZ~}OC6e;pKoZSTAJtDs5m|Y)bvj-n^DfL9-Y8y5G%{rKDvyUt{6hO$v-$vwZEo zr220SfurjZzN71&pfHTo#O62>)bGbiBP2C5aUyGJ_;UZ7PHGa}W)g59tc+OOeRWOc zsSu`+NWTb>wL@8)`vQtz6&33or>UgFOSO32O&#%uMIgtdy8%M>dF{6Arc3%Tbt=nt zFD`OT6y4*70RhiqQvAoJ{o5BIb#=uHzUjWG((nudCz<(}SW=DuskENl4jVQ6h8=?o zIP=(9T&%>ShSKlOntD9~^;$l{ZgRjx`zTNBkk>$nQBOhwl zFMA6ncsKM--Asl;_T_;eDz(agA!~eSE?@9Qvt|rK0(c2SVZGa$pS1-I)h`8wqgC9^Uc#r*(to7 z++96sCzUP^NK(ffmNP}Xlouz5ebGW9onb~H82$D|08rh<5c?>zL)}EJ&hEZyQ^(|~ zNXlX95$Dj(vHM*SE@$oF?7b)yO0KU=eSo#%96?9HWwuf5TwGYAhDjRoZvHC^?g5tvaD#~D27;yvmqj>lRLeDF)yO7Nyta+7lES@9uM3?pvTqbT9VhIif z_8;||U$G_Y&TC0{YAkYYfWIZ;_+S-V5QNuo-&#&S(gnNBU1Nm4yu3=3WZoglv@Z`Z z)m)=ad{C#YT2go&R~Xd`VpX6&xu1oVqO-YQgj=n&vbbS^8qKt}$sHaHd&Re8eQY+a zxT{i{GMMwigF03+BAM7NF>ZW5Ms#d{Ce=U6q=o!wK8lR_qRXdUWDv&GoNl~wCU?1! zD%3tP*_gYN>Q6o(LFkPh=eud#e>Ez+$8E_Kjdxgy8}FrKKx~B=4ZYv8EIxsY9(+# z@n@u5DeO+ZTi{qO_c3^A$Z!R!aouh6vX`qRLO(t!%?LwKPPScXa`XLSg}A0eeIZrOe6TFVlkgqo!6E7dy8l#qF}$1 zhRc>YdUxTZq`BR)*?=uMJHhW-Af{}!YVRquv21e1WjJodH?>Hi*rr^M-hO0bppok= ziS}f!sdTXBlq07|7AjYCtcoV-8|#ySrtl zY>)4DvQb%-Q|p@PefahH&EdN{U+dm{>e(VUPz&19E9=-K2qvnaa2VydC%Iu}8DOYea#QL)_?5I`WS^3GCsSp1oS^~43?^J3Kw_V@jspBOAQ zJD_mX^=_0y2E3s;ulwn(FX86aWCXe9JNf2)Kezg26UWv!d`QWWtS!&^0ducgyM<=z z4^?XRpYZaUNzSJ{FR40GQcv;96>D?1jhlL_Rvpg65$3GpZuKw*(W)BIG7 ze^j@OyT5;T?=|;!BK{k548`#WNq524S02x-(a;CZ>2c z&g(qveoo`V-I?vMROrXqq}`rb7q}0rXvT#OQI`j#q8hg7!x?7D_NC>)g`qFR5TRIa zPHh1rvbek5KFSSq$^=r>Nfd+ywyNPd`$)k{5fsNZBloC*{z3~4wP}S@e0mqB-R4(F z0{#nH8J=}>P(B!*uT-mM4V<_-C%F1%pU(hk!=*3138u6z{t=FkE}d7%ihIB*MyW6& zPk#(dro?R4{iv0LG@Wuh(BMv)p>39}>Mjk{yV{|MZ0l^XMlzIbojIVHSoi`f4X-J2 zHd~lHU0^-xPwZF85->-RswYzdTi1@&O+Xt}aE9oY`zNUPcP`D5D;t`nCJ^sn6g)D3 znjRD|wa^yZC3IS(F2}D5xt_OhI!(@nh$0_oAoTW(k6T5;q86Y|YTPJv1@(t?^AruW z4B*i%36~%QM>8KIh@PzI+Ev>^7aUtQ(cQR@apNq(`|047w5*0es4}B_r+@0O=r8Os zaiez{J@A&0637{d_-(A>U88Btr!C*=+t)e-2F3~lI^h;2vLuKUxPM>%qqyQJ!%txd zv^03?!2OLXdxk^eMWt1+PE`Okiz7{EypK{*TR@0yXfj$5TB{%x*mM3^OSpGqj|R+V zqtz^2Tw~1T>vOtnV#IW=A9WujO1e%z>hT{f#_`fDD7XEOq<>{_DLlTTVwnr6tg(^AEm@gX(e<*^GXNSPjLu3yWo}9>TNoV7IpB z&`UaAwMCxEaq8DT^la|F3uMTUj zf9D~^co*(;C(G-ds0;Ec2Bp7DUQ47r*Pxq~?aS^TPSzKS0^B-+& zgX@*pCQUeTap;=w_>5Wj_~xisgl9!c4BO@|@b^RxfqO*=nAR+APLTC-;()Rs@Fw0m2_rGrBx(c3~KPdQ4k zq^*t`%C|43GUfLA%C2k{&kSnK({x%qn@J{PO9@y0JhNIX>D$XZo1$b%$)Xt{!sw0+tvsz)`!5s>CHu)bWigUO)y5Qkbp3w3 zUn*cm)V(mC#&@!V6C1jtisZsTyR-2LqFpY1;tm_$tI~Xv;db@s1GiQ+%Dd~C?gs3$ z@uT0}EAOy_DqX$+C*8G5)Y_@mcON=L9n7UWM5E`&zd@-;M1SqUR#h}Q@UB6s8GTy< zT+aA&E81NZ?VRS~ruf73z@fJ(K&4N((9>1Mn+wVMoR47XHr+u_mNwhN!i7|q=Qa$% zX00++dlw*N+B^P>9h!TFvH`jc>$e$?4vb0CNB7|xAc?LEY%4BVvlz{PI46E9#!nPR zmK&(6fM2f{U%kK&OPLO5M^3ezhiz+3uHcQbF z+8Y@uj6>ua$E5|9@s$1pK4TgA=IYqu&~>Wg!s&qP0@81ip0aEo2B#@PQ~t&GBlG^F 
z({9rGG*)i)bkMvCJ?V|_xGj$^`GDmWr)O3wV~q=yZN;%sl~c5u`0B|JBi8YddIA5f zK@TD?b5(qzc08_0@mGn5alRfg)Mw*?&q=utIyO z3*{_-%6F4YNt=`FF0o$pnvDRF8AGV=ly{3W?~h|U=apAeL^Xln;vG*%DDV0TNH;Ow zC0<>@A#|?9Lc^Pl6%;}kL(^6e8N{Z z>P^iE1<#P40USGoj;WQI355!r;q=QgGUHqIpaF6-;3F8RfU@fWTg7VOUq#21DV`JG zWTWC+9dq`?R1-g@vQwyRugVv*IWM5#I`8gEOo3_GK?%}bxEt+ry^W|_m_ueYO}i=y zul;EumQa8{w=T`8!TJ5#0n={IS`tWNP&sfE`gF|J4n@1ry|-_zgU95lmjxEusJV%+ z%3ST)N2Y1F&sYy#^W&c7J*LK7^}OlYbLm-sv~E|bbqT`qMKwDnTcUF&^m^@ z=|?HKW@g>@=ga*;0vKl*&J>b{P=PltdivvLu*OxLF!RHo?&DdWHmz({W*ulXwE&bC zGa)A5H=3u_*%|>_=T9bwD=$UG+jOjN>5!in8?vUfL zdwya2yt8{(sIReccSIzzGMAK$5uD#+KEn5$`?-+3+_4Eyc7E!Pm6C@`1b_ZLTcyCd z$b8wZ&%#90+C&LEnB}bh%R!`*ex2&9Yi`AAAC;XeyxgvI_pX)MJCkO3zAeooxC=MZ zW~tEeDqtph*mT%ZVOKh+fm*go_k7AXlnZDnwtKmM-o2__8Ng1ptFcU*z?3!l)oEPB zncg}ZEH@yIJ2hoi$6>YJ{;hpD9*M3yFQm=y=Reor5_JG+)E%OJsW{g)@7VCbR76}7 zvMgNrncUY0RHTtqYNF`V)d<1J-2~rDo7i2f`Z(m94G3=^# zd7AR$>khBVlph(J@-%c|mM=M+?Qh-}iP0y@cg);S>Oyr@ZOk(o1{;`Hqzn5-V_~yH zxED^3K)zR^RK0MhlIPkf~mOcoh4H0G>*)j2}LFA4dE9r>CG|0@JY!+}yNM zVi?WE-63GM{qpv?RoB*JC7$j=VpxhsjWrq5@h8jAja-R%dQ&IE_q2-Z{j~-SW<^%! zquV$0$rIJ<;@2T(9)k! zvLV*`dXXkq)rLpR2<6dkOP=WV%9PrXG#F}>rcK+nV-TYh9hrsQ(o&~C6(`RVnU<4- z&CNe8nNc_0+;PE>lq`urXf5n1%b(Y6%(eZV;4h>ieQ{s5erNxUMrS~M|3#zwva~05 zd!`S3oIG$7K(4{*7TK07tQ`rYjQ}7yXs5?Y>R9MmK*z&!^wz2Ygz{`Un=xJTy{Gua z(IoA}h(>iC%Sy=MVmBmtsQ&Z8rrGR>MtNJ{kHvdSIpj2%`O+31JQ`(Hg%R1^@ljgp<2!1iZlUrA`gg2ztJfW8KV)B^RR_ywwEv{qR>Xts z?6`MF(GH*3$>qfvh0Lt#Q&*t8-?L4ThINnm-8y-UuhvD@FM6!?d{+NxjF!k}G8iwE z_NY#@OHU+(R?jT5CR8L_L-_n859KgUuT9ZCmkMQZab8YVSVrxq?8yOkc6?hR^4`2j zJ?clAE>OBS_1L<2oE^kEr~KA3&q@IEifN;K;V?amw!!5Uy(O>b80E3E*|P?b#tUUb zFMc{IE(h{PAsaH&Hg@BYt{bd#tGRLzuwt#}5-#C8m@N7P6V}^-as34M-Ta!97!f?h z4&6%I-@U@qTBoIjdiPMIqX|S<@_jq+p($HDJkuCa(e$bkKDdn?~@$RHL zihC-<*VrQ!V~z4EmHt`f({CO&GRQ_JTYIn*3vBJd+SVQhe`*hP_;y@(TUfp;oW#XL zm?szo8d34eRMW;hR9^XXoA;)1 z!@PS<9ADPzfO0reG{lw<7UWvGr>0)O!*o|_8ZSJp-4F4%4Bg!k9VjC(8Ry>U+^U3ZR{_DBnUUH?s3UTRQ6<^-x|9pS7UgYc)`<#L^HoJIDYpuhi z2C@O2%g3Vyf&Elp-M~fLGUi=+&btr=zxIuGnPVa)z0}JKlUngD>nDVcu-exVANom4 z+KX%=eWLnyiySgn5k`b64g@CV`vjazUfuRixUt%8!6)NL@~!httrtDYN76H0%LZyOpiMVKX<&iMpcynwGv@!7M*L= zUG3>k3o&YDGv3|KI*mS3tRROvHMR*n`GMN^J_QZvWLZfMCWw!$U2`R}?Q;1rjz0B3 zS})VAu$D``o<2?gT0D|`S5xHVK(1UKJqGD$_lH&IkBX#gMu?ZpF=Y+8RN1QS9LL}% zdXn3wz-P(LnKC^S(z zoo!rsB`4XATS5cS(5KR}r=t$>l;Q>>)#XkchcaIuXVk=HSW>-Qw-X;@c^VzbU5*E? z6DF0Pq-aPGRIJh(KI67R`inix)_9C$RNl9*$tp;rE!<54`MQjjYYPD7k;>gKaso{l znB*EU^eX!_)ahDZ=_$GcwD%+81Ctsaj*`LSYjm-<_MJ!FGZx(MBk|Dq+h@* z3I3cLpbDlrAVUWvEbUhG&b{emu1qYc+@0bXbpdQ`MtENi%)VjzjGYasi3}A+N4q&Y zTfy4(+FA)H8G@TL)Eh+kmbOQNaH*=Jnh0cCA2Aw+fWne5?W8zJKQzOKk zA)3M!xezlbNHjE<3QIp@Y~roK6pe0NpP*v=)yKJprBiO4W0Kad+`p|~m%(VcX<<2e zaC~(>IoW=SY(Dt<^0QP~WxbJc{<5n zyUwr89wrChJ98dVYAI^CIlApLXLMa3R2r}#OEt<7QaipD(+}uUXWBr5&$Id0O7r$P zSN_a~D1IzP^BDnJ=+~A56zZ3UEuyXp0q=zMp%!`CFX;3@TiC&)L#+ybd;tnP=5_w* z7Xy%6M~3UwkG;xl$LI^Q(_J)n5_4grpPlvae5!L*6o$?X(9_5PAipc&ut)C~vuHJ; zLrZ!~P9(6ykhThr*7jL2czule&6t(8r`FE#>%M}Rud1qzyWT9J~)S^HA#9`#H!(H0qTR8Q`9rAhH+xY`Z1`K$%gK5tc61z zf;)gYq^EHNI^7CGKas zV~Q9lA=>@6uI45R~yf8tg z=H42~D^C)_e~*cAT)6>ni`xC@hUhBd;;%A&(PBYKS@q?w(8|ItpX0IfQzI3xL3@}M zZM6k=s~M@pp-P=AzL2};Xg6FJq99iM@A^u#GcIQnraI0@#Mrs0%koq1g6@JW*`@=0Ijz|A^?Zv&}3u&A-G z3kVc`SF3bvYOcN_cY-@R{5BAFypm#vbTiA84qWZET52+B_(LJh7HqM*arUjQaS=E(WbA}7~U+fxfV*c<$p8yhaKb0b`QD7fscu+5(B5p=#6&*dc zVSl>VSui9Acd5E*YJi^OGg_N-jQ1j66+8V`+?AzeEzUD4ud#Ufj>GaH5zW(x#7CE? 
zIdA{s_zyDq&=^tT9(bzsB=V-+6}aq}6nX1o#L^lKTA@7MY=JwCz+rSSW82Ek@2(+m zR5udxG?@WZXVz4P+g?;47859royd(kfvvi(cZnVKiL?xMv}i;n3Msu@(+$R!m8@Qe zlG)jAy@fqw;Fg*%HGGyShzsoW^Q$as-}~`E>p1l1p)VTY)rGnlU){jpS+4K-HwLZLlHLZf1U4h)dbE?zBSv2 z?X<*5tEwL8>taJ|N)IwzT53Y6M5je<)|N+z<+wv3MeG2@^|tDR2YaP_e^vfJYmlDk zA=^f^D)%dRT9>>N-)>d9>9DtrztiaM$h}^>riHG(BW_g)F!sDW5o#IR+w`g~)fnx8 zJwzKea!5|Qbgm%ONJuq1Yjky8Efa@^^nFR9L~dAh*@;Vf#Vy*kPjzO$qjNZzI>_Gf z@_i7n3;^mENOOb7vh;&IdN`!T-%Sj};xSD&{t;>R1vg7asFw$Bbj`-YD}8 zBO1Pt%OG~edOG02Zf~G5(hUIg3t-L>fj25Er~GaLLQ4FKxu1Brkgr){j6caND2P3( z3o243ynI*zCNKnNbM^LmQe%b9F{2n?D@fv8p&+&tyQE8C7mKhfE;KV>i=ER@sym{i z$gtT%-8cjpC+ zL>YQ(LwP_Q!UUCwBc=|IQ{i>K~s)tv(x_by>yUSQBFxsCH)@-PA z?|d)mM~T+geSCAaNRu1B`NyA#Khh4~|NJtT`g&?$B+$`2Cn@r$+YmU;x4 z;g`zY2Y(SW{<~5}G36Z+{{422%W!=n{epA+ab?Dr_j=_>_a`o+Z!OO8h`?1LjOTw} zJ)v{ALvAcbkMQ`C0n&Rup8J!=Osfpf{Hjc*MX6VL4aVruWZm61J&vGL=XwIK@0rn3 zA1)mG+NLM@)Q@_&nCHo<|H1D6?oRk&d&I|tYXld6=pyX$FLFEov9qTIl4e+NLP5q6uF5_Q~=@+;NzA3t+D zD&8ZbuU-D1S>YEHQ-ra53X>R%Ii7zD#D9eqwf($K@u$Rx|2}*EE3zeF*+d4hR{GTc z4*~uMegDK+kT4tb?T4f1ehWa56`RPpD1V5^{~@FQ#?ji5Y|KyYz3TN_0Om@wiHrv} zU=ICPX!whO;`(gNyX`uy@mm1?|Kjyebp3x zWEDxh{*}e>Z({Q|D>|+31j+ZzuXjhiWEb5YSt}G+U?0#yT}?6~R)(xD*byWdn{U+C zcK*TZ{#>xajBUT|#M_oG*krjQ`Mo6#QWcS^Q5E+=24+8<>o#1&)Gj7g9Qpg^|8duU zO}sG)O4qK7@^`?buxf7H02~rgzd#USdaE6k&PRve%c{Ft)gUR}o{b}(pGZ|pri{A& z*}n&`XZP=^(-fa@m(-x1rTCB@y*cY`yZQLlg|Wnh1(mnK{;Wkv_)J@H*d)afAg#eT z9XudYb1$WSE4GP`a_E+Dj1ZZ5t9XaI-W87+h(%=dSx4q;_(n|gXZ~7$lKJYm2o1d1 z@xto{eWc*Bg4%eN-b8)(i#Sq~`n8Iw&P>l_i-ngKTMgIJ>K*S{^4b_ybkk>7EYfvfU3du?|X zcUq71evFVR)(}&&^iE>fB5hU&KG%A=xAqxsPtN^7>em>@>cXG%i0F6!-Uc0mMqg)`A@jI+(|=C#dG71>S`mb;`q$##V?5 z_DdMxz!F{f_8%(1i)I0g;S=)gQXdEfMU0J0{J*TSf3d@#Bk_nw-sxiy0iF_m`#cuR zF_cl%t*t@*;qpDZ#b2$LjQ~Q*FMnlem=@9-orT6{X@8 zC~a`znvq4Zb>+jlTM4#QMX;bOy3y<$Xd$FBM}p6^dE(3FwX;Ojb#*Vp-8RY!%~ZAs zfDT|-bnmEQetYlq%$rHc`!P;AoK4T>NahkbIp7<;VjD{#MLJ%+Tn^vbh?4C>Tb9y$ z&NUIo=c6FMp%KKBA6cZQ9@<)cDI3{|6@!q=Ubn0j6&^WYm5^Q#5UhSxKw*~?ks%Fc z<)X?Vlkj2egTPk?Vf(3fLjwk~^*;080#Vj;m5@1x&4cQRSBc)>?!5DkTB!$13NOC5 z22a_+3@A!H2f-say9~@l+?&@)>adGj4DifwF^n>G^Lq5@!XEL8rE4?JI}e5#&B6O0uQYJ1-`ol}=7}FJmVUa*|2@{Jf|T1?vbb~{w?&m} zrX(T-+S>!qzI+|w-J-*npz=w^-PCcnETm%|3LWECmFr74#eNbQGhRYvR%nba9e30{i`S_!}NbS$! 
zGgm!%ZFcA%^5kOhe`S39DN^=_f9%I-Eb4Jy3QjK1VFzDqJLL&zTKHGDpnW}*w3p1c z`}&fWI|NtD_SN_8Fl-hko5GPUmJW=M>+HB@_-4h-=X{g4Hg<>R@S+qmhMkXVFL`D= ztyyJ0Uq74U1a$v4l#iOglao(g*6RyP9=PWm=-UL9XMLi#yf=g=IqiJ+`LtJsKWXsN zbyBOqp=Vxo0W-#{7+h>cra#>aFy#N4@!vR6DD_QENGOH_{dbTn_hAM$GEK$Qj^FZ{x)>qu^sO z!HWeIiiP_x^n*<{oNt_LG0to6yohyobcwFY14yT8>19ixXdP-F*Sfw_w9Hz2}7i&w3wNDQC>;e5M&Ev}KwR!w<7ol7nC4 zshMG#7P5PlovCtEC=dFq-W4z7_9@0e6%RKR*WfviCZNO0AYYeAbX;@l46*8Zi9Dbj z(v6^MRK`UGc-Ggc`}nndytUxzC1rWVj!*k`e;y9w?zhu=!}weeyl2H@Vc>u%Wscpr zXI$rU`>1^$v2pQMxxP+n*THx7E9b{AQOC0N8sQ{MQE7Er%qE5etpWItRa`h z$nIiLl&|uD1#)iF=XcztTjE~nW7(#D?QA-}tH|3#yu6i=VG5h=IiplmXydgg@Hy#j z`95~jKyBlCdeL^l{(@R`f^1ranq?#5;2x{{p|S&k@B&2udU$eejnOUyrZ8@?PZ7tY zM17iUa&m)Mor%aSJvXc?n&093&18(NWh6-bDCLvn)gDQac~7UfN7!J8y?0bV0fBbj z$}mgL#Zix*v)wlH8dSz@i>ZV88V_A8W)^lyhypK^mlOzSOE+-~_j?t&`(;L0g)Qa^ ze1Oht%8d9^)^JNHctdhrWxm8c&n(-|n~cB|=_<{Kq0c+_R^yk)jJlte;M<-M!CAw3 zAeiUV`%=_G^(c=UC`|LPQtW@wcfS&BZC&vJk=r8TVm0?%FrVTok>IWa6m}_5o?kVk zM@fmE2UqPlXy~`9B6-=@<6A?rW2<4HZQ5~pOz=$j+yfO8#ir8Son@Oe!jay_j~6^9b8>A*vQj{@>-TCje-x|a zq{9xZ23E+tQl`dUXp+qFdqx>6y=Ymos(AeeQ9x4)=`9kCB&|Okdp$??zL?aNa=Xqx z%t;w0lV2Bfb4Xgn+G22l5*aJf!5-J3= zuV%|Nd&HNj?T8r36+^9^IqSajqG89w+KMz=>1IbdJ#R<&+QLAyMcWo?S!uXR>%@dA zyV??Su3ia%S|M$o!UrE?-r8+FsE_R1@4)pt7TA%e$DCB!=o8B9K+@q6e537!Jv<`p zV=13BD$hwZW(|FDLZR654U@1wYvyLw-g^Eo|fB&;^TJoyVmUJTSj-(Mab*SE|kJ3%M#`jDJSFxoCpd&PtF$=cdY942aKNzo}>h- zo?;hLyEO(*8g5&$3M!xusEKc2qxVbez)|uMoFJTdJq|l&AaL;clm^pCM98N4-fEYV zP2&!c>_W8w<>HH;gO5x=HJIl4D2v}vDdNf6sX<+TK!tm2zC^5M(K^-NO@## zyt2yjcQP<(Aj82sA-%F1@@3`LkA$f^8}KomyBi?D0d>r?XSZGaa@*s$BB@DOA8L$E zZ+-ZH0w6ahRK~_-=b+15(sg_x8l>(rHydN$pkth#ToPq!QHNgh5zzz=h;ql_)3G^b zIQ$H6B~?AwFWPFAtu?wj^OkSF&XKBhS;l|$ZTt}to8#L)k)Nf9519TI*q#!P5A*4a zv)~l0v}j81_P;iG!C-gK@^`yGP%M7Qa{DxWq<2neL#mGh8Z*|lWm3@5!^oya|DHv> zjTABEAT!sSVI}#GWX2l$4xVu~ZJvi^r<akj7dtdzS3+E+w$P?9(mv-_PBlHS${Kl zQn(1q&1Z5(GSjcH!??Kv z{!jbCfAQN4%|vbEMTd7|VoCDzFn+JS#3#d z1SY{cM?!{h{F#$rXrTpb-FUE`GMQl(f3{zA4qEunAV?dQo*~kPrJ}5-?wAw?mJ$ZC zt=3h%wF8ga8nz86(ioj&op4@;Z^`pO~hKuTZqv_vdyzIFaJFuB_iROs2Gj@pum z(Y;=8S~{NsLPwH9(sFejyv|vzG+*x(N##78E@{=;kpXmA2zZP8`j*sH&03IlF5ptx zx_gnwdjZa~YEoY}%YNmqRwZ2LnXP=9xcN2T+6?E(HwGD>m*uX8s83QjR^^4yvw?l) zG7)=rVbSw+M%j$MV>NdJ9BYaGt4-RCDJD}TzAVTg?y5-P2F719kJBa^x@i=O?$c@K zTQ1{B7t=%)@leZrG|?3aar$~^A&wgK+)7EEliq)ugU*sY%og1f*K6|{|IE*$9Ytw+vUS0VTuIeC09>KV*5W~*EOX3Eqf zXy;?%aXQ~CHrg$)7${`wTZ}#S(*BvW*KALGmNgwrwz7wLy@>nhI*%&M)CpZ|5qRM2 zSD}&r-&n{0(|iBdZ`}aRoc=R?zMt}d%sQ)%gIxQbGIFTxW<%cHSKhT+HykUi*k?E1 zmk)HcUT)qerb{*%$l>Zpq#pvmv{qQ}8|vvwf#~K1DOlwurMn_YaXq^&3OY8%K7I3? zvttmKU$pa}%Ur`?$SQW$$Pd=I%IfVF)I~JTmul@D_XH+_os@It}kJjRGEJOsT#xN<=YIo{QU-Djlb)i7;vr*nv825|IG+B|1s0ZTjE^I>o-F`V_ z`$fTVD@+v2^geBMGOy^^;2pj%!2X-%jAd@>M6POfn}5mOd0O&R$agkZ@9b&2xr)!n zWxjhej*!5)w7|F6#!JE8THuo zuqqE1Dhpjtj2J_i@rODMunJtKI~M@fRv|8Ijv+FubsV!1h6=(~vw5lY0%p=f^^T#~ zjU)0HK#z7qpyAlqWvIf(wK+GlTH*c&o~u3|D5`-_V-i(AVm;u?G_5htVy%6Q?Q%`m zrM$Ua(Pc%!UXEnTHZY7EaqpWCJ!wA2NY&#jcc_p3zv++vZ+#N^=EdLx ze|@T*gPHsfV(@%|d@uU{QkMDOF9d`TI`(VX4Srh?KyASmxy)}Wu+_3|UaVc64MiQV zKzIJ{C^(B4BtracICB4u)q?CDK0oDjWo1D9|Na90zMlVod5D&agxLR?`)BnZxqsnf z1q%OsknI*2>Ir_@cNnIfm%XAu~9%!vD_336N&(hAYyFy;sLYK~F1xG7qK)SXz<% zbJt$4*HhnCt#~m-nxql$w!fZwhuo(g??{-)R%im6z0dGap)hwLzNkRyNGLN}zx%J~ z)#cCW?rb}!xqYyv>@M2ZSoi`rn2Jak)St0ZrYnk1Eyk*}Qm2gM!pka5P*7!5a}-9* zr#gx|etH3*E1K+pOG=Tr*<4$h4{hzxslurGlNDWeJN9Hs0N#6>n7emZH?;oGi&0Q< z={PO5Wd#3bm#h+l7MYR!Oz~A7^2vmmgg^dU)RLzE`((mzWO9Tmba~@(D#>Zb#vqjy zuVGc8$&>-Q?H=KydlveL>-3V`XnnQEyPFO_D0Hf(0l`|Muz9}NSW+(QLp_NE0LX8! 
zTVG~uFTaia5$#P!zSZ=Y%iOhFPf9kq)SoNB;KEmAtO|0oQ zyLNB=$hyqmN+6*sV$@Rb$|XO~87~#96B2#fMiHJg1NvP(X;7eNMDX2s%FuW3yg*hHP3lv2qnQEDU(Ibc+lqb>(s@4pZ{amg=i!UkHJg8}?@h8;47_qWVmQ|KYb zE9rOZZ|zr^FExgvEX3$`u<(Y?EG`m3ck7;Q%fBHsYB6?&x2AnC16X!wgjueOlGSo+ z%Zex*)hem!C8Xy4oKar={-LUi zNfi{}t}k`CRsNMaN&)$o>HazQ!+(*h$_8(?y^3xOwVms6>g}dg#oV{httP+te_?=) zBRMDN;p)XyInuLC;v3?OoOYiQV`1dnxnD=P6xxMuYvIe8+a*yO2+&sOXP*%Nr`&?T z{|~vPM|kaSRh?8M*XG-8TvOu^GWmxv(f96lbnlb6ysx3_{6NtNj86F7u?63;g;J~9 z8mKuhe4BFvE)f5abBFBX>Ax%^m6a9eNqEhdOJoi~s8hB~OcJPSt8t*w$O!HP#T5nG zi$Vb2gFh9yp~Ek^caA!zDvS>6KC)RL+AClK!n-4PspOD0?UVZpu51^-hphO*z3%Nj z+_t8ZWbz`>%nN{muS3`4R^QGYN93<##dQ9pqc|*w>~#0By4Q#v9_j@4j-{nV>#Emg zSYsEEgWneO$6rL}fxgy1BE>6`&$*3eY=o3mn0Ur4w*SJ0MKjn=8ce4t_VUN}&}>2F z9nBsxwWC5tEJn!yIh~~%W-)<4){Xw!mw13^;Y4c(IM@c8G zeA{qKGVgVHDN!xoO&bEUL%N~vGFXTAy+8Beghem5=nh<5Tqi9(`m=58ec+zi>XN8C zKL?0V#W>2g_iC|=OWnVBM$CCKx&M0#q9mjKI^r8U%oI!Y8fWB3f6)Ml) zRXUbsjp|>Ev1lg{AgEUwcvR26r1vef6mA6Yo+{{Jyi9RB)```2fA>KC9w;~W?JDIa z`ITw%EWN9-i5R}*A(8VcsfYK;e!JE{4?*iEBpaYm6?-YFVuiYPnaCjiVgfK|=buS0 zMejE9r7GaHcZ?Spy=mz~n_s@_?QmL&^#(Ze)~t(3Zu-=uRc1%4La}{6R1}$Gmpna& zcI=C^lY+!lGqk}TmJA0=!VhjuwcK~a!HDW(@pczwzLb{?k*N6mfGl| zw*69HJ2n@<>YR%A&+=j?+Wz#08WrLFXt{FXwkw56ZrZdRrSO*_n*|WG{HvDRq9d9sCn$6j{^?c$!9@7a| zSMg$fYT1)jI&pU@z}5b%+u#}o;!oLU>M0z|ESK|lF8;XDq~Z-t_14pGRrJgL9GKzr zWMeOa%3OnjG_q-feJ%-Mq{)0weJ}3QOOS3d6+n8^XH5M$jmE!;0TH(%-t|6yNSkHj z{Y_eojl&?98)V_dw1p9$Do_KX&-~c`e-#3qwgaKjf)!0Grx3M5-=gg+L0wSLu5KLE zI**dur|<5GV_-{?DKL+Qk{~+_dya2$T-#j2`<^C+kY*5HAU0kC7uYwLT(hmV!}S2$5TXq#OLyl)GOs6`-3^MXRIs zPX0NQ;krUV!W~e}BQOP_*yibo46PblQVdNh8~7mH(Ttm!AjI@ngVe;8(>h8rTDz96 zhi3R!MgwD#!u;}u2TBi1o3>JaFX>S6KNAId73=2xBq$4EoF>IRx&zZa{?Zt6|6)c9 z$!p_}G4itlx^juW<>y&-_T71y&R0Mthh|*F^jo6QgW;&1^OW}!71@Y}yuX^}{1Gyt zln9wns)7x5=5$$t6ZYljaaolC!%lj8a zmFKbqqxW~z5uesJm+G>Vbm8{b7Xs9CxzLt*)FjKT(P^wNu8jhBX8f?+_jYtZJo4`2Sw2Rk?xg>sgT2U9QH$9cG$^^ zKD$en9>cwi+m}0K{iak{fFJh+`c(zD=s_^hXO^u%l*UVv(gz-#F;C+KbMyH-h$FDu z8jOtY@?KAxp8YN11Kr@64m@)8H4hb!T7-U{Wupe=AJ>EnuPTEyZu(++#;?-;>>@O# z-x2s+U4(*SlgxiQrt=OSpBb^%*G)nI7pZUIq~(=#=vdguFAQBB86 z5jcw@6YJ(h3t)+1IH!WUZ&K}J+ES>3meM|-K+`XW=;5J9 z{BRb$^z-Dml%;+sMMzEgeOJhGy?NC>NjtyYTB2xnW%j)*cXMSgxSo#+HliV5Szx_A4^AD5; zYB4{KWU5 zbEbThzi|-SMH&cCm*oQcLh(nUSBJp3QZXfZz|1+5!V((PwfcuGC8f)(VzWDVlU%{? 
z=j6(3vi|cq{v!LgA08LV8Ost3^hV6=&c>|`LY8ZVQ?_{T7O29Gdx_I?+3{1d>gxe2 z!YjMS$PDZ6$$vBiAf5L(c9*{)wU}g*i$(UGdy2Yfqmd6&-@cO`=hzv`>6oE9%+F6r z8U1ZXTUrdQmP|sZ8_a{`V{E<=RdFgdHt}{;zD|SMEm>8v@oUQ^x9l~JpKyaOyzuz9 zy(%uE(tWCgedSMbZ)n2OLnz~)*&`Kp?Y|29Z*%XlNu$lhdhba*1frp8@C$q*SF|Kb zFgMxfuubh_XszQk=U(!YUD#Xk`%xC>_i{1L>GSX42e+ea{)O1*w_{@tDfnt_BRl3P z7PmIEzR<rvKGg_?iM;wt*N{YuKS5LfuM;((Xfgm3E=L1q=i@Wh__ zD)sH!JX*F*V&kHMQ)O5x)G0gD)#$im`#a+GBNd!AHgz*0-%Bz}3g%@rXseZh2oV$0 z4UM(n((}l+&>q{G&WgQ?snFUl6ALngHDIe;cCB_O&sBaA!ozcDSJmjSCm2p+PyRVzR%aIMu zU$Zq_O@N5XjgWAcwAJ(bSF!t5sJo|FxL;*BKbkv_T%@{RY0a;Zq~1os4F_jGj6|wM z+wTE7t+Ha_y6{E9CVThW5TDsD(WSI51-x{ll_M@GCtSV`Ct`P|ShG3-Ix#d--qJeK z%d$*XuYBH{QQA5&r*WGr-%_{scdQk$N0!U^U`D=ux0(?=NcNum-SV2BQ;bO?^qj{n9W&+EhyIMXo5%|dp7RZJ zcuSyycNZMtm)nFG1>Z-1^`gw5KL?ROO?LE3KoA~r4qaohDk1F)G@CV0mZbPrEgnr( zu0IbVVaLU1#YS-jWKhrE;^#-XM^(P_H>GWPYj@gtOCRtz!q(q{&P5>Wv7xBZ;@E(d z!O| zasF%^2VP1apEIqZ;cYA5R}f6I`T@JOwyLCIasRq^OQD(7*#^sqI&nUcn+)BqRCIuG&$Bljrvl+>pP){5M01E1{s)ysWXDN%;j9ZTDZi6K$bwd4ER-*?3_ zTjn>uN7G|Xhob13@( zOgNc?B4FiU-qpUrmuiQ~vEn+UK(N#PsEA3kU!`=uakY(u^(oy_AokIwHPRjMEnL%E z-tu8;FAn87b(fne(wnt2=4q`lCmK8h?K%;YjjlTU1HE9-lHCdDa~}Q_ly?saddvih zmMOIG`yR_MNZYxG!bRb=#xNs##t9&~qUYPmF{^#9hTEE|JwNLLeXm>7k_9cMe`PUw zIhGh`a8&tXddRo6uzB05bsWvFhPli5AOsjvH~7><`IatjaE5Q~)BYZyF^ln8 zl-&_L0qBCdJ?p>nd0Qh0ro}zaR(?Bn3c-tn_eMSw6u-C9U1Q{wuM+)tTJ5(|X5@DW1FZO#HALja5=Spb4_E2k{j6wtw?53Oe@umRLI|hu}B@W`ruIS2Dg&R{& zWnHBL8tyR)8x6$Y01gATLnXIHIggKDyvU3=WSC$-h6l_ITp3b3KMq}8bnodFVy5|2 zhE20m)00&@Oz)sk$)Zz1d^`wdTtOK)`L_|5J~v}HS#{~&iTSDjRNbFsyIhkK12Sc= zV5~rdPqr~%j1oW+EUqife$Y$aFJqJg6_Ch&%ebU< z`5)ey#MNvB^w1AsMsoFkja1nH5xQ4~r=1p$*M0qhwq87q2ZO-K zhf_}Zh7UGFxFz}o1>6he`hT~l9j$r|KDfW{UPDj!inoAN);K8rU})cqRDwDX8FY#r z6XIc94$HcG7h?pL|3_3Sqt-eKh4CHp6SC*mK6F0`-lf9YA$lQb z=l00NjOY!LZ&E9o686$2%n6QDoZBizeEu>L!y4cy>Zm&FDK7z!hWT&bH`4LP3m+d$ zYH)N2QYE#(5<$S$dQMYfgGK6lY$?T$!ko<=XEi;QeC8i5Z(}Jje=5F0%tT3~iUGR? 
z2zs`Cbzj|o?}+61{@tsmVinrX`BAk4Z}yZ#`g6aW1H_mO=A8&>#t({Mr-;yPmuWaV zdLJ(oSJ>7>S2x6*p3)-42 zp%e-JgA(}X7+RAV4m4qQqp_RJqc`XSCw#rf-AawHKgk6vjyUOI_^YQhz3l)W-DE5z z_}!jt$VMGa7@A!sTnBCD;^6P{m3xYZcF2`zp71SlZXP!&xf`&o<7;vhvwv~ouy8su znMen1uMi3tfKlv=YMhUIoQmj37B{FwM8Wsj8#6Akb#g@W#)u_)w%|x= zNM6Q5_1T4}{~BZyD|*;?{G&x1!;^Z{f+GeMe5POM2}=+G*C#UEH8L8XQQ0i?s>pw> zTxy+69VnnWs@1}+)&ednYb;^M%}rsdcHIkZwDVhY-E4cwkm7;0a!WTcR68nN%4w-{ z3|BomhHe=~5#1xpf5tR&0**7DZn?fzr=8mJK1a@~_Xjkjw>5$=WCv!W;0OH7p+FmOYCR_;?FfI|ggSJzr6OAVu zhbZz!@`EoXL2d?MF0WmA%L?I`ko%0A+={fK+X-{t*uH7HS+r{TkP}eZgJ*`p@AFKz z>8+rHa-;Xs?!~9t;LlF?`Dk{u!uoQTV?Q8%JwIgJvN6=ikhdoGPq}Hpwzp)lw5;+R z)fCuQHv|3rW{O3zj?yOX@c#A04&;*>|4aT6>6D1k(b7@wo_B80YVL%*{{si$udEkx>MzxW>II-3ZMbjUh&q44zR7ED66Wx~nG{XkZ?^sT98bP8)ir&R zGFyE|h4Q}PSeV3jK66d})`a^i&Bx*(+Cp}Zx+j`c9i%k4W@e^;8rL2cFojNA3JHbYlUG_K{_lCimYn&t|*`dOsh`>Bw^sBTKs4StQ(r zZA7f~x4aZe(QlbgF{~m!EWO`b8gKLoiqdsNeH+{K%_6X9T^^_>yFnBp!fdk1fU8>x zv%l-Ul`@F%+KLI2cJE&P2%Jdtn1~KzXJcLbq;)Cd-y2In!?1>cRio>r>Do3q?NHyK z;*zW5N2iJa`FjhubDFQLWL;?+JCb;8ejA0JwNu$!Ieqc$808XN2^F%rGw zXzeJ<&}9HmL`3Wa_GKqNz9}Ov_@|}H8``&ItV%L9zt;1Q?C`%=Qq*Cr3>*{Jkf!+wdhnmwW({02p;9)9+%82G!i(+qP{ZWibrq6r zFJq~=7fqD6!u@jlAk9xLCu~&!`@nSUlkP!W!&e&kmdHbbFc?!KPLDEOq==KX+LAIe zDJ`tm-aI+6z{kM##3DIN5Hy2IHZ^sX+q+rq{!X!|V>yOGyfKXstE|i93E0%d*XTp@ zCF{E$V5_y|v$W=zJ$tTlo3Rbwt3OgQ`CW(6Sry^V1XpRN`aX{Xv{vjVu-ns<2+LnE zv2NcFaxp#d>X9Y*H?6V-g`G$IJ%7bA9VQfI56aCu5d?ub=TP}2u3u8UJ{+cp z*C~VF=($;Mszw~>K{42wg`?d(x-H-<0z;-H7bo7(r#p8=^dJiB`9x!d-52%wBl(Ob zsIBcLu+h|sB~vQgTfx~vkLBi2J4UrVzeE`Xg>wmtzds)3>UdJs8I<)aY+P~y*R2M} ztpq$&U|-N8OEfui`-c(&D;}!*Dv1{FE`L4tqe{V%#hzlyO}woN-Vv$Xf3cufx9@gp z7v!Mm>4VC<<3NOB&5R2CFsaO{tRo!rA9OUYX`?#knh}t~$_L znc&KJcy1D_l}~qIjC+T#XsNB9$!bu|P8y9YAQIUWb_J#%_QyMNT8=%EgcOBLN|SKl z%~&#(t;ZV=?(y2$*CJyt!;gl2DcbLUUujnI`0aWUgfw;Z&dCG)>Z92^>xwjhCxc1n zy|t5-nvJlgF@SA5gL0cUopnD|2gVy2ysq|+jHyv2mxD0!&fFjh4C(cEe=HrQ05-*Y zLt|_ml*f*E(@&~ce&h$P8`l;3Z0|k$%k?FGj1(C-- zPdfUP7WKrQfZ;E{+%)cmes;5c2Qf4nHEm6NXD>07fBC_Z3Y|@>VQU{RxTol7eY4NM zRdi9-&>*$6#nRAtP~rq_iMstzEU-0Mz0#Ab}qTuR?BojLBA71+8%{73@70V39=@M>2M9 zoEMko^&6NoIJYPqXO7f()<7!$!NyDMY^*P39tjn@Cn5ZDH#b|rTc#2nNVM0R= z=V@2(o3?DYssR^({2nea4J|)$w46pcnl$c9TfJ<6FMQSrmZ?hn$kU~-q*b#quVbrC z|D-AkV>E(Uz2s@W^1xGTguI2wj1O>tzWD$ay~B5+WKY|M#L78JYud)SzV~PYi&Rfq zo%i>@*nY-PvV3yHt%ia_3|iq%1o{!V%!3PL-B>6%Bt_9ac+A?hCk)8Mwq6VPZ%U`Z z$E4e=3B;;#;1aPuna}L~b}5?&G~hk(R1%FQ6L8S_o1EkQ0GgFZCqef?>mf9utt%*& z%|UjPQvQ7?#g+icv6n<<*A6%rO_5aNMc+L+HTAp2Bf^cZYo1@DeJiQzHG9$4Z*>1W z)f`Jctob4}lV}?)P}mTjp8a6zm6eV`unPZLrZ`R!(|W~hQr@_;xA0NyL&tvgp^R%E704!Dl{$f+JA#%pv*0WciyEMExj1|LCZ^|?6j)Y{KYdGw0uSKv; zeB@2J2uO(x<__>-=_2Bc7SBqx%mz=M))q$YkS_; zA~$N}*a<>j{Zk}ZYU*1PLc>3jsrMIt+n7eoP%{umNW}Vvb$_b6o|3?CddEg>#dYA% zJAWz7z}|I*b&!^P!LkvBHGD@8A5_$@!?$lc$ARL`1a5RgknJ#Zb_Nnpw=D`Wvd%rb z8;mH{F4oC3g=cJ^rAv$G&VMT1Jl8NwlyU1ymET1p&gE15SIxUPJW@ksX@%Z(Q&~uz z)+B284Ci>4^xt~yUQ;wYJ*ebZ*U`9u|I~hSBd+!5jNsH&<5GmK+8QY{lpf0E+JsP1 ze9Z%JPdZ^6(wB@2#OsQm_s7QEeLub~cpGzg__qP1ho@WlyY3lA29J9rc^tX+&*pW&~@L+@zlW;Vl>tCSVmx#w%l&D4)S-zp-W z6!te*Uus`%&o?n;K+M#OJFttuZ`dne1W*#nU{CaA`Bj*pH^)ak(Q|VLleeil=Di^L zSNE4c#W!j~(0QpwUw+1wtMM0}Q8z@cA#(U+99p4&XfDlri2Zot`QhGg`Rj0sY2G@q z#UE(KHDl1l48Pw8B74B~s-t%=d(dUtiE+^Wr)owxXLRCbuxKZ4k9|5rCX<&DUe2JQgA2MNnFH>8nEGh2BIuOpdoXUVsQuj3Sc3xGe_n8D=eV9ia zg|n01kbR-UqnB8eKacslAe@E8+bL@EHrSwSzO=?))UNkOxa#hM)*rQ(?zMY6??dFQ z5#wJv?M6=>R>UJN&whuj)#@N--~}nabb^xjY|*GRsy`VRb=FLAmj^Zpyk0M4HCVrx zNN#hLnJQIqgfllkz$bik`yx)hw;1iVJclxCMYRC!r&kd#Bsw#ilY593kG$|5qTm0N zR2%=57R@wlzfGysl*yT7?euv~^k~RYs?sDQ?3SMMuD5Y+oa^Yh>T6EV9$#1$9lZvT z4!yVqN5jw1cTnzTF43CVFbbyoccSn~{O4cE 
zc`MB)UtYBfW2^@&A{#~Oh^s1*_r#$@A0)7{poEXje_XKKJ7EuH?h#OX&}sE+&mQ%!m~9W@$V29n+5nzS>Z>0BeU)N8MF-| zVG1LYEj1Jkl(+NkOwWU7mETY%R4;(r-#=!E(sf)Ec^!F8+Z8Z-Enm2T+&IIZqXq&S z){lzB*%B?D?R7}uYjaa3G6=F%+f zgT2nu|3OE*km~=PzJq*u`>W7Z+8~W_CvicQkQK?6$=it3!Rq}cMSpdn0}*j+!Az6E zDP}(&@rWUj;MtdBG{GvseF&1BngP=GrH#{A%I%(KUlOD5!X@|I{Hfj))r(a!aBbMA z*T#!V#Q3^%m4s`R++l#_lWgM?R_0}CsoebgUKdp{M~IZdBGw#$>R@qefL}E2Uldl+ zC&csmS>yW?ql?z#Ge7?I(y>xe*B zvuK&lS=*E%Nq2E9@gJ_QPE_ez8D`4Lwp5)a1?TR~r%Lo-X^}Are7|Je&gbb**41~B zUY+kmetU-7vxsrYhbh$4IF~&e1{Jyoq9@(A)A8-3<`^>Lj8>L&D<(gM{Q;^8)rg02 zEu}Bs_{2&rwP<4xK0z$pa`)zZ<;Co4N?^-R;8KtaF~3%E>xtSz->U!Ph9_qnAm`=w z*xZ@Igl0XRAsWO}Uy3|*h0t}{cmr94;<@=%BtkDo+MkbE+V>~M4`p-@C!=p79#3kC@eVcl8+OlxBlM)p@%~B!l#qpY%6n_|CbUZ7t zq)@;pdl1=jax!Spqa`GnTwzg|oG1;0zO!Q(&n4XEu(A`4;^CB$eBsH`x$741*&0_L zW0gRhTf~$_Y;J37C=0|c_(-3tM?u4}#|(nJxrUoCaT1l{CtLjA=j8}RG*H?NQ)d>Y zH8CG}NY$7kcw?d*o_6s{c;JndhR7yZpO{to30m zD8B6W6%j2Nwx_uas=o$S=Vm_XT`0~8IZ1@riGskT&3h6p*P0cuc~C^PboEA@bB>O+S$5<^Blbu(a`#*67p57prO* znP5-Hy+UuSQ#o{L5(dB75$a7D)`x0@postAREY{8cfMZljXYAexD|_j+ox(@u->8I zD4&F>U}H6{S%GQh$RVj#4|ayU--z%GTZ33CT={2 zCFGKLkns~2{q*e{Lp{!M(HMu}lWigXu!q~?w^VZ$p0|46B99oL=-_?+-+NP)YrcK*uHiz~sRfZD8+k$r_^dq+D^hN&GPTZk((r)g(jVYxB=)r;0K+*-IZX6^qq~-pl!qEC6@c>7O!k z(K55A86-`ppb*y8nuNPs!N4CMFfJ`UV6PXQtv}JH7T7ZP0tyi-e#W zW{pQVyc)Rgk1A~uX{$I|ht51%%`|>@q*KdGR#XTER{5-*_QAeYg*=(R z@n7r{&rSC3GgBG15hMW{;N$x9*HH7pIjxSNIJ`$tz>T*2^)o_S(GPo%?KnNZ!k_(| z`EgObb5{I0RlX_iHyfjuIwelmfW(Lf+oDw@@z+qAQs$3jhkr2BdEPsla!>qXa^1;P zceOGeWN)n%-$yoj#rD6Y)8p6NTx0n*xzQ@oH0imvK{H%Y#u+V-LMPL4ho9urwLe01 z>})2#^XRoWccYP@OT9M5O4qlUVWub9q-gtrvtT*KQGdL_D5fG!nEolz;g4tdM7WRK zB98(0X=|WS*C`oue9Rdbup<)+b;hQGM5I>!b)n2obWhF zr?rQ^K5UE=KTDLtq9}Z!OXj4p(XU_f6*kEeu|K;uM*jm-9VKf|`o0zGLa?yVSLZd@ z(W&BFaRJ$=_Ep@E`lAWZhViye*E?U~etDG^0%NJwxq}8M zQ~k`Kh?5%`tYNl5r!8ccQVkk`1+00=Y_0|oDLYHnG>deWk%+K`e_y!Y#z{{RU*9>e z+<44iF^jIvd3o~Oa`NpZ3R>_-waYAq>WsZ@op{(QKb%Aaxa>K=cvSvo=Q}>OaS}## ze)X?`YWZWjAH7*yjD~bdLQ~b2&?a%PV>{q=J{Vk7@95?=o;)P}L_anBz3eBk)7wg| z^m*UMA6(S?;~GGhxBJyztM=&>oXx_w{p^>BN;|9r8?fV>uS<%h&OfPBdkF;&(iO?( zu8U8k#cNYao|i3CCu;svQfjPiqqs4`L(5FvkbM<*DWSi@j(|84u@oZ_i&#;~Y4Q32 z!|V>_^goSN@ws5+p866jp2}ZZGw|2yQt+6Y_~~4`T&kP zs%U7x$jk^L?IeCA-?^(mt8irQH8L4#{D`xs=>qOV-foM9L56|ATJ*3+n>*@wC}@{I*Mhj=NE$n zDLPb8|FnFUQ61F}%hUC#r*9ww7}oP;U-~#~IEv2405%4g3_u7@=>vA6$(E5yjyJB% zOdRg8x%KQ%GPl8|uCU{wh#+dpX*!%fVr<)zEKFtHVnOHZ<^ zL+-RoFZQUN*cwzFD%xkw6{kW&-R+!P)A~%`>`=uU0|8Pm?Cn9z7CR-{f{p6J2br_H z`zx$%D;_2;wph5ApQ3`Qo&H5@AT zC^#Vce}N}`aRlW3Q5$BXbIjh-RmM9)a{=BoY6CM77WA2rP9?c#SKmaJGRGuqsf#tl z8%LyIRA$Kos14+rvaNbj$AD>f(Zl8BYB-%kR6cK$gC|})=I6soUOpR)7*4&8#;wje zh*MMv3gGyqb|r~}>6QC)==vx5=cnp)6+63v!K-vr?5b{BeB<$6?0s(E+3gK6wD=ku zgGpju1_|qHD1Q&2{>ForcBbfn(o@8KGN@vb3NTTU^NZ(7tnFC5T^TGBa{N5xdw$>| zX99+91C4KnY(}ufCpN>Z_7;Xsbj_1e)19#_ZCI#eG0I6@JkM@pgp^ialvb1Z4;TuHWfx`zdt`-{|>SLVd0nE z;l6r({YDr7+88?+@objAeM|kd=hKB>vSLhZr8^;rw3*$dG!1KjzFre@x=hVSC^9Ty zsS*T3>Q>J$#+y6ib?L@Pv(s`7n2|Niqa(Z;d2Vhb;)6#ecP4>ED4WgI0+q0_X(`#6 z|M|A$QeSQ|hWzqmqSS<;_6f${^3ng7cm{Hp+xx5Q9*Bus>vRo($ApBNgww_2W>x0) zVlUe#frjyi2_xim{6$RV#QVj!qu5Y4Q|)QNcdw~li`*9=S?HU8Sm;EtB3K4yk3&aj zThvMxVn;pgTEezy4+_fq%%s+ixZU^78Y9!{OCQ?IZQ@dAVMQga2 z2uTsF#)n0Ze0niPfFz*D4x|k7lvu79+FSMUrjszk zkQ}o7@Kdh9bxMLxekNMqQ=mk2OcUEGoYN8Kn|#OpIS+L)EWWAz$_H0CpYd65j-CpX z*p1`)iv0HE8^jySmeLMb=lA>+CtLC-sjh4g>fc_|PVFy&#`nzie6sJPDF3SQvY(QP zLXPrSZbRu9=5}jTbFCE`m%sWOITkz5fhb9zdd5-B~iuc)!DE_9P!u&1o)0Mc% zqR&!o^mk6)Q~U1jEx~KHdGjaPcG*UbHXdMu$t5AJzM4(cNS5Dq>N};c54Pp%4p=nb z_VPK8%r)$v+X$Z>H_q0}*W4t8r#E=C?@%td{N~U5bvvZ0aTak4h?7@r&H@>KP$&7_ 
zuej!0pHwPhPrKvK#ybZx!k@xW`8;%RgN#uw_vI!)XcVl|`yloiQ&5s~h^;FaugVe>c%Hg`MS zI^9oXZUCiH^}@i+LQW8X-X#3X*_##kB~xP!F6$We{T~5$KnYd%Q(T&eAK*4ZkYC9h z+m8RRFy*dqq72S^R$O=Pv5e+`B%h{5GfM{HBxQ5)&>nT;2|*p+%j}2)5qFJp=KYOY zA3c%ss_g;vnf6Th-4SCyOJo8(9T=EizY*%8u7t-p|EuGC?5osAogd#N_Brun12_x) z&1gkXP81W|WJMli4f)1QVw7HZa%z=Shlv+6wEudDSIQZXGexep{OA!P3mC${dD^UsajAEl~jfIMIUswuE5)2O%P!b4!JpIKxcokTm8y~_YMqir}_B_H5aw$`ocnr@^ zx&X9;wk~eoIMgyd8-`IhQ-M0P#3n$84>bIN_ZMf>MY9Wv`-gj}QicYkbEbF_!X>AS z!?zen<==N#-REvXXx3}yTo5;WP1mbkcr+%o*D_Tj0yAspGNB8+H7U23nV8+CMaLJ_ zT7lI-(iV-GJ)bs{3G`gETOx$N+F;IcLV5{+AXMe4+yGhO5*WGTdEY#JbFTgCK%`FE zT4uN;z=?0@?%~j;bMytt33AozkzU!`4bO`s6ddL~x>jFM^R81)G8j(q9|(k{fj~~b z^X-`vWvDcm*0sdbq7fhLnu@o6%B;>{N61VbR*%#gMA5!8Ze9N_GR&{xTVw!J=7;ik6Jk=MRD5OBKhH@tsYA9q0#1aeTIjBz=aMUwb^ zYM#r%BuLnH3RAs!-S#2$mjZvNqrnL5+X>CsQyX5?vOU4{TNyan!FUCh)$ZfmfK-u?h-Lgk!E;s5y!OMG zd!{luN^pBk+5?ioxoC9Lf7P6NK2Y#nhy?v4<+MSTblvFHR**YSqQ`f=7bfA}3cEp8 z)D>G7k`~bA(oj)I8SFQip&oeDMs2e~oJ>ub>80z=WX|;mP8`)rN%G1uUkac;zn>3c z=^4v&L0I?Vz*oE$JS(9L1CnQsrs*dNt{)1a)jp|G@Mt_KJI%)DQdv|wI>Pf zQnx{<63G%V`_T_bh!g=&)Hog_)_3is##c4zocXe%O--7g*+wy)zHF1x6i=ANs*PrN zz7|J{KnF3Q&Ofw>8=SR?{cvPqqp2_)Ig7?ZYtl-N0amoI9@Q#8+@QIz zNzw6%M}N9bfZ+Q%lkL=CmFI_k%eu(AMhMggi09QvQEUhsYh!&6n3ZE;uX(q#sa$Z>KspxN$QLXz25Iltmc--(97sI z1S?CfE|I1M4{d8=I(Se*nw=M$45a5GC8Qm-u(%(3!CwB{q-yG5=ryqM7v;FkDVAz$ zZpAlV^_6k7)ka87Q@4x5vHAViiq3Cyn!R|L_6+<^8V!Mc3(stI_6&Jj6L2q1;k{o9 zBUj3L0;wfEp1F$VVv7nuP?v}vwe`o$vbr3SR^}AddKAdueZnmfIQpP>?th}Lt*}Nm zsh?H#->p!N7@o|;<`@4D(!M&Zs_xrbKw3a0q(M+Rq#L9K>29Q?yXzn=EuDw%mX1RR z(%pUN?&gr+*7v>lcklQ8`Tcu%p2gn#S!2$*#vF4jeU7S*M%Po!nHV^cj1EfeZQ%&r z`ZBvVL6^ctG#EFke}#9Zfyo%yHKjVM#zKdFwdxhf1DV-y+NjeQZt4V&4}KMVbg$hs z;of#2x4?CjcbldTc2zto@!@ln(~g42wP`p#@6h0cdS1#qmJL{F&<`Mpdlis}TO`Vj z6fpT487+pjl(lPUTy6!TLCQmvFTZ845#p6iaXwrj8&s=&i;+={TP2N~saWFD9{TD8 z(Aq24x)l`pcaNw>f}`LPnQgIA;rJ<)Ca;@8L%!z_9WL`8G0f2i#A$GpWPcHTOZhrqO)SRKhp3X&wW(et3tuc!eKT>F zUmSmHz0cPA*BaNUQU#!iQ^kc*puP7GyS>e|^bflod(03&o!=wet9sZp{dtsREje)F zIrE~Rv6V<1b|3b0n?ldtLYFHEPV>)7Zfr5I=Z@<}Tt1ddN}YWoQ4ia|9d#7tPAO|? 
zYl9iCZ@H@)9+?p|mRq8V&zg*!^{|!T_M?v6e?fPqhnQ@nfgZ=AcJ96VLrO)!V# ztl|Am$_B;4R*LlHl@+TaJ*(4?psrw^qaoz&afmI~94woqR?2`3pxuN|eaUfib*2gx zRh0y`%G8{cCg#ZR_K>5h4{aoLlEFZVuaGgII8NFb?}*WrF@CDt_&v107%BZynox-O zdym!er3{fWGl**Ye}^{`*?OIjf)5n_Gd#ZhR|;JE?glHj!dK7UKU+`V3w6G}NlZ`_ zZ01pe>$WyD4&LXHTEkhtW#w#FhSSSWaV?ZRc~n$nxmP~_uKvCe$!nz%Q9=dim_!ME zk2-QCyVI+hE3!EFY1aSM& zPPuc*V?WW+&CkQvywWmbT^9q|66)`41qQ#6>BhM{VB$7Y4+e+nL$>f&XStNPzjbjz z5YfIMi|}T>KhJ!o(~;i)*6x*@Fh+6!SW3UGQKV{Y5p|!@Na0T8EpoA`cprR}`>b#l zd^(R?M`r~KkyIZ1pRU_*5QZm8w=!LO1G=}_e0g2VC*)3HK-x%uASbS&)K=-&`L5&{ zR{Dt`{`2QUGrZjU!+d2LAr#--4LkdF9a28HmIT#XplA?xdKT~=XTQeZYRRD?sk(bd zh`10zi@dMS&HY_{RKwfmG3f#3(ToFqrHj;vp=Ym*F@& zTXWIOXrVa6;^Ogmxz|^ta=NqEpJ^ZSPt?gaI(tO*_^+sAuz&fp@YYvJ9iMKi5<&5`lz!V4Wc!DUj5G&9VmC0B)tFx#rh&HYwlmKD>e7eDfMJ_KiYZ-j=_q+D7U%~{qw3R-Z}EFuuq?a&9Ml$wc2%@Hb(f=#wZl|Au$<_;*O&l*;o`C~zl>^D4nq|#*6AK~*4PDqK(ueXuF$HQfAam3d z#7_FvZP6vppK__mB&*b6H^3jpTZy^OQPtVKX=($OAmV4oFX#<8`GJVFgfQ&H|4rV* zlS9^PCb{Ifq*Q8+3-PxVt(=4-M;-KkVuf(q^M4PJ{(=EKf+B3W$=!T(Hp1bwdtAv_ zDv%A04p2V~smq_aTpW;b+bdzav(pH^EcJ^iQSd2|vw^8{vaLd0Ftr8GCv=Flp7Hrc zNGTA#Fu(3VpVZ&qxQ;`UHPX zs#X0M+=rw}j{^ImlLzmT|K2~uwj%p_?%Ob*FvT(4m2q6+K0et)l(pv|ybPRj{YC>lc1jfl~8+!*zw%Um7NXwZDtx4;BXmR6$HCIl< z7e8%JPPNywzirO+iP<{mg(+uN=Fe&thw=V>}|>zfz(DKg+|$;a0eHgM+X?c9~rCDC#%7 z=y+*yE`=G4mLm=4=B2TsS)qzz;bJ|LZr=|OrM&GgQ46b2K3)PXY z$eEAOT+7J<#R5qX4s23(6!WSqi5D_=H$ z!;K63xUI{Kk^=GZOUIJgqKoBu#opdLkB+j+fc^^V*>qUw=ocA|Gwu0nK}cvDs9uT~ z^65LX+o&F1@h0;NDNWlpSr34CGx57UJNG>HX$Ks8wXueN8td(z!$ChrGmUz0$rx*_ z6!QgV%FywLo&geUSDADkU9HsNoN?QUZFU+^YCF+)D*GJ@+)5sV^zkX%GUI+~uq$4x zQ+l%_uYk|YixdG+%X`zvyY1InKv%AIH^6a5HiMdq-{Oqn83_p?d7BMi{(b(ypTk@@ zn(&F)kVmBJE33Ft#U2<)XNWYN#{*q8Oj76>Q%7}^m-o7|ziw)1`r7f`W|(2>EfoYZ zcv&XYj;*Ab=erD?-$MHNqKYAF*QtF@g8y<7dJ*rsFRw@q70`v#QOpV%If6+BSBbe4|+FAYuq> ze)ArAE-m>s|PoYV7G zE%vU{;`i>IzyFFNb8SEQ|HEA&+;*pILCNyQDxq{ydfDXsD?-Y9>g59kGl)gFsUtoH8bRl*vB zl9Y?iF*38K^3Iy%HSeCxvE-_r(fxKMR&d4vT{n zl@e_%{8DL;zv&Zlmg8v$YzmUBiEqQFi&oyT=st@=u(;Vkttij*SrVYsoszty!D)9K z{l5K!GB~O#cpR-JzU7U`rx0>S8io3=?=57iQH9qc3%9M)9_8qFcp6DSn}NdE3e2Z zjm|+$rwva{oW*(A$U@G6mka@8k+k{+e%e3y_e}30?tWXAww?xQ1+0R+H^3YG&IzG} zPq`V3qDmM{qc=Fg^dOSpGXUJWM=-MBOieTqP&?O_GoIWtm#>mLSy*H;v8^W?c&7~9 zpA#itA90JpO7~^C0*cmIAHGVvW`vbsjl>LVMe08|OSSkr-yz)IfIVeft3z{q6tCH2 z%a!bh#1_AhD@6%R{_-CJL5Y(__Fn~-B?~8N)thrHD6@*pSugd&yh|a|<$586^#%eA z_0>ibpt8%p$fZ4H4Ctq-qr1ZzFa}(#@a$izBm9B`+g;M`JT^!`&`+7_EOG@e!h9|n zODS5?f6ITILLC-Qy8)wRl2uK_t2|>8&Y*AoL zvRRyL^5;G3ta&a#R9hjMs_~;ivsb$u0yf>N)}8OpsySakIF8IL@8SAse7*P{aXN@hdjVc#Iv91D-SvM(JkUmteQR!yY z0250qDNFZd18$_$?NKzP#5y1YB}pzUOYt|ePUt4@1&s1 zw3HWSvkl@wE>t-xp##RwMf)9~hbs6PcXvKv{HZ%hA7N|Wsl3|*G~&02FQH~rY4_n2 z;zlmwXvb9WC0n-fqU#smBz9CAB$;3MwU<@ajk~v4N8Oh1RTe(bdAg-_uNF{zr+KNE z>81E*ug>smwHA`-*|Ir%QD`zmBd>fFnBYgHHq^Yqo!}8Gv)^1Sd#-7W5$xrD*g98} zG(!1F&R52}F4ibDS$zP$&041=Sj%{(s*?>h&tR6tNQbG#ZssDO^7E@>lz3JlMs$7) zk7JC#Ce~7@P{b$Cm(E)Fg~;K+jg%P^QyEPF8~6YB78C=id)Sjw4mV z#8TlRMEc+x5jvf?28|<|T;;GC{dCnDgb-AFUazPZ6yV)!ycoLnquiTFsqEHDqp#9+ z`|W&#gXuMm3 zAp?@@{LRQ(L*FtY8{jVH`yKuj;(srf{qg_fhk%d1Mxthv78L(qtNi<7|5+nW3r&Tg zHc9fkZ z|M?$(eo){>>`ws|lwKB`M28*4NuUK-n5h>4MaD!S-Qm2;H#8TrE7=MOj1r~1eq6JG zd~c_XU^4G`Hrn+P!(o-xS))R^Lur*98(dzm6B_G165#I2cHN_(l0=mV*`gg6>{ryQ24u;`uB`W+ugq&rJ#+| zSYo>T(uxe76E|T^KucBSr-k@u*ZL@DJ>id|juk6b<8oAq(ICY5{EitaC$?Qls(Nfd zT<!O$vLqR)7h_n(Lc4lG z)NuhUHXjDte%*P+Mp^Nc^b;g0N#GGYN@ros(;ICed!c2|h2TYe{zg0W^ z%t*_0YX?*ZBBEa8wo;}vm%r!Jf%)puaw8H#dlSh$t|~+hg)Pp}uTzWG7i8&JTe3^J zG5x%pn+Q1NYIzj8UXt>~3aD!+V;y~|X5Pa#``p-%d(*{YMtWp&5gMQ1As4C1VuASc z&If84M_z~T=$8|mJ$KU4sWp1UAAqnnXYi&rM|j&<(PcSErP{`X@2;r}@d8lQn?2SH 
zUDZ}%eR$$Dd*wNDXaZ-kdT?KSL{t7KfCCNhhzp=~82& zIk;$5$2#0FdHfpA)iJ|Y@+|(lkXXa&`L3L65$yif5J4`)Q9tlr4T zvRul9Nmr7N-fpC@Kce8NLd=@835cRopK)IQ<6mZse&AY4lKz%-WzBe7@SLb9a3%nf?o z_LNPSJ@9TfPh31;U)-tKahg-d4t#3!lU81})NK-nU{BKE=~XPl+T*Z*Cot1bH(^nL zLwD+Ava;OGg77xihPhM@+i)`8FbVMT4GMrRZB3u_-|S7cMRO?d&2m#syf~y-iamd^ zO9}%oD^&F9m?$Ax@3s;tN+3&j5`G?KAI(y)g788na_u>q_I|Aw@(uJ&&CS6-+n;0; z>dRAiXEa>65~B?L?i!;gFl)(cM3=M^7t*a#sMb0|qdX?SgY)95*SL&PBRp;1vPyA@ z`H_=mH)XSQ#37b&9&s+isUqWwzlcZorsT*acw0u}n6+j=EO|tQ%+{4P7oqLEc9)B> zDXZj7Y&qYwp+jC$&GC<~PWx?gxrENd2*H%WpidUY$l}Mgb357FQbVsTtmn%)moDc& zctf>yCCvA0!(Wz64Wi3bZN^5V(k$;`j zXhcu?xd#jl<;AiPam~0z-K6bC7m&A*zH}=PwO7cBsm52xFkdKF_ooOupyd_|=9h3W z7|r~Y8`?;~er(LT#7ak%EA#gt1;p@8(SE_$hISKI79HPCahM6seu zfQxiMZ#NVH|8xnq_hFCy1D@VwYMb*+(`2?N_%%mO^VuV4^6+;Oa-*&`)@W@D^g5eh z&waoAGN!IR824Ff{I$xsOv)Q&f{gq_%VsTYfi^eM<_~6N13Be#&z%fekDmufXH7{Q z2iif=saP|ll0}etBe;}SElEIZ-pIkiT-d;Nr;ZR+w^HR@H#!5GNc&gu=5MOX1!Nz! zUP~x6L;Kn?e!^`4oxZUE^#p2;8g$Wonkxe-(V$OwpAlYps~E8A-xYF}X*zA`vFl`T zF-N$s8+UM7v3|1`$?G4!)}k6|XwdR1BLmY)w0U)$>$wqHA#W=9Vb$ME zX30oF@6Y}Y%dBtlQW`HrYgo?L7O)sjeLHSqwyh58^aF+jSZrO|jo#aa53_%FlK)DX z&DNGYs7hrrioN8TBX!K?9r6AJVbZ8<|IUdV*Hx3#wq>VI>VqQIbWv1WX2d4SN?mMs zQ(Bf7gfb43d5S>!mvn&)d%j4mhL#>6D0Y^&p+j>0S~ZPF%z2a||J}CNPCo)%_BG0- zkG%IW*Ylx9chSGbv4-SKAiaA;Qg~zxt=I@Hzr9Ys%{txnTIHYBek-0NYGx9bOxDj18t5LF)FW> z;&1YJRaxNn%xa^zTAPq}6V(;FfRC1z7R@)(r<+SxbLXtb0iPGNlf81c;7&f{&uCxF zW?SoXAll898+NY<`R_`KF6u3*#*Aj<5?6;TE0-6C&$q`v`@HA1lTm)BRR=Je;JGo z1t3v9NUc}mYR327?0`(gjZ(Z&`g79UF3F-k1O3)u-<Xep-S31(c?2IhMLy>rtt14F;2~AWM%~s#c8TUL+?|n~i{o8=b=e7rQRqL%Y!ev`gHKI+=t_e`dw-t|`4`HrIu4gQo!QBY% ziNPb;S2{#eHKX<%7X4J?CD&3JxBsl>1mCOT2u0z;o*riD0eJ#7Se^IOFBunLZ+&)oVC@N$s zy4%mAz3UErR{hl-&c{?O`_Z(n3W80--$C}p{)78dz@;F^pK=Ff$X`zOW5YG7WVJ!^ zLJ9JjAzT@UXOrb@6_x525{zkW&NEvfNqMDTDj>_Qw_jD;Z`zH-4Lk%nm?Iy0y?*eN z{FJ$++z-4y;1fA?*Dd(kQe0y=jrX}?Hs4VfD2sU+Q(GHV7#gydsm|g01L6KB#;gA) z#vRAI4hvB$E>RdZ3dQ9g?ujghd0}@#{HjdKiNX=P=TD_IBb3meQm<}&kCNcu#PmOu_Z`s$~qw#m}iLK;T; z+U{e|30aH5??>m}z%q@lZ=E`0{b|GEwdioHcs|N6ale0S+_l5!ImYw!=DZ56A`4%r z!S~I#l4T+L2FKIylyKz(RwG}-J1*UR@5axZHsC)_~<1dFvzC?`66QJHWm0Pzr74WFdKs?E%0ie zl<*_9{%ZJ`Xrdj>9qp0Al@h12Qmi{Q!(z@gd^CXl);BW7g0ZXyrx^1rS*fUPfx?!` zP06Dl>xJ?glH=G2$N+7RE@1$SA#O4$fs5xAE0jfn|M#lTlZGL|+$s*53}mK_j3sif zN<_Y0EdQgXvZjps7B>Gwel){YqS9`k>6ukQ!RDIf7})c4N5@Uuwb{@r)Z6YN@rsJc zOWg|wwb(Z`UGtX#*#E5?8K#_Z{v9LTR?aM*Z!6`@J1R|P&r$ZxlRdQ^){RNs-3DFAJZ+4^H zt*)1I+>MsqtgjPDtEHXv?z8pOG_bONak2-qlU3j1+Xg6Q0yq!GvoL>JqDS#Cw@PWoM!* z@1?CV7XsGE2$Vm*?wqaFdkb&=H|7`PBR_RM+MOXo9Q!wbr#foLfS=BN?yB^qk*wfcYlfP*QB{<_CBLMOI;^7`ChJuWJAK z`e%rs;1V~ET1KB&8GUEUUYAOmkxqpk?Z(Z5yhGt;4mypgJ)AS)=#Sy8C=}mIQf8qB zOrt4&PP%VB?>}lohm;dr9wx06;)8T1r<;P>x(?pTWiI!s)zL#StVX{r)yT^VMOjn@ z9yb7&j#;T`-yyTftnNAdWJ{0Q~pJ`RTc^WnO zq3?X4G-}wXmhWbeA@TJK19??Sz8ZuP$Mt2Wk(>hEblDk!#bpj5bU9}t^}OaMwlvZv ztL%YUObILa!?_`55FPvW_1s6ia3K(_&3$*!3drD^({!rDk}E_C>qd^# zC0+{V%<&BjKnr??Y+HeRO`6xXEXbI}G-bkKvjvQv7QUD`HP=NsREc7-*pp`rAq{55lC|vd)9G(J&{&9=+MHR+cm^zaQW7Q`CWmN{d&%w-iK(I zO#i!MSvU2toWF42cTy_kCfvEA1~=fU1JyvL+|nP!$6Zvv?EZ`R^axe_E(uX>4JJKq zzU9^z6~AB$#3OA~tcY}M(Op3FAhORu2GAa>0QZ&tXb!W|Tz#s^(RMj&NS7#P3-r@> zxVe#t!&*D6uCm9<>Q*P0|MR#;{%{MGqZd`;E007KEgYZ@$9SMQhG1`L>JGV2U6XCVTzh-@22);?0%sZzUzDVF_Ulf+Py@KDP>(DREFznwE>dBI{v`amnA=#OIXtj??e&3lgZcG-E;SRCSia!`NUUOgXh~dE3Sq8 z&1g)3uW5~g`|ISB(24Er6A`mXY7k3Tf7Q%7stP05PN~`+?Js31`)8DNgSExM*lq=U z4No*C&LOC-D94@vUuPxChl0I#^v--8CHP0YLVLsX_9R|i){-8kcoT0cTNZdN1gCj% znl@eU`s~hP0kdbc++=$L$zvAQl;H~D{1R2xu3IMB5wlMD^+?@4Zsd3bX{dN&bnNf| z{JNcyz)8hr#g`e~)0CQCdyymdR2XcxiauZD)Op#_F`qK(aVT4I6mzVxbhYSYSIg!V7ITgk~Lb?PNc5Q#|5IS!`V|~`Fc!kSMb(ZH;VStcZg866b 
zUMxSb?@l&CfgCZ|_DPw688Uh-0j!9@>h{BvE- za+e~246KVcVCmW_IA~(*y3_vZA2&bFIEsyfoVJ_p;$<;Vr=eM0RhHVzO9>05ORwG> z`g4u2LWfT=<#EygSc5jL(>5ENbe(2Z74uYb04>Zbidhlq zk?8*UE%*&&KVm7EJx}5wQ=0R8%1*S?%)HFB$X)s@!>?Ys%Z-vNHm;+z{gYp0G36P4 zKGO_#(?^+|%EI+RRQtaG3y~RlWbicIuU<^(!56dbK*%N{^fLiev;E=OGl3Z?F<}+@ z^WB|wY+l?GL@aqd{=1w1X_avibJQd%eFmhs>iu(}=0_g#=Qi$A@c z8@j2T0L_nGodkcLn053{6L3mjrw7(aC30KdDQ3W=WHtpC&ob#2bSLk z7_f(=TF>VRK%H~$R$p$jS-`ye2kG|_WHT#Cg z3FXACIB2N&d|Z~A6Z5)s@uXkwK|av!8rK;E505RFL-I>P^^X{N7vuMe)8*QW+AN;* zOMDi7#-Bf}uBsviM7afZuF5?y;A!PZ#qL2yr~1$g-omI_8)LqnJ~ZCjln(NNEYECs z@YkKcRCvz%q8Dlw#2D94(BT?<+=0|5j4<4Tt>27ukCKoT-G;fUy#l?dbk*U2l;AT- z3rfkFwfJ`G7on+S@deON(bzx!j$q$TjTnyu-)<^4f9ypDZGnF%R;7eN(OHV`fO7cS zFJ#bEDgT`tvh{6NYG|rH2pv%T+2r?t0BDjadnwUwyL#nf0OOC?gmIJAFSq_AQF{*0 zUl}Y+J(yi@ApzI?pk+{H!ebv4bWP^aIxcIBHfc!_7IN^`4HgZ1SFy|<({-;z_W--+ z2QXTDL-;VFih>ofdLGT`DeH+&>oo{L6)l&_3+yfLm;2xLJW|0xJ~Zxz1!X zjPR_3F8eFWJuEcD=`KvvT43p4->`#Tg=QS)0VS$&0o3;wN^Sq)$!69!8rjSv@^a07 zq_GC-Uk>nnNW-Nhz@GtMS2H>Ni11@>ZmusmI}D}H|L&@ji+cF`IH?!~#)u8HxHz^O z{?^O|c%-&A8tQ;C^}o(|dv<#~rOC3R9#%i70R@A{yQNyr*xFIR2yMjMF5B;N$+7%U zM*ZPu#!{jmBl1BjLvnrm5$`$MK6FBPxE8X#ZY;2Ww$TKrLPmF^c0DTD3#vgLyGeRV zgVF|Xp34uvu{VCkVL^}<{&uqA4Xf(b#8{8#9Zq-SGHgC9EW=g{I5XFioMhs$TOK`r z(&P|wN8x2K7P@zL7_l`wiKFLJxJy5(^y1)2BYnmKo~f9XDqHVHIccv3x4=2`58GEK z6SKB=%*8v@+c-3(uv(5CM|k_mqA&sl?=O2_0YRn1_Fu^d*F$p`Wh9Z%>Y}(Y< zkVyA^tHl~#XTCATi&YT)l_4?$-@TSoC_&yTZwDv)_X%Z=jqVoaWeuqk?wV`G^b*zC zxOXzP|Cozy=ypMGL*e7U_9EL_0MiVP`1Gx3D?0PF%wH#i-#B;C;qGkE^Nu>AON;^2 zS6KzKH^)+#WlRz-H1*!f6wEb)wuwq7icVC(=6-zc2%2_U6)3Uj0W26wKQmjP?nj?fqU-aYxban{{Ewo65nv(%)0 zSD9IlTjIOgWq}0kUg?f+-Koe1(e+5Scd6&XFi}Y2P=7t@LZ%6ZrDLhLFP76reQCM# zRCYo-7W$?Tu`}lva#3hM*hLU1g-za=QZnMHa%~@F^snZBqP4`;(`<~GQ*DCpl&c#E?>%r6fjxdQ?cl=mI7 z50OQo@yh_WRjlF|38Y&8^!>eb2$JKIyP05_0nf8Bv~5uG_RH8PI`y(I83oyrXU)_3 z&3N-bXWU|8!hY*?Y1X+8J6>FQ5tBh}*xs0DR|TL@t^OF&ar7(`=4Cu4l_R6u0C`v2 zE=k{+@v1p$MiVfcHobDYZn}}qc)%u>=A9brNqk-o+P1{y68fhIM(&v>J=Ah?3CB_R zHGjO+{jNbR(G}g`TJCb5rL!@j#AVEsnTcBCTEC|Q#pU)Rd1FAC&@s^iVravchko6J zf@X<(VZbqJ z4J>Y%p_5e8KX9~bM6^cU=~>#oSYxg)0zDQS$|CY;m0VaOz59{nav6GYkcM7lQ8vrO z5bzrgf%pq=;Y&P>NlxcT|NqL+Y@oigTLNX<#;Oa37kq7u56iLsrQ8=g)QgXT9#h6C z0&|Qst9o=^-_GERiL`a-zvPt0_}vn(sRVW{+o;J;fYLgHY`cWnFp2g9qt#t6^r}X> zM0HcsBOP9FQ9pX-#rHIy8!u&x5o$IHs*lF62hz`LC7I8ID5`@j3{G8+8G{WBmKW4x=KhSNlrFx>;H zrOa65^69HIr`BqvEMZO3i8C7Nj>^Z*?SxVFgBG_FL~HZOisprMD7Dd(KI{E6J@w?nd3pg0%jP&y?m<>T(o5EF?AsDB!>r@K$awb8t*61@RV9IU>_NaJCT8X9S7{|90sbE<@SA^ zZ_~OOO?xe zdlS<>bL_>~00V59FAc{db6NWIhrM#wOysIUO?h}Ed4FJxT67z|Ue8%;t?rp&LG@F;UU>Vf6w9rRRV4jAx=m8tPx?vj*`XtW^Vv=) zq$qU<)@=GxgaGY=WK0B8mv#~RkGFFx()%rH2>uV?itb_wmtF)D;F=1fw}|$bfjd8m zk4DiA#@ZLkm(R)FssCY&Nt!#(U#OabZu=vWBL$di>BJL?D@H?7eILbXMfz!5s%ud)cY{r29T=IjDg>1h;y3j3lWGxgIK1F% zvdqHZ$5cN9oOee<(nY~98@Tn)y|c5xuGro4;vwz|YKUzQEV@_;b@$~NoZ);(u@08; zpt_Ck7IdZSmkZ<1{~%>2aT!L~@W+AEb-wwOr;d2XI zRoj{=$G}F1<2Krv$v!DI1gN`;$iTitEYh)K#9&#l3DO-b9-)NoT{@At@(#cHm^aM8 zIo-d0A|biftV93XHM0MK+BQifRsA?YS>0pfL~+4itL`=Y8$LSYiPB>B>W&moNKtl& zivV#KA%t+N%pNDO90>+o5^Ossvu(7P`eqw2*Fl^z1@XjFb@u>6oASAM{db(n!E>0> zz0Pilj9(}_GTTRh8K3&&bs7PC@2SOX(Ibi|z&E*#ChH0;iTB*FknlHKV!1mt%Hl^; z+E$GJYUzFOKS*?(CbM)D}zTx|agVWz*Qp6$5_r>d5 zz%WjHmy?MgF4x5RC%iC4rtdID-4U0U4eqe6k;O0!NxB0Cr;jB3%R0~@0Sg{t(~iZG zHRHdDX;=FtB$f_1;>q6n*Fer9N6G&_#w8nBZa?xk-uRhdKERYJ_mK8fDKy5+;AweG ze{y)!lP#)p!i`y|$A{N~yxB}TlSEkAqw2dU#c6aKzpN14)P5*AVoGstQtgQ7%Ms?H z@H7D)8vEB@$cTy4k6g^AiTpdyJKb|(;3;>vOhM-f(T;77@f(tR^)kiB6177ek-nd) zD!B4R{AJcj@Vg=R?Zlom9xu#fEH=`lj?3k~^L{mrB#TprzT!2WPnvgwRXYBFKx-gT z=Ku6S_52$x4o+X#GB76)ALRfs6$mr`+9hRcr$1K9 
zl}j3X#}pbL;!sofrfu7kuN9Mn*8a&=`OB9R^Y+_@O67D`{@l_`+4AX^jy-D6u4k(K z)0{-`&kI*dN}{k3fD$nyon7>MM&Trob#J?d>wo(8 zQDI`B7b^U+k3-J+VTF0uW6in4x``3DdNQju-MkT@qwjFXYbZ5D9SHU%ohk1?W zk0}$ExVRKdSN#**zK9yKN+D^N)HJP2VmS@N<{A&L;QaJTo%eS7sK#+$I0e?`TqAH~ zf@+_^+*Th&Ke-e-*B9|h#@r6-4BqWWzSV3X4_~W@dg5m6b}=}WlNu}IhQbMdMyr=0 zR5mypKViV(%1@fJaX)k8UG3l;XPp1li~(3R^INp9Vx1t|_b$9lIJ2Q*nU;>n%Kd4; zpHcX&a$a0m!HeAbXgyE{7qco_vkA^7j=r)Ie#g-Qy=lwUyR5PtrOS>Ys&kIZDJ!@l z>62rDRn?i5-Zp3PelO|LSG!L~{`Q?VC{>-ahn{?#w~zEht8gi2n98Req35br+_dDO zB5B)|N`pjIL^FWJ5&z^Liz8Ck#dE)zTaYeGGnbaTi|Alfn~>Jmscc`%CDPd(8tKK`QIWKty)O?&RU>#D|{ z&l=V=aYjWQ377+6j;MwSf?QaT9<5)N!Tez7ojek=?Y-uOrzT-Mqd?7aZ{x*oe_;yz;+kk}IGm!TyoK|ZLj3omj2RaTcUGZmBVOb-yR$4zD z5Q(+Ix=r<$LP)4f^GQXCHDA2_!nJky^K8SBMimzc3>N=i=1}7Fv86sY@yjWdY(PK5 zuMo(=<%7YsS>0tM6@5e+)UI<&!+bdDa`QB+makF-bbk~bBVI70EGkqS{h!XxI>FJ-_F9 z{`0$i{`_pObFOpF`-*eUbzKE>{o^H8GpwK%UtKV;EBHOfTqSJe7>s+6)CJU{f;{*% z4>KfkTl)wmkFeuX=S(@t* zCK~k0PX|pkc8@bE8NMC_ihbY@fKR2B$+ud`pBj(3^SG7?6}hkVic8j1LCTx!RtI;8 zXrrn*6t54pz4o{4OH6oD**f0dUWC7GktjK_>bnP>VuBUHxzl0bnwONZG4g21|7CQy(OsbPl@+Ft^hryG zFt!@1OSh-x{)J@|c!qGZauob{w3;XJzASIO_qBfmxAPAZ-I7K# zNxCw}RJ~7EhKWzpsS-4!UsCw}hZLVUyk37{Ix+i^YS}ly$2|Uq1&9dRYO+ZZXU7ta~1tNP-K0J8#$Om~C@3bR(-S;fF@_XsPwwS{D%h>LP zu1Ch0leiWQoa@mn6xr^xJv94nc0lPGfNI4Em75k=aChu%>TS*%f%p{WmVtI6mn(t< zbCPR5Nmmjb#ErWo6IAB9OL`P3m|U^`yyqFuUSD6X_hX3F+V=!0EqvAU!o*nQH52kq# z-q;6*)D~n?F4p}+^!J=*q5UJ_^L@-E`q3idP0zO;k6*MQ46mMJz)f|ClAFrOkqwkvOH;TD7-ul?8kMk>!xr{NYLn>E@Nv={-W-7YG)~Vj(WFE{D@KTw zrZcWr2D~cT>cs!;j}I7EmUC3NW_RmD+f(%7_1Q0s0esGQ3zwI&h!3Orb*P>4OFN&B z$~G!X{-NmWcbNR}uKG~h9C8};qQ}ZVdo}^kA@r_!WMOWHS?huPxu;S5Q-uZa@P^P= z&k0r9!*0<(Kt_1mv2^~~r{QjEsFKat=P*#|d`24E-I!Wb@0E(lG0_y~G5)J>5Y3jz z=-EN@VyWew6rl!I(F~S_APtzYr6rjW)+Af0Ss+7E6TsLvM6YW4RQfP0Q>1NDrD7X%@^9&UaZ zq1kzY@YeT5B3&z1Ok#t(feaFLYaTeSn_A9UniVm2T1xw=csp`jse)p6Wa)rmhT{TwP?SFzTeC?8g&h zQ7u&V7j)nc!NdgTd#v7Y5w}Hhl63W_YV3UYx?);x%Xo>aerIX?Pu7{424zkJL=jY! 
z#d(zUD0%N<-YSz&e!efn5a;u}^o z!PEd%*MX;!5b_+IwQyGSXt0Z-?KI&wvjKhWgp$zSQ&@6}0Rv;$X2g zTu#pM_QI)uhNihv5hlt&;nE8kWKtWE*hUOJ|zY945US*dOH-L;7P(0 zqT-lgxxna<8bx`+Q`t`j{*3bR&1U?iwWyMh=-x+MWb3+>&4ed~3RJ)fM5+d2p9s(a zP+7;e8??{??*;GVz@xX_2Z$%4hRMhU^GNIV5}UF(#m$C;EkP$;Z&zc)7p}4cr_KdV z>v3XtvAl)KMdUkIns<3Og2E&yW4TrOis8a156lOHygM)J`sjOi0`qv$nEk?4-q*}o zyDIAEE$eHt>L~j~lBJW`#gn7NIIiF1O9B($_1T1vp&2|)F!xRH}4W$PnxQgDC(^cKix^Z`xNwB&wM4_ z@RnxZXEbjtGFAD$ao=D=>Mv%GOPA0Ip_N4OKsrRtlq5_03hjrlAbw0bKB$R)2kwfP z49=k;F@2KDv_i^5RA5%KR3%z3rO!P}N1gh)+tpLH4V*Vzdn^hA+=!ENupDbU+(Q*c zlyE(NapQ8(EVg#7SN{0t%p1+LEd;P4vd!sUTXClbrs-}6rPGCA&3KeCTZTQZ zwWWni)q&3J_obH6z5Atd^&waN$Xn#)dPsj259(H4PhRnnA`eUw>&j+!IH*X%#4s@4 zZY_UC%@W;GpK!w@U1QL>_WE6)?@aJAVWcU5*ogeFW5&#-E+kH4fV}% zp=cj73x*M%Z#exgD@H*v{G~um2Wy(2zO!p7QJ;#5UVgZxn%3r2MDT=SWOm2YlRx#&6<($76Q~h=;hYeSJD+w09i5k3YEiYj&rR>_mPWvMwud;c;gSnW(Ca~%SS!U`ee0Fy7dqA7E=8{ zIqV-lMyy@tT$sliDfa9xGF?Q*A5sKP9yw4qE#m+#E6~?go)W^s;9ljm>k@oh`qO>t zk6&B!651_U8BW9+1S=v7VNodRkbk?ir*u~Cwj6%Jwc>*B`jep923J*^E{bd!e-)F# zS=c7%eb0NN@2mEe<{*A{=19Ra=}|ZpODRvw!rR2tTvhnbIiDcFu<23nD9x%~p3rBb z2Z9=Z^GVhFZh#Hp#yKO0abgbfX*OukwgHMDlnI$ExnM>p;O9H2rdfu9$$tjtK zytmt4+uwLh3Vow9aCYJsCPH|Eo~5}%dhY7BrGGdBFr(r*5V9V8I{2&kDSU6AAISY5ke6~1(HO`egPB<2j zfyc7`sem3C&|q?S`1IEx#969ui!rV6DlG{4&N_BSb(!h-h93A@VxIsI#I#zg%F+xt z!FvO2?@NC%(ej;o)RK=Y|{7q_rn zQu9s6IRAt=_Y(3AP#4G?PC^8cvSdOJ(!40r;9yP}F>v~+^yJbkHIQX#_CQo@pLxxc z?!8A?-xF{>@0!nu<5AJiN*|sC*xlFzt=ZOJm2Rb@<=g8=^_~Qd-)vcI;$^7?diR9@ zX4Iap>z8-qkMbvC{wxu$AV=KWwAoiv75t8m@NSiwv%9>xzQ^h4bGpBLcsMDKX@!|E zrWIOg@Js5V!*u1RCcBV>>62*>Z9^v6VL5Hd17nkX)C$|+N~se0^8KoBN3nIZDi@_C z4f;cA(M$@D!m}njisna&gMTh|?;1r7W=MNCl7)yf)VOSG(W(TJm?Ufo;yDb%Jv{95 z(LUvqhl&^H8xadH<@;LJxGN-adiKKG3S3n;eY~?xPY!x_tJ6@%uu6WA<@P+?7=r%I zSs2Y}Ka-?#z?)+eyagGHVXg*N*Nm>{SL9eb+~iq14Wib@e2s@{QBQ1Z&c}ZHhi@xa z+_!fV>X04?Tr~lX2OjEWVhOA2inmhUR6tX@c1EdQ+A(8`5BKN1Uo&uE0x?~S|mW0<&} zcpjm@U6y%;qAlI;omwsInedDC_PM>`djeCqh6v5|XYx6jwCf)lBEFxvWg*#O}P+Lwaf|SHQ!Dd#*Co$=F zrMAxr{G*0$hFNv4k7J%tV)2zhK?&^K@H3mvks*;Pbv63idF8>JOAbd{gbqO#m5DnZKaO1t#%c9M@lsJsOU$W9dZlocwhLbTsc`bIM^MS zex0yTFvG_TyqQowvA~~!H=!;_#|(T-q@3sslVtE)uL`Lg5D$->NTJJ+_01& zQoL`tc)M*r8-f4Cofgw%)b2~w(9ChAhqaOD?>HsmQ6Q@OXU6_D0mHh696t#Z?l?2? zLoHdc(j-%QQe((Y+9*o%#EL-aKv2Sg>UY4XkmNv`<&?hRY}R<2u0qX%iXF`cQaYy@ zi>A!27m)JhB&4N8Y_r17INt;m>t<^!E>27>;t>~yqNQ`|3Rt1S;`8%eU`uhPJhG<< zcDs>_zKi|*&VTa=`ty3>KY4ry>%V(MvENjmhzH>RcfWr8`{`Gtvzsavw) zCXRJ}Zt3VJx+>sHC0%bFK_y*r0@C4l^@8=C=h~T$BoA0zU+{4A^>3L!0BxB|)z;T? zV(R+bEr8Uk$5ZFrUfF=N%QLDo^smQuQ&}6wkJft&5Yl(msEeWU24pHSGJ`UED-Uo# z-}#&8-8rr%Z*|kjQI*xq`OtgtQsw>~VV~)-%uNOLOpWC;?wcMMx3?HsA3WKXrFMOu z%gcUFP`j>pCT=*ib?}wfwVSLbsjVR;(SkO6v>^vGPj zgp-g0e5B3}C>Za0F!d>En#DESTfHo1%F4J&6Fs`|Hs3C_cXd0rySut>rCiN1g&j4M zm#7rNA)}qt>vu-7X)pYNPuPPI(omb1fEpc7!KOST$QX$)g;_)w90a2!z0Qw4tL>Z{ zm}wvW<@oylCN8(Nbnm%nh>O|dd~Br`VM|8eNN^tn z-;ULNZ0>POI#@55jxPS}YHIE(Av}3WJ`1VO#FO_mDF!Lg_xIG;eB3rz4p$yq->ooU zi=CZQ%ZV(&3L)<7arx0?AQBaOpO}IYj_Jf~4KFB$D#{cPT11^D>$(5GqG8V(FHAP6 z{^Su3JQ0gBI{HqF%toaz7^-cU_ivubtSw`nOri9Z(SR?p z$}1L4jIsi$tmYYmOPj!tuYG%h80mqCDSk#Mc?m=(yT`IA2Y-(xyrqnuDNBZwp#vR-*F`pTt-1b*@%S830 z9XkSJIZeZ%diGu}_33IVYKjKK3t4$;FZMk_h|@#MBOIU=%AI6l#3i*clwR+)_BWR? 
zkpfj?M!J#p-1nu*>MtUGNbtCi0u@^^@1}hH1-EtLK85cuMCa*mB3NdnQ`D#B4Zn1$ z?W@ctby)9;++0Xiv}^tnXWwWW-PRNJj|7MxSKdux5mYOHa+RsU z^PgKYry-VHq{-^S`G*7tOuA^e6@Fbph*sP{QwuYU4(VR4rVsVb#{~JEs!A*_5 z$3`*7nKWrYa)H%|x){xErWl`6nWd}35yVt44wdq)Y%7~s5)Ro_k|{x`MG>q=WYw+C z%+#EqMvpy%dggtPy3ToBR)?E=$6;=_EO6d8fuKehUyIxABebts0mw3OcGg+9RF=9m zl6v4AwE2E<1TixzxwdX^Hq+8=!~OE)d=3KyV$=FkX7vJSUtx}mwqs_` zOD9Q%7iyQqz+CJXj$8SjoTP;VzB=?vPb(Uoti=t3NKFb=K1;Jc@t4Zjd zVix=M{P0Q)wbkjQ@aaDGDkgby6Q<}WKmEG>4iqr!*u*Nr`i5uOLNg&9wm*c`MUyZ4 z#gT?KsnORn)K^h5Sn($`?$m?5!Cr@9vy44wTU}R%IqAT1s9(p7^I(o+)TBIDxpyME zGc?B;FMp31kZ<_$J`ynN&(Z3fxwO7i;!kj(4XBfjkP4yn{~D(79~`PW*2F0kDSFh! zIa!24bMaC*Q}-4{({hKJqm8?s3{iOz4gSC>I~P+Yny zFPVo$jKv_{^hY7huhYvDv>dIl2u^t{#F0hb1Dr0RXK-5zDu2*(AC}m83I1_PkqYDE z`=0QCjMJ&{B9YhG7KroYNL~*!;O$&G`dWe(@#8JWelf+Yqxm=fbj~W3AHJ=%$KOdc z3ZQ&XvwjX(@LCJYH%nO`OnAmkZ zbjk`%O`a^(8pVk^$i|FFyO7a7;FXZywz90C(MA(EaGQKa(O^NY2sTqhvRngWQ{oYR zZU3&3MpQ{j-nCE@M7$7O6xqvv&bN1SlfioOzg8XP3yvQ@9()u>FK zyZP`?l}k14jj~Ds{ruOBcxr1TnfW_^C_^I{za{FI9Cz7tw#PZoT_FZ!2!P+RqN z_JVQM)x|OLNYx3zH{T<#&g3q>>HGZFDJRn$vx*-@=AqX`LTj9Bi*(KYea${Wle+ua zbU8k$zGdPV568hA$GFqt-rr6Li-$A3)%}R*cYH!`J|u=gmX|zDOVSB7OlLgPcCv_i zRm8%2%d-ozLesA&pU%N%z45Kh^GuBoRRE+1d5Y=tAHtWHWhO-=Imsx57Am}tXEZ_1 zhi9^IU$0a-Rp0IwEw}hqL!V4Z6 zgUA?Z9gILo|6hTeQMc%`Yb}M{tr-k!*5#hnomJnK~nNycx$96_fiA)D6R>GO(l)Ieq% zh=K`gLXk6Ayc2w5#C)%FdBVf5C@;27up8GShzbTAgsiod*;=a4tYxK<Zwn@3T>tKi&CFK;+HCdX<@3yyYk>cfN0=E_w=T^Z2gw}SDBmSeM_iEfqUqkaim&sw>F{N(U{c|-L z(^Wc0Lo9^_Hm5dg9#DQ~^)KCrhd(;wr3zHZ3H4ppt-Km8>_Ow}TtAehrVcO|=W>e8 zci5w2dFb>kr`QBF2bRv&4}v@+nlao~2cbXPN~TgpJ?kzJtN{}JV*&k=M^ZjaJ3g9e z85$Uq&^LvM0);;&W#|D^#+&a?2Mc(Kg zdR}*2S39+f2ruep>kO+lUW`7OuzLsA<~Lz#x$N>%aXe$<+nxP{xpYnAb(0mV{l{VG zgU4lGuLF9cX!8zX>^Y24K2&<{sON=j!m>po^WvNFNbN-qPv+BaqWC%b;?2Qa^V4y= z2bA=}4=f%75ORK&Bo$f9ann7@PT=8+ic5FZ(S%7HNOw)WEOn{JX36z#kCc#<$ka6N zNDEYiAiP4)+)Kz6e!#=B|CUWBu>nJ&2l%M${x9u%N4`-wt(I$~wus+`I>tf^WhpL& zTtpuDT>mJPg^U!}zGDcnO&aI#n6bh8`mLZ0-dIV!$u#Ri_-+l3`Y62YDyjigO2CQ= zj@TxdO zA%z1i<9~Nl`<&3VXqWj@U<_bbOw2cF6$}0B#F&_6NRmAGONbz0;*Hat99;3L`CH!D zzMv;sVoq!fIiC2NrWa5t%AL+u`)HX1uMgy#74P*VK~JwQmdVC@y1V0;ng-yce1SDX9 zx_CNapJ;vbuOvUe5TGGg3BASa(!_hVbt?Vw)*==bipQ(7H5>OMu^Q{I8fKfe%hL&; z34q0D^t`Kji5bgS`L3<_OK8aS#hbp=^D>HW^PCh@2;jt$dHyYb&K{P(&COgz;{+$; z&zRquu#)V9md?9oEEQ1COkyjWlE)WuJfna-B&Rav{+NjKq!$NZ_E$8s5atBxEDwgu z;qI^Ve1ZjaegD{GJaDi&ub%U=A*H8PdU~DZmV)+qJE8F4lX$_N2Yq2eOB<&88Cm*K zU@pZ?YoWEu26WIo3vzz6i+yHc^*zRM75wnT@tcC$7FLvmnLfsXcXF;$=v^F<%233Z zV3xtZ3n|!6`RO}VU$O(+;|QgG>{%TY$QQdeEY)z9Ck{EB2J!wam#!Xa9$UKKxC$ea z<8k&Zr<_xe)$u={u96hq3Q*Vl*f#>5#2TyAOc3|hBy-(;`3P2&IfrJv9x5f+oG@`< z?C#pp(*8oQ4@=T}_x7eS9mA~x*y?yDzBw>q`nQ0RPI?*M%2Zx7j(>L#+QSk(}9NEj(EK89{U)7HKyj@b;ibW zqMZ{NlcM9AoEHCXT8^eTyzfD`1lAN$OIzqq^eySASsgO(%LG4r9{gR2eN6OtpyW2( z?kjP1!B0c}qUCq@{t{kooYKV`C^RM z=dI<{ABwk)jcHp~zA3~wv+x=EAh-=+N6C?$nsnOmeNLpU^J&iF&XPut<*!6W_PqPF z2XS>Td?Ab=^(}?$UV0MD#(->+YUw}SiyMRprg8b;Lz;_I7}ti7nBomZTS@rfoPGaN z@u9)25D}`f#bn?$(Cd2G2AiZRK9M29pIBwRVELyldbhLl6??ylYLn{)meG90K1%fl*Yv8CS2#1dH6xm#V2L$=Bc&fZN8xHpOJvQxoKNd zcisBktK$Q2?K|{9+5@_We-|?xBK|A|+@>G~H+vaP5?Lkk^P);ZrbiO&Khlq|Haf)K zae=>F+_3`plmbpoiY!{IgKm&Bg4Weepqe1M$cy*xVTuuxk+r2PCD0#s@h;`XLt^sJ zu>Lakh|7mXJ{Od-BQw|kOXf!)1J8s@?B&l$5$qAG(_OZOr^}aDmX1^|Y(7^5jA)RL zR(*;T_-}E^L3c+SnD`3Y_-~cgzwg(7YrDNmBIzSCUrk7O2lJ7UQ20Fs(>n3YwCEPF(%=P@{%ZsgoqFj5Gc}8V#*K@P>sOzet0P0b3Km9Ch!31q%0`{ zQ87+@1biW8sv&JACkMd*JcoyX47GrOd4CD;K?r<6KtSg}LO=tLkni8if%@M+g=)-! 
z{@>3b`rltDjzj~z^9zKu*jH6|$kR0>Fla78_%+wiF`O-0=M%YDL=b`Ox9HzUC54fB zVx|eCCKM`yG!1D9#Z$6kR6)g5$+I6tnhMb!M@%W)K5%q{v+K<6-oWF}C)Yk58`;;d zzU$}bZkYr3myc&P<3d)IFMQ8!_sdq5ZFkvVX}$0-6hGmqp#ER@zzj40V~ut76q@Xz zvt|9ZLinKzP3m<}Qs@e|p|Nkm_pXMwq%WWB<4+a5*q4;8w}B?E85ojVut zGgteSJkq_qzbN5#=*t|;2PWnFT)j9ElbN;8aD%0*6-@E+SaolV?p4Z zw^irU>H*RhUiAlOL~rC;BAePNMBZd%^Gp{tDX+^uJiKj3>R0f@TL#CMkj zfnDLZYhl{G$@uK$T;tOrw%2@md*At*tlP!`LEBM^GsonXllJGcqDxwfs4)o;n&Nu zRL(9uWs4#es6|~e-6moQG>(*i}{JiwAdLVSaR@#YhW z@qE^Dt`F|h*YVg(RI;Vb*4}uUKQ605ed+w+c>=VkLodcI_#X6jpT!v<|HxiBcOODz z+t@>LW_7n-zMcSSRHh$?se8w)GhGKh!!9Bu?Fg_$WhJPNr@!B_Ma@>3+Z{%Nn_v<#i;7f)XuAVpE zglwaZh z{%g(hhADX3l_mydxObe*OQ`X(E%b~$vqUB>O5V1@%~V~dY!kq zu?d_t?yOp>+VWrLtI+R4%IAEo4s)AQ;5+&))p4D9e#02%9IYTg^1>en)}xJ*X^DuH zVnxo%+?dLjf8_)RCZa_@y0u@EJh>@e{jUani|iy1CC)?pq)#7)A0MSm_IeR2$B0qx zt%A2*f{|H74ias+Z(eAV{W`3j?>(Ha79hBxbB$u>4&8hveIzFi3F@ZnmdZa+hB5Am zUfX^1qL0a#B5wzlE8e{X6#;Hob4?qZh1S!Aj-vK;lJD4_S42~Ck#d=BjX>jLRVD}t z3F-NRkM-`Gi!Xtt>xOi$=%Pq(uw%`21t&sm)A&Q{l39AHj}UkZZD1*3?H(2uI#81) zxW6Dw5gUT#?S0Kb;aY?c&9{y>WL#6x=LNl9XLI8OySrh-1xz7zTqRci?CJ|K{4jC* z`@7n8eT`n-dAE08;gvs{Hw2t%^`!S*O?G0%-hG%zPWTma+smolTckdC`=n+fW||)- zm{jO;{%|9F_r4je3stNM!AWFL|2gK$?Fx-u;u7r%E9(NL;0ONujQw9NTJ{MZe6-2Q z{`p^JIZx|}DQizgNs@d{yiw~5umu>6WubQ2T)dovK-CNiRM0BdX;u}|Zx^_fmQ1eL zsCatrhKm`RBpT-gO0fc0gX*o@IMkw4WuAv!DE+o$r#De_2U%XW9b2_3uUSv^h|w!q zVA8g`x=PNbg&ei7NzWT;X2R(v+tq(ieHTS0-jKbZ_`V;oH}UY=64E(^nSv&W2n_F^Su_-al?2Zy` z-Tft#wp8}Fh54CQlo=_|saTVy@@Eulg~XVf_EGM?5t^InTg=-j`Fd$t7)*bUF9szMB8`LW6a9X<-%dJTvp}zpI>lO?yMh-A z+`70$nnmTSc;bc$3dd`XL@KyMZ=-WJ^*VUzq)x6ZV)A;9nEf20G_<`}o>YySN-7$J zObQvE|4P*M^dE=S2~{ea_S4kH_ic*6g15IUudjqvNZDE7+6?CU#Q-0#*(!D+Aw;7j z0~+>nx63}HiC&7%-{Hj~9*h^;GD9=1LD*E>;w+nxXJeRLuXdh6q0LM~d#g;g6%JY76`0*A#7Hfd`WHedp`k zBS#oaXTL>fLwuWXv%ZX7;J=KO)CwTHeG8y4f0YATqfZ2Ajqbf<{fchn4)ZLJLew;M z%E*Q|?Mj=@nVx^g^f2>CCh?N`LmDK{&*jv3RT7U0lI1~j4*NmLt-I7o{;ezZeybnYmWBnMo<``qyyxkAK?4nz~uAbSGF zMY4D^ZAS^)0Jl62GK4b{9s7aA)XolNhha<9YWCx(c|eaMD4IR$pDXnPq6o*-XxYZS zHO>)z1F)uj+P$7Or@hw|8@Xu0u;S71fp1YNW)%#cC%yH?`|yinoM#7?)!pB1 zLAj|K?ghJ$rY9YE>?_Jos>`qngDce?Kl#A8=x5T*9`yD28i7AU`lWWQ#qcegvV9Q> z63oKTLb_Bsdvm2HxLxKH-VvQUv{Cnyn@f}4ePwtK*JIpvm<1j}BBPYafBMwmDfuEJ zi@X&zavwp(e;ulCZLjN#+W%^{F63=nEf-+2&f6PkPs5kw?zpcl{QLgQ5;32onuI06 zQLn+=VdvJ3KpWHEvyFB_Ow51iGDqurH7ljX?`?tM-eZ0_6(Yopsy>%ZOJrQQ-%Ra@8z^e za(~&ar0tWEFEn$P_v$_?HBZ9>GF?ZA8~AeI8ZmEA)^JRj>5$`CYJ?mmAh-XatzRYKoJOT2lXC z7f79A@T-lpz|?qb({j#ZCHl<87{wuNu@@PGRQd8$@ce zyY%;F}sY2uJ~ED)n!u6qra4Iq`Nc?fjGY! zC?%7^L<6!oi{dqZu{Muh48me-OANVIPTsl^b8XNt3Xar$bBFBBy=XJ+=40#^1*0{o zla}-L?REHno5uVi+e;I^^&8sZ;(~U}UrdMgKNsR47E(|=4&;gjs4Ss=Wct-0#I=hSw~oZKL|E(EkiW*9!c@G9l&0=15Ks)xjea$pkw2WgI7t+y#xw^JSJsKVG2je34RCfRK#)3;m^qf)`=JVS)o+ zGX7$aTi&mF|6I``2IPg z_~Ao>D4O8s_2`&U_Fqns`wukwqRY~>nboZtSOrMg%uz z(m)z9gpUv!Y#5SK%hG0DlU)Y3eaglQZM&B`Ai5F|(d9RL+CWOUW zDyMhnb*gq2ZVMnee+&~N2(=xtxG?s?AfLr)7Lg0daog7H=}@y}?x3)2blG0*DeAyA z-X@3$;3|CKfs|)r68^G`wx7w9KLg8&qbNG(7Q18a6(lVHJ>mDGcB8Z|l<;@yhpf6~ zY9D%drr)svDi9ejSTBv)p(=%iew{&c(jqZ6pI!+XQW9Kjo=WI7n1>ntjHrsRbnLre z$1@}52n_zTx4gA_4-4sJquPqqx*KUfvpxoqs3QcYt>>jT`~VImVp`f;Odp%pJ) z@_H>KTY9=lRUXklF?iR>`aqbSA1q5oPzRq;)Kv_d0sYrs=6m3_#k_TkFGrn{{6fM> z&%KwB%aqJUQclbf+f+G25^X+?5$tD5=w$n=as6q&fzQ};fW&RpBwnr-i20?^$#WDe)#0}VWstQ?dIS+uEk3>vR_Dv;}!YIARh2OxA8Oo)@ zYLVIr%QgpY2pgZx(s>}3>tQQH*A?Sy`pMd^`R{hg-Y~S08Z{D6zH+9$GmDtdkqeJ? 
z#jv{mk^Oym`m)hqVm-xOR+D-WWvsWB0zu|Yf{#|>ZrcK|*@SQ5hk`s-wMAK;e{jBZ zs2t0w(S4gar8fd@?c>bSd1BEHE#|~!wU&<~%kMmce-mAD?w~t8T!RV)rk=fYf-3_5 zF?fp^Bii1}Ut2Nb&1b~sh>(^2gLb#%sQuA~Wo(*BZ9)w<_5W-~4Zxu( zi2veV_Rxog1M9^uKYg02ZY_)F&ke!CNirOQucd zJFmT?+3-2si4u$vyaQOs5G!N72c;SuJl2<=6WgU5$i{&zW^K3gNEFv~EbI7wXBH*l zETmHgY?jEZ9{W`6g;)d@1jxt}e=7BAslC4DBdU>=$Q{0BxvhH`dfyGo#=Jp#>dXcY z#-a`Hr`wS>FoLz8S?&he>EWD+I7jBg(=ytR5NI>d`!q=zuJUKwjbp8Mc3f|!B(J4D z(#}PI{KS+}xYnFi8gSRr*;{H^T(wGhRTVq~#bWVE=Pt^&&J)KStsbo!=jFa#v}6 zL?~{)Y}P~5xD-mkY#yxODF*!7#`|RI-9dwJ>?ubJ%^J24UDqYkO7}iS%aD7e@e%Q< zuQO+-S7ee9&>@Y zj*ZfVDhe?X?BcCaoa<}jtOZO=` zF?)MW;b_uC>PDKLZE_iV!rm!RwRCMn5HT&-kO-GyZA&mBhr;K@L(P`1&Lqjo{6&?O zStTo{vv-+6nCi`rarmXvc`}Vp67G41k92}8oOjEm(rXPo#CeW6hRVG~y$(>TE2T}W z;kl_YpY>{dk@lr6-XQ150m@6AR=XrjS}3PT2&+>tzw#)#XZT7L8TF;CK&be~rvDpp z_kRJ%xhbk7x~u5m!`P_{BPK^8DK0VwQyWp9$uTE-oit#6t$-=3ev)S#kj-mK(VhJr z^sCuywY-*H4S8t~H{a%Ef*P`U#gGy=TG zK9m%W_yqs*SmrXrg>=BHU2O=WKM=HyWr?PAGzZF3l6!jdG#dr{%phA*0SW zy-s|qSLKP}ot7AxzNS!VrvLvfT7?WRaQGFidBW$c1{tW};@+E{?f-Gp|DT3M8~kWr zu41h3P5wDREGs%*pE>{l1&i?y$9*_-n~ZTUsiAdG0rWIOugP?81-j-2Ife4CZULZr z%Mt1C=-xZ`%|0BB-0MD7YiP##{Z2Jw!jm^wxm%XT`X}^_wRbq^+xkIG7iB{#R%o0+ zk%f}2^-AcYEOZ@WUal8EP1~2-526#|e3jOhhdBeiJ%f$`lKoG;!?R^&t%80)6u>-a zz88UR%`=&4Vww5zNIG68)spc-;?42GLZH_JP)r9~t;$74yn6dZFl*};id_npp;|kB z`)2z3O<^_wm)mz(0TdsvAhaaK7K6{x7mW-!BXFNx(rhs#C|kE97;7!H*652e{;qo- z_vHh`9F$z66YDvfA5dX=?t9+?+p5AE*K`{d{N3D08MsnTPxIVh9u)x@dPdpVWzp!E zt>2AvCioEw7ui?>qABBd#IX>i-$B>u8?AtTBTUG(2Yx1%2r~EY{3XJ34_^zg#cK4y z3xLNdD`+`3`^*ZXO`N7I?$>ZdU~U#IPGT3_%ES$2#~d)*0q zRpCk7{rUkwoRT}hd$a;u2=9(gFxuMy0Ow7yWjTa)qB!0wggC=7&IM#?4OqSFv<2(N zt&lgsM3LP$7d+NHAqyq-EH1ilzi$)#9z6I6m_c-y@1$!k4A<*`f8!DG0?s?`+s^w4 za4(&2&VPGbf~{_*Fb{qz1AW-cARD1wJ}-J%a;eYaY3{|+4FC{U43QoiC6mx@LpmYx zSy?7zsCS+$`wGxp;S}=V$#;*$`8Yw6qx=1`r(4dNtx6`KNiY1AA_M4?=PgJ}Bv&ko zGM)d%z3~G49w@CI4g}9V%oh0pw9ahnfOJFp`tGY>ALQjHU{cpUyjv;{JFyEjWbT{( zZXM5S=?VwTg6bTxiD)sE-mzY*l*4T!x@UQH?|hX+GHPk7&;gT@CCwJTr`FGL-Q%F| zv~EF9Tdk!X81NkcF18o~XMX(+aJ?=(g`W`W^78ocf5!e+0-AvQu72wURu);hiuPW4_!?OP;_z? 
z2&Q&{b}7joyU=_${j`)FG1zk7w-JG-cN;-Ymu}jP0nr+{L7*FfQAp zv-~VkG*FDVVzCEwzvea|HaId_-jI}FCt z9IL3Zzwcs--KnIbdSh0D331LAp#r<{ntHx+rL9JqX_4~Y!j6yp*Vevw)5V3 zht6``YoEhvH7aL(jwXFEDnT#DYiYRmsx}n=je3Qq%O9@p6;+2?AzEHPrf9p%)o`B_ zz4O8zyJ~6#v917@Wo%Ae-)D$Anb_xMiqKp|N#pUf>7Zpde~FwezJ~M_SaFkjy)o7S z^WtxzMD}5)C~kmptl&H)64vwt0M4Ho`;o0MEjYUj=aam$FQd#8z_`xbiw4ky;ikH$ zL~WsOD;uw_vWLWbw^H`ular|wRYtF$O07%>S02SSi}Pc)GoNDIT8%yWIBGe>lr+oICPsDKc|i6qmJHC#Q=xfe}fa1C(v?Qmq?Y|OdT(~3OEeMQuI>!`t< zve?1zb{RKdJ-&0qJq%}n6KBio$UxI*%>U*USLR7$`S}j5oqPLV-)**PM=o&;WYTv> z`F%m{K@;{aubY1Y&;b{0ytgSe<;6{r@EZ7e)oCJAVL=|rgimc5n)z{Bh81>P==lT- zUGq^3a49tjMe0nC#PyA%iQGspy`9<9CG}-dBGmcb+yJ0#AT9J0de?$`j$(U8S} z)>l*%_UIu7DzIz&_r0?D-=9(~Wt89^MQ^R`Jz%f9xs{y3>9I?K_GeM(@!WV`50#$5 zGxW|XtGtOChGK7JPgpD)UKRy5M_3hmj!uMx(uW`~q0rZZp=cj{w zk1u|xp34?Rno^$2Gg)RPnQ(DhpR8Li`_pae2^NUgIkZ@7;7Xx=RO_uOn)*kj1JX*{ z1TXy25>S)-UFJ0hEMehb5qf3< z{N*;Q4WE1JMZm}5V?n|c-2z_m2x&7HzbkR>Z^&we$ONg=;sPw%gefmecg8=^_{&{Z z*{YNBn5m_>*APzGjq5oVxE*S#U&1J3=wkNv%BRGlJ2F%wwo&j{-#4|_qI8ziS7Wm0 zltg&p5Tz{XR4f=Kiz&iKHX*>-Cf1co!y-3^ZQx;>HsUnCV~f-g7pZ603fK=7;WZFn z1dQ(#X)xkRyx7~2f-h$35^epW5t3)%_oypR>8KvbqfB6Z?e>1fs=HInPbQ2G)Hc!M zdo7rRl}pL(^H#VgQY-#N$N5bF3A@zN$X&ckg4IAXd9IrZTtKH0gCgGL39~(-clVcL zLLlDFjSh=W0CB9CDKPc$kWa3u{ic95^^?$TiNXs2nk84lUKf^ajsn^^J?9=qhOncE z;3R%sWTUT!BR6XLE&j-lHmU0JNibC2hk^XI{&>g@0FExv!HcgSkui^b8HL0?g-cRznk&H zjFq#knN%#=OkawZ^TaJwB> z>aEmPUc9fQ?vkuXJf7_Oe>}pu+1}StIi}D;foa3PM&)~aJ8H%pL^Wig86m_U0)M@| zZ_k$+yLIUQf{W^(P&?t9m(ex~2K&`%P$p2=mBxh-OGZn8IyW&ZO8zVdVlB{$n9m@&HOMpf(f z2Pfya+H*$C`+_FKAg!2$G?6B-GgH)KLkG5Uq{*7`NhZvaR)A5ng28PN)m)!QYD9?y zZKt}UMt*Tj2f>7F+_Js7=qVyF4>#hfKA2+jI}qW5dpRzNas2@jO9d53-l;bx!HwTC z7or5n-XeKGIvZk0+5Vp*6Dg`QT4_sd-&7rn);=rhrG;3%P--U8luUqC=IrqD>05EK z0UCHK_-~f{=J$Q(anpLF82!w>Zn+CIkr;{KzwEziP9JWxLKv_AEl$CmPV=XK4%~Emn&$C;3Gu^(pf&DJ z2Hh^`3-dXAjmu1=9rwo{Rf5w^SD_{$;ca1vcAO7i`M~>)?B`#mTSCRxNAtckis{I( z?6mkCy*qEDx}(Dj(}WDelPHeyFh3b;YdR}`O6vE`=v^bzTmwNDraQDE;h!@WM^r`B zD{+UN91b62c=uCCJ9VUpbAjp90ufO z#1#*zFuJWD#%~K_ql#W?!V8$MRe1U?aAq8!G$fc4!Vp86j!TN8%PVyf%nAr4Jyn!Pi&}u(f72^uQqdSvSP!rV) zO6DNw;P;k{0Q{g_tL(y+0_;G97@GSa275p}v9F$D(W0z)?6wx}q>?4e$_AB7ci5pr zo9R6LL+(a1KO*4caJ7NqE}%(j+DtFI-I;33OJW(FgE!5BaPNhX(L~mc*d=8wl>fU+ zA5*5dlvD?q-;ll9*uhF3K~Z8+`OzfS<*wpNVGT)om4&1EM~_E}5~kWi3F_@vi5G4$ z7d(68gxnG|-N0>1y-O#h;XO8KF)j{l(H2o$cZML$(MGNOdv#{?Fp3QIrP0yV?xDB? 
zG4g-*B%k~0$%SDV=f4#);!SAoX!eqFu+zLY{-=2I0@sZwX6gC{cJk`TA=t7X#Q?H_7tLmhFb zr%DccNxFCY3x97$rSeLR#uGh< zEG0P`NI~V+-tOuoN#a(yQ{l%p5$ky#6__lVXZwJo9{1|Wu%&QQTs4g>5C(Wox5WF6ygN~;-*>>Kyftjdm?3hJNQ7MB!XyRc5oZe_IN z#*?$j)Ds)h%htlh8{eZI7sc`O%9XNFwh023$no6hVksS58N9YlU82Lw_e#&iAhXd6 zwR}nD&*#hytDmf`^KbJQG_yomEo-xfUe4Tvmz!SV^BfUg{IuIAL!};m)a2hA47}cdJ9vc51(f}e29s)+caCH1d z^Oov8&=T_=ze#Hk*Y#GN)~~b!W%CqTQaRSAMgEGK>(3!JUcDpQbwQg_)#A*bE&_L6 za2OL{G=`n|RtcdwJmf*1e(f7XWYfqEx z?=JLjOZC|M&HF)UpFK95`VH_KJ14EE8BwG5f0kzcbOk&K6YuT0V>PP{AO}#{di8DW zMCiT=Em`7EpzFGHECbKz0wz(c)Lo5+k;O_|@Zdi5kgOVbVFBbK<#j+qeX(J(pg<@kJ+yF5piRkz`SazJ52 zgG&TSgy#Z!8Ym8Np=r3oOVC;(HIp?(5|7pR>^Y4k&qMxcao~H>o8aEmSe>G9seXKz zKw)CDZsl|70*5Z+C_h%eduqBm6A>9uwPG}e5OBGE{*eh7iH5Q<;6jJ1+Ptqot7LZL zZMX6Y;&iFWi?Ggv1E#mpqf~BA?6FF7sFhQO?^c}@D!523c7dqfbI?a5UJryI!Yz#K z^uKzF`CgH+RN_gTrNq}=R3Bqh=#$d}Qc$|G^}5Rn!f2S9&7Nbw`9k6s)4yDO)+dvj zK%JQS`$UhmB}{^v|DZ5XR77zdd*F9!i9NA=-VAp@%62{VC&Vv?)s^^0b(A=4!%s$L zj@)sRAysJ+Uu`gVw~`wF13nRcuO)$&GsUF@Q_}jpN`p+<0$q}tk-W*rN%T`HSyu_)p#zTo z)isg~U-{MnKoNj|}N*T;JHWSk4s%1OtR!{zt0H{I5L1|BfXXz|5zVoQN=A zGHcv*@~n=Z`eIWovxG0Y37M5k?lmxzaKvC@w9#Ut!N+eSJxigfD6k_ zgy%sY^Jbii9r~`_LoQmkcruiUdKm(Z^q>O$%35h64><}~-ngQ-}BnADMEz1fspkPnw;tr25 z%seo8v*kz<6%HkRd{LDbxa?JffnEI&SEK)D4qQ?U4Maijc@HuuNBiINhhB$$$Uk6T zbcWC^lFTz4Bw%zH3C9!V?slC=ErK90WD>@{;E0UF=P{ioIC19rESJzh)I6XM z5If11vk3CY4ukR>t*)5x+TcO~n|ECL@~BKpp^5V~(|iA?M$gz+@@~5@+Wj*0j<4%w zK=7IM(dss{Shu{}eSC{Hi#4uS5;L}#E-OuBuqgTS2QolNcHbg-P<;`ZgssCdmM@bS z0K;2SNkBDA)rLTRD1bo>uN?&8h&mp9f2F!Kqx0g1XIM0b-28ppgG7p*_>G}w5L zFE}U&+3d`SoR7iA$`z4k$~+6ZQKL}*#O0O9TJhl!lH6#&jn0i4Qw+fkmA#A4)trA; z5qkot2LNuY#sF~RVKEz@nnI>1Ni4A!SF_W=M9|zRkR_iMOQm)mNZX(*6*=lMqOg_7Sm%S(8KhhIml4Lhb}Y)^5p`ni!67$Xet!QNs9>{Bp&Iff}jw z@Fs~bqEozCKZQ&S(LGlN!n}Fa8L+ac2iAIDUJF7D58`98&}98 z<~wzcu@h13U9J)1~j5zz0>Rx9U})W>|r z`pFW8{jG8)@u*j;v6$R8(soV13l|mZGP`;8y(nJFDq<$K+%To^_KaYR)!K8tVokLQ z2WVo5wp4{&+7FA2yuA+vHSH=5i~?}Lcb%pWkRT=8qld^ZnXAjb9)9i<5333pw*qd8 zGy;lSbUZ|DFzN;nPp%&KKSfcy%11{6Nh1QxECa@wE&&iN;BJ}EGq{t)XLOo>L7=Zx zJfyF2betX`GAX%7mGVRBJ-LX_r6d1XBP3W?8KMl_FTEm@dY+4|PX~TYznsc~m?REt zbka2*S_nlJ?(s*uauMGM`7+I_f~GlGB;|3HtpZk`k`+bok;pnRq|opO1>Li<%d+qF zq3hk`jxoN;;TK2r6#|xtD?=S771aAE6H2R6q?~h~bjJ^(nw{s}{<(ht7Vbp7>UtHM zcYXd%!r;MAW-Eg8Uz|ZPm)S&HX>#y=@AA^ZhUxyY2fl{b$1!I895}Z8+?b6x#UDs>?EA4A!KG4iHUMM)0FrX zz>bL4tn;wOp^V?t`|v@9TjcM}(@1QWK1vUDN6g|W58@u(f1ReYJvPS~Au87zeo?|s ze>3IC|CkI$%0w4>1F$N#q(1ZAid{#sjJZ#dt$Ha=7SaNKh&uN6$CketI_T_-N zXgUJ*quI_easKA+RPM#mGcv+VP;#h4dvR zspESOV|XFvuCQtEv?F@-LxBcgM4=krVEIwzxNWNmdHQ1@#GEVCT!5=B7a2d?Q{a&4 zGH+(H9$ezd(z!BSki5*HZ zo&BWA6zwwa*6do>ywyav;S=JxjL_fCaOlYEFHuw?u+op5w3jeg2$MuA4LhhuW%&0%-2_DuNt2A^G@+nZHd?K(JKmN{SJH0b2e5KNL--q zY+~^y$ADz^r&b=jI82VaxZ}U7v#)77zK?sK6wOXI_E2m|)ke;pb??H90It}Nd2CW$ zx%N#`Od^jvrJBV1qRZ3pnoFgbFgp#*j(3C0{FVOhhvJV>Jma~1nxeVT(}s<8?Mk0h zD=TUmA7Yr#Dc2Li*$%JUk*pyd>L;hRgm(jmpR7qmpN&bcQ|vkIal>6r_abq=(w`K+ zTGkeb$*r9Sc;(Ce0LuAfoQg&9p@0IM)7I;8bv=)QearA~Ts{u1Kc*tIV2<=2=cQC#)?fbwfg&7J=O-!M@ndOq+dVbK$7<#s#WA; z7rEfix$AF?3HNRGl{U3_5d>%7>eJtzS$yWLZrGFSGV30U(7YxgqT%V2NcYeya}&Y~bq{ z!G18{gnxa(NyQSdYSjE_Ov-Y)>TTD9!kyq@dwC+spFK_j)hyEmtycKNsqIsxt~V}#f0k|%kIg~ykEB#cPOu5s&woCC4Niyo6Y zCy~vYb{$xB%Bp~sGAol}Zdnv1RGubGbT2z$qIF9XI3(os4l`%377pJDCL26Yz*nmM z#V>ShP8rgK)515oXmwUPelY~!C9FSO65yQvonfjsWsnIYeYrg(K&eSG@3gG=U()hE zP(Wq%CQIJQK0>nh6khtFn(uFyo|D|TkaTrMV1?Zwki?K57N9V&ZU=m$o7iRn zNg6~vN+lQ)L4gthEzfR(c^Z>$E}lt+VoAYD7XC%8HBcX;9(kEuX+rAo41-m8gVS6t zQ#Ff9D+^{^ETer*WAO*VBGDK3nuZX_(KacPU0k_x7UB8 z2w6cV))Rs}6SP?`KMrO*zWPzinXAYAq)D2DeMRgkgZmxi$sm??Q_*rpD3Y$l`%%)M 
zK)vH>j{me43)*@zxH3)-tQE@1pYg5@(bNDZymB(yh&yrYkXkjcO6H+4$b-dhZZ~`s zYD-++;bZHdgtDGa#eZIG07NEhHv#?H!c>IN^U_2N<s9(*fKCDQQt#YCDXPpMv4T=ru=)#xh@U+PG0 zGk|`lW)p(spo9rJ0&~?ok?4cSPlQV{70M)BnBd`;qhz7wavSE&#sh zNf(DH9gnK_nT29UU6_tSu`rc%--L(=2$=7H; zN?aZV%xm~=eZ7()vvYmj_AeoNsQiO@g<-?O#|0fHIdG;P4OM$&^C%q-^{3zEK;ELT6*$kyi6~Rq zF^ceNX~1HpheTi-iB;iz{O7$77;Indw1GYdj9@}~y++$i!K}BJ7Z~WZnH%K767O0S zl$XOM*NJVqU-!~(EZ^#j_oF!uqD6$9_)rlhr&8X)(zN?)xkb(spJ@1#>-E%c3#6f^ zW1qXFP@=S6P4i4n`p{8fN*KP0@@yr|nd%q72Hh674Od&uh*Zl!LkCU^5n2`p^~k)i z5I0bsLU4dc{6q>ib3Sg>m!(NJRJ_me^6h=FYb`Jl6yr?0K;w|aDy>Su7-7#HvHlK| zX8G0qBOWL?5S4Fm(yCoguW&!VY9{aE&9OFr4e`=>gt_77M`?LIL#h|15v@jTy`y0; z&tFZRANy{L2E3rZKm`yk8KeBYRwmCxZ@m}HBgl-08equ(?fp=ob4v06w()$UL&=nU z1PA5!xlNKxoj{Gyg9vHS@IT4o0!+rco-JJ_|tc% z{s7p`_!CUR5G2HJUrDk{lua7JS0mJoa9JMvsXCCOrNmXG`McMq$W?_z48fes(v*}h zPYWIOsoRY^F*BSi7X9pckq4T%PCUWWmSzu~SYsQ;{e zUZ!nZqWK(&p*Cz?P9-3dGJk09xRaGoDFFgelnGxkP+ae&`yWj-CJe=~1h~C(XK_DW{F%E* zeOLrCl?Xi7Gmj{rZ?(1W%(Ye-dISIPzRq+2lJb$RAn|34Z z@T0vY;U;gB*McM&X+nB$6g(2*N3$&pw=&FphZNwv8z)(zh+V|HC}&$K#!(ab67(n)fC&aBRNen-n`#*yjjkmVvY& zRnF!=ZGaN3_fT#Ng1EdUC>PJgS4TXMy7nfCq>c0ie5+`cju;KfR5Jl-cst+8=MiROP z?7&)mp?>IGi$%1jn!TAhT*xWdUMM%`Fm%`$;+g~PC==SxER-M9N;$GAHkFAl(YwR) zJGs8hHb&=kPjfcU%*_qP(YfC6ew6bLGmhacRAXJGP>x3)=kyCSC{+DN|Dd31E0<;Z z+ihFn*&nTE6tIqTX3!qI-<~({LTwPe`AWH9{{w7`;k3L~H+z;7PVl-BpV^^AMjjIp z$BRAsMyL&HIuYLg(Et$5D-2zB7$6x@SrgowMfNiyZv)O8_fxKI5UpQ)sK#~~;G4uu z?7|-N=i5(yIC-~gO&`UD&?A)2FaD^veKWb5Q4rQWAc_rG$$QHzO&u79o2j3JH~;;^ z5U~zdGB}#DA5ci}fI>pplBF7m=3_BYt(R|JL}1$8lY)0`bWFRW3&ri*QvL?&LefLO z`46p}Tv*T91~+6rNJ_cGd3a?qcL|wC%wj*3RXG#%cY%Vv_lXs*+_s~Hl)SXbQ9mIE z9Pq%JX6;ED0D5NDBNZGSVKlyrfn^mW={z1F@ScJtkvVnNjSX^6C}inv!7!fcW#8sX zI?AW?e0dcHaz1R#|DR^gI~uOG-NS11-bKsgRicZ|AOxceB3gnFBM~J;CwlMEdoPJz zq6N`=FBw7*osj5)V9qmnPu_grI_s=+{yA%%KdrIW%-;LiPr2{kb-8Z?2q_M^*GZB@ z-S=rR_UW~L<^;fMb z=(=o%1QM~48vSWq;=OTi0DuE_wd&S_QmwdMK8)MdBfT~GU#o*S@8@a^?;_4zsT~dW z9$NHx_uU9Sl}fuM!lkGc6D)S&ou>nOA0~Qh^MyeIrpM-pl#wUqdDNHTfc&wpSu8Edtr5}JHqlZiFO<+V zvBMp0>74ZRg=MJ6Gu)Ko(|^_XK&!P_5S|<8&45o8?6YZ=dSi~g!!ocSIuwb2HYA>@ za78_`c^GO*CPsk?;rDDS5F;^0Dybaum^%~B#ba9emXFPh;tjToKcJ8_M~IXZF*~yS z#e<@~|BAf9RK=0%YEV)LVWFl`8riE;dI)4C=MQy6QwFUR;w%ek=+V!8&*}x`xmAZP z74%!DAGtiDc%>siUi~>JJ>%%KWi01WqTW-y+oG5<(_b6rY@7NQq14YIS2{^Xah4jJ zUO!7?=|jz%_`nl@y9kj ze}`bH>cEv-BUC0J9rviFK4Ta1QS)@9FGxw(RJ|A^BhY*ri z0;5^;v_ap=f!ED*S-f;yc={oQ`aTFf$9xoWt|hIG)37_4`aIpXUVn4RInc;Y*xFc< z@~KB4J_fkU<)5_?Q1;A=Rd&hsG|e~Nw^T6a!+nF>D3&Ho+V}T&H)_T-DMkaYJSp&G zF3NEdg~(6K4$jvTyIn~!3~t7_o^~=b;fVIWj#aS3B?xFB$hmVg<;yN8s!Tw{7N5uz zCAJ(=*y%PJe)e>njP!n*b6+rc2Rw5H9Ii?_5eljyTE(|);+UF3>SwLE;N;&W)oc%t ztL!qN7uWJpnD+vyOi4%AXmE68=jX;R;sm#zjIpjl%TYWz@B}xst`M%!{0xqZB$P?i-c~;Q zkz_P8@-mmgn?C=eBQ;}>k|IrbS{2t5ZO`0g)n4yRbnE^Hoi%aB&o?f=S=DybVB0O! 
zk!72H0HsAjQAi<%4svf9p<;HG&x}f{r!pBOQdzqQop@}w?7p&lv1)mb*u8EsLPu^a zTKar*2B+P#7}lfCXG&wCHt$$^qFCN?G8?vj6G3hJcz0uX>lrKN+xml?G@7{svD+X$ z;&GIApfm{=atQL3Lj~3u46P%}@tf)1wC>}xHZ`ub!|`SC5e@4QMw@<>biLG%S-*-) zBV|Wm?xlLyF{g3SVqwqO=7_eV1!JUAP1O0~|B>bNhu)K`{hh8ql_dlj$|jR2c|#|& zQOlJ-LS^TzoBqBy?eIO0WOImjPU%4?s2x!gd`*Z>lf48oj-;N71d*!Z@tY*|pEVYZ zPhRT9Y4S)hA`KgBwI4`l3M;#Bwz+JnrMeY%T5?>Z#R^Rl_*&|Gi3KH!aMu(itOu~a zBW)E4|4pQ=EdRfav_%ag{uhxp`kJ`;9{v$|GB@~FZC7qdka#C|duH{(7&RB6oJe!& z2Fs#qU0HuQo11l{y|>=6?l)hA@(tqBkGsF?GJ0Kk))}tTC@&p;C9zSBs5`x)=b^D( zeOFecBNE+(wUV-U=BQ>yg47`Pica^2<=y|dnW07swmzxQxQXY-W#hriCM$8Sh#0){ zdy56F@t*Gqajok9pdtq#e+0%?vhS;oyU1a0a7FE41$7>k2*He&T<3 zv|QC=Im1%xlq6Y+{q|3|xl!reI{~DdpKtQxpQ_ml8WP|@iuJn=@4ctC$g9X`NyUGr zDSh_DO^$Ppo^j1IuMu`48ipQWo4)6Mcn(e-Opn!I+Y_TET@!3sVs@;?WWD``>IY&N z#-rEMLuZwDx$8+S!z^!E40Ul_8K(<_M&@qC;Khpal}7sWZ$*j~OZ?oKwc4>CC{anc zCsE%&LoW+)Bi>;Nyp}hYPtj1h+Y!p1$w9pBSdmn})BpL^k9nI)YHh==BDt)P*0RhK zUCE~E=FHH9P#;Xn09MklPCEQ3_BGvlN=C=6SV2c|G${<;guU!i=_eG4_h(3OGUog> z*hBm^yOA+t$&6Or2b7p~`YdFxL!rcMF^g4wsTS(ZOjj6jm2?~u3z00t6B_31cs6qy zN&+EWSz_zS=I)!dxf_uSp)xltBRlgiKm-ot5~0tJKs2E^%#vs$e=)>|E*39NhZdiK zHQq1e8AL?FLl2iNXAMFcX5BHhd5012)n-nx_N|jeZ=|aNEvsYFBn)2^uibWw8N$7E z^B^zO5ZXP~cVm{^=1#l`yY{uqKt9i$^nCJb`1Q&CZueIz>gNFq93^w*8rBWMSCbLj zdps0aQ4>}9|m!UDC_Zd#*&aNdvd z{BqNDtt;b+Mz(a(7m+YWwdff%2BiOtp*#Pg+|;E#WsEB*tHqb|;NJLgcsTh9fapSD6cXn^O1DfQ~lb^sVR%;Lk z)-HsvrZt|QzhUojAKo3})-C8&uyj@Xwd*ggzuPHh9JKB^0qQTkt#l$Pt(|fv0f(+F zw5YJE7lXEBpikO+2#)wK@kJDZKjiMhm7^{)yzz$Ib6lL2G}9eFrtpONvzTVDAqM8L z>cvhiN$VyB1df2Q!1uoI>^7OqDF8)+35QTpmJE-6Cit-5<=ZpJp+5!lX~XFap)IK` zHe{cohw}y2d+cg`iYn6`va%%3ywIMU=Ry(mEkzA(9#j6vQhVAgWV0{grY6y-hI`rN z)TY#$`#Y88PbO>K?h?(p*;k+&*kbVE&2b&AzJSdvCMtt2PCJ`JXq__>SpD_kUYI|6 zEv=h17st^^0H)_wjw9~-kj7MKJjvs0lmqEsVmCeQ1LZ|9RY}~n+3#0LiRHt@7Dmp@ zDsyFxvZ8VZSF;(Esyh5`cT(b_Kr4xZzMC>X8b{q+S}#x1{5n-Ux|iu|zW{oocBf$M zh1rYQPeG!Ork=tSg#)V~aD&Ge=H-SLRn>AEINdB{#e$RAkX9UTI?jiug6s_iP1Bp1 zyywEGN8P$#sf)jgS& z@#MtSz9B&X=D*o8QQ;`pEgFvG z^D&~AC{c9Tu=HcqvJh#(qO(SLL4EoWntc?ZCWV5<&US&VVnc;WX&;aSz@W;0r_YNFs<*3*KNA$WLw50OB~!%Lk(C=HzUQU(d?K$ zK*OsGzns;3|Ezz^RKun#q~|a6fV$2vhpo$+dKNQm-RjX76pX}OAgr2y$GWPU`fH!Q z?qKSnSMjPGtRXc}Fl)J7f^x_AzY8I5T*jHGlZ!oSDB;SYNA zvj0M(sDFF zANeSb=*A?f4L(_=DZdFdkGQ0!+z}p=E~C+RKjbpCt>x63!f@2aBOZ^9Y>hIX#I`+9 zc_UB%gxy;`z55RFrwG9}PT=F>JvXKkKn7*^ggR8OfgHOs+*CE0GUiI+Q`N zAB!0dwN@$!;QSm41Y)v0xIxX-{LwH^mjQQmz%(ev-Ij!=6-2~+@AB41Wd!}ay&r*7 ze+M}A8$Xv3ey{fc<5%-n7@uNL1(nEcYL!Zl+=MJ z*bt7X)Sq&Qnt+`d4eJI@l++n()xuAq3W97kPu^7%xk`ewu}?y32vrknjMLpE*zNVE z!=UR+Ja>fZF^=`*YfK&wk`T}GSI|e+O_mls^!N-LEFOowyk@bm$B^*y*AI+BzDW=F zvs6FGo}Sf=lztBVSuk8g;B$uqzlLJUSeQb1-GtX}z#XV(ZZ}mwklr}k&DN7A7CU8^eC%ln*Br(5WPw>7_-FgZU0n3}>X$i4d zHLdEbjlkf3I*y`;dH=lu%3k9}{6926zef7R^jrKeCBXCIT9LG=U0}RGl!s|pmbVkA z1pT2r_F8fGPFg`rP(J%P#B9>{Fva@bs@Bs*J@K-t6w9?D1!=M4DIGnpMgz0o4c5wY z$Q`_FqIb&_xYglLw|&iN8NCGAn@G~JS!w-;ol1762U6L8+k?31(A(Gbi34~!)(mY) zhqkK+d9Ta7@!=Ono<<`%BJ!#}3q<_fZS}@7ZF2@37?mIrB$JOpkeODYZ>#jJc6tDd zi#<#JQ^)&>E739w$`o}fBA7gb@I#4U8_l#`xWsRzkrEa>(1-O}yTvEMRBYPa6bc3$ z?&2zRBg(l#`tc!y2_xIa0=?z1O$W$rx34qG^6|KXu0W@GBkXXOcJrXT3$T}`AVjPx zmuz{4twy$GWOmDFN{Mxw=g9qF$(hs zElGNfsaO%iJpu-2n2afZWzUkD)f{!e{p$V>I&t7)r4GloumDGG2STd4gAfv)PN0yC z5_2zSQN`RJ!1EGTj?MgI!v&l(Wgu;+5ycNN@o8~_6G^eeEf9Kh_!_I_9T*}x`Zk4K zL%ZSXeB0j@G|rSm#8&%Qjd-~4&bU4cTn3$?+Y<8~CaUBp!hVaim&t=$9Uz6yp)P|a zslo3$nFv@Z{tP{SE-VI3ON*uaI1MZ6HbJs|z~|uQ-pzVTkWn-hUvIY6PB$C~A^Si) zvOxq{#ISxwGSd9|#kfMlO{FnC!*Tj$u>q7Or~lqLK&qd8k};$^5cxA2QgJVkO;o8< z0+T3Kh_R!qAUh??#E;dd2>cCD*V_IT9z2cIb;YqEL5*`Rld~|{`D$!TA1AC0dG9Zn 
z%rBy9RCAogwPl}FF3Q?B)9pbIhXg}co%13^9Ii7(6>uYP>6Hg|i3e-FF=r*`B&zyJUE2M+qT4JQ`OU~?Z6GdJhxe9f|X8Lr_@PJuF~p_0nF zXh2)+&d3B*1k#{&A7uzVcpcq5-SWM5Q&{EfpMN`(MTrUj)q;?yU-0%U-E}z>Ko*Aa zS4+Tg{KcOnQz;hJuiAB=ZpsAYy>0Es2VgVOZe!;$7iv5@^!u{giX+>!sj&Z+B?Gqu zPZI9rJx?|&b5NE143hgu5R*hz?@<-Ct3w&&OKsv5ncqWGKalKn^6j=W7fm+5l!YG)LMy&i!<<(vgZ zDGCGk0=t*(C7rn8Z4{>sgq8>0;I$X2Z&VkYdju{fFIw6iq!N%_?9%%|qRmS|*WWV| zG&=ymGSwB7ZOe}8TQ6-wQS3O-%4PuDGqVWYzmD-j*)Jd}aD-AbgMP(#6bnaKHG%R$ z_>Th}aL}ngDAfQgKBx;6X>+q>z((-pPk?nE{>JNrfW4Ou@sGXt7tXc6rRgoaW}&gYmRczH?Ip_p4AciJBIw4!F2LOUTY!7L9n_SK5Ew@| zQk$|WcAWU96pCcr_$bqF0WK3Ek4;eVJz;W3!xj)6LyOe5S8 z*VZ4U3`n82Qgh!#$runIXmB13H8};|WMHlq6*O^O3XEXjC?y6W>6<{J?ZM!-`Wofg zNp;IFTvtUE z{t|D9Y1NerBlP`K4>EQ=gE7~0f|)osz{;Q!4>0$_exAwSuOiUU{)NTOsy}bpLK9{> zsV>?*XzTYv(DP%7vBl-<#&5mjVW*2$zHq`0o++t>J+j)nv z$G2lt=`E{6CY=uG8)#nMU(Ghvl#j%FkBbj<-O+ufyVuyRTUW|z!lNf(Im}s}slQig zM;6nDSCc@If^uAZ^%7V=R63n1lD0UC91IF^Jp^qFNf~*0?@`I!X8rsSSca#>KvU>D zP<`-0p&&fS-QWb2jz!s>TPgC~-!ubzo+goG;|}N?k&N afn;v};yKrKlgAFF?^jmPkS~`r_4^M?gl*~o literal 0 HcmV?d00001 diff --git a/src/crewai_tools/tools/nl2sql/images/image-9.png b/src/crewai_tools/tools/nl2sql/images/image-9.png new file mode 100644 index 0000000000000000000000000000000000000000..87f3824342c3aa28496ac5a44f06fad67f0d36fb GIT binary patch literal 56650 zcmeFZbyQr-(l-o*1cDPRxFu+C9oz}-9^BpCLvT%ScP4mn8zguJcb5QzOK^v8IOpEy z-g9!^^{w@+^?iT6uxIT(-Mg)-y1Tl5RTHi#FNumofCK{rgDUl2Oc@5|*(MAOtRCVE zXbPg)b6n^b9ZOMBMJZ8HGDRnQGfNv&7#Nz*u5qn0B8J$#{z>Eos-p11pW-nz!vJC` z`e(sG7KZiR=T-zMtsm^gL`UbxvB&f=7={=z>p5Nsi>r*qd9Nxv@oLc5Z3lOquK&!q z^j&^tOCndCfi*jrF5zMaZuz2l-VAQs21YuLo_$P<66+lAY^cuy6Qk+ahaR+`EzQh# zTfb}9;PC(`6!`ex>d2eZ>5hsE7JO`Gm34ssM9EuJ?Wx~56Q|fI$!WgpW9D2vj8~Uc zxLx}z+95V;O1V>^^T8xO+j4DavHl_PyZn*VRQ>9Y*x~Nd{XS*SfdDYvUnZ9hKRyGJiB>O1?( z$Mkxb;)5>T(5N5h-dAqqz5MSyEs>*+Hd^+M4yeNC%hk(~NoxGT7m_q;0b2+;d0=tnUJ z?%%o3Hglf+`x#d6=|y1`Q7I|tUB%eR)YQ(|!rrB*HwRh=9KWTiri-SWERV6hEu*1{ zy^$%Shpod?5g0xX9_XX3sf!_*hpmmBGmi&9#UF3*K%bwInJCEqc*Vtb1c$k>n-Q5}8*%xbo9@XYR;xkqV~4Xf-VC8VavY?|99r!3;xlh=6{+50J#6V z$^XjvuaZwg;8Ae0gtlh*L_+}`v1v4o=hUROW>-b*2tOA4Pwf`8~nPKxM@-#aJN5ec7@;*BX_hYCjq^Y=?} zko?*2sbs6-aEgPih@=fK{-NJAn{oe;it&pAmS$7$Q>;$VZ!`#i!FKNdhg3w5&(IvE z&c1Ttf1qmwz~moN;X&_VBAW1iu226%zhQcaSbrNM^gX>dFg=TrzZ|W9=fV?v@PFeC z^u7Pfp8p@&gR$M?y|6pYAd&_?$>3L!F9d>Z)HtzU{3hld{?HkKXUl8k<(_u>qp&)0 zKzex8m7IyU&Le&^&_y^OIivHcTLp=4CGzOU;fHGgkFJlz&G9q12qDpdf@Y?eze&v@ z^(tqN&u9YUBvnzNdS@p{nJ3Bb3+n}GVdwf=?M7n$m!k7lJz#m-(L3Yzh|GKyoj9+1vUx{=hu8j|C2Pw>kEx6Eq;(PxtXK4*EOARH4-T+)8|AJ2aIUC# zO=hR#8riD#Ab~a&IrZOqkwppZ#a%C+ghnwai(6YZ`@xsN_i=ODYl^~92z%(>ta&j1 zgDjW-x0M#vIPVfY&yw!rpaoi{w{~S^jrB*`TC`j6)J_q#E{A6>hlIJN8mfwBQnXv2 z*hq+FTdBpZJzw(|FT{)waQP_gAUH*R7ua%d;JYHIY3?s*NxjY(&FT0{hYVEU>{uSA z>1qLLK$_YJ3;BjN*1!;j_HTNKMmM#&tKZB{D#iDTR8=*#EAOJhRq8iJH!I^IEAu00 z1WZzo3a9Sl;NX_xuR`tAu_mPQeCo6nyl`s>Vpel+YzN2h%=qxa@Gp;p&hcR?EO{;? 
za~3w`98fQsoZZ1eOsu7mvd!_SBQH^qQ2w%7aZ;@&VV%65ZpBQ_@SDojSL%wk?$QW;K`Zj(ikN5Vy|5`5*9k1^+Mrrko%@ReP zwu;5Af%H=5+nIQGe52RjS5!UW+fOpj;@nt%7NE>IZZ|ycr4%!7-W*~REz3&m^VBjt z=S0h4GaZI->FBv~=^>LYF?<1!WaW4LkSxBFHSL!RMiG+;R#vo70Sh`sII*5UnmrRG zT_ux%Ul}p6F=oTY(Kl7W3nhH2?xbk;h-T4nOgZ2BF|3H^K7xan0#vTwqTwcfo>Emk z(AG5>M`CkN)}ZI+`wJss;1UrV#aUu^mLvvJqJjKe&Q{Xas58Ro%C{)ClG!*TpMRF+ ztl;xz%WvRKj27!!E=Kdy_!;OR?Y(|id8**INvnO?BYd;HqhF_Vl>WdHQzXDG6xo%v z7$wro_EQeXye>8K^Pp}bXfM{nFTc-biCZqT(ZTCimke!vd;Y}F$zx^hvqdUkHPBAb zYOqb+ADH!1-Z7~Sq(&DId|l`=gw?fC*HIK$?W@TGrL20w3cx2;^_v!^|6`T@4mcD+ zi9Xk&dSEtP<`HD{(i8~!;jF%ICcvXy?|7;%XY%@yXoN0r#{Tg$=Y8DQ;GT0!g^}`h z`7F|T$0)yR3sSk;ntq+ZdIOCV9TBFLE8;2?tP$s_nVQ<6C0 zymZTr#!bCz2emD=+)P6^Nz)9=lD@sY{KU+Q-DM^WTtM*_`XCgncB4^o{=ajZ5oOY>q`IaR# z*vh65Ihw04CY6!V7iOFu&GKhZEsw{yjxU`JdH`fPzb2dpPakNHQbq!mKG^%J!zfg# z+jgiN*n8O37Ueh*kn9NRxD8kHIcT#Ds|;Q(a|sIS)fexYFeg#h`eJY(ImmKYF3_sd z)kTONhflCt$?9jyY3*oH7cNH0YWs!^Ce3J~UyMBTyo#+@?uU*~WSnrm$%MKq?hDFBC-;K9B5a-nZ zj>EFhEfr{yx#EJQTHu$!R~Mwp#aAY&gOv15Lg|}C6VxAv!*(EqxNnv6Xe14UD4VYi z+tTuuy^W|3+ITezun3C?-CRH7);~7L4jt|8xEOv?K@i>@22;+d?{cn>kt<`WT>hMc zlha{Oiik#{AEY0X$Wha@mCRzu`M|~!?KA&O)M7UAWdgIT#wU$?_t+?-LeLn0hU6L` zXz<>1Qm+=7K7Rj2*c>LQz%{b2Qy)z8e&7RZl)tomRuO1WWQdzAwoTvd5ylHLZEs3s z+!rJ5vLPgpn3$;I(t{EZMl^iLOA)M9aSLtjG+ zuZ@VoJDg?a$qMWk?xz~-Sq{KYtvx}}qOGhd+uxc)0Vm>S71fxF$Kn(HRi6!@g*#Ut zOh@3FPM~fxK2i65)1)h6U5;V%u3RZ!!o{}7#j4H0!54QZ^H;8FndH#6FuIRNc+`qF zNHow)=J1YdGcC(y#T#$H8dP~HQH0LMydA z2sA3tXd217k8a%O>l~)+*hdRHQQh*`Py%E*`MsH&`$br3$)&>;!>i;XK-1vp_=?|s zh+;PdL-=vdH6N+Sq^b>6RClZ`7Sz`$kU4X8x?SUxF;hKLKe6U-Peu=coGtDcy4Ww@ zV=;Jp6Y7#Wr!7O$Y<@B!2xGMW+$mdNZW!l2~6$-ke+!=7wM3PT0#0ZW}BZW zmMh`n=W297(E}mF@hP^useNu_%3Lndhy+XXyvdh!ZO@Rhgc(h=i^r2=zvk_jWo$B@ zuFa*Jd|F<(3eQ!dzvxfH8c&?a|HL}?{`IypqiwRld2`=34UQ~SH&EJI#+;O;83G?` z9|xyi_D=|jlJJSANg8UsGzU%_)MNwQPT6fVW?T)hhy2{w&tK=SOmN88zt-hy-X^i- zD9q?mso%*&>h;)HV18}F+j#i?ZSb7*QgiJ7cEPt+>S^nm0dATk*f5S$(2ES>*c%&%S$_1LMgNbKn>%0pPj)xzzJXDXhFh5x+(R;5ruK9nw<7Lo) zeM*ld^MXhM!`|POV#v#>QB$-%JL%4v?La5H0*ApbZuvO>-7&8l@2Gi1BiP)Qwb+!A z&nyKh*DUrI?T<6rJ6%Bafz^^InC9L-z~4k$UTEg)IkSwwdb9PiJ|E>+?$_W3-qlp)Cvgk! 
z>HT4%?f7h3v%uo#&qNJiqm>yPEQ}iVxi#SHn4!GFuF+y$T+ZZ-B@H2R@B;CHD-5G2XkRO25t< zFd8?LL{L)YYVG>S=2*w2m@Z3TZxfQ5ctUWh?x)_Ln?O&%{g=~kfG6BJ2@6+6AY{y` zo@nSWZ{e0Zy5N+3`Mb(`?U$;;gM$Hd%Pk27dGO1F1j*rO>wx$Mb@f4WewI#4+l%TM zw^7_GQ^F%}zoSlCZM_QI;TFOKTRnZfrCP^Bldv$$LWuDP$x66|*JT4U>Rg->zz@Q1 zpHGx49f>&vy~o}6YS1$nRh`G1DwhMl6Qd%N+=Nd@TAnB{W|ONpZ1n|G@{EPpqs1kj z)bEVWTk?a(-Y<6y@K_iA9CJ2qTU2tUSWyzx(-1fVdoUb6)S;DLjJ0MYjyxkovvw>w zN%_Ug5gWVbU*Yj}-wW56wpWnAOffUpf zSHkhnFZ2jxGzg~gov*T1#%aD|G=ytDwi*me({Z~=)7#oqx{WtqGG!l3cTxnOpA;D} zG&*TX4Nr^#KzoS3wa1$CQfY$bO+=3V20u;dQu&(cnya3jxlR->l#()-TOUg#)1t&T zHLt&T-csMZbPX2QcGA-soM;LXO&ch_+7+=h#5XNc)u+N4@rWY*Iui;Mb(ZX^&{u}2 zn}Hi=$;JSk5lx9F7uf~wUw_?uAf3D0BOV%E!_X&#ZMDk?($CzS`P0V3Dsf3$h0t=*kU8@l7TypQiy+s#q8Ojff6z&M^+O(y1Po{qM}Z zWXozaLavOwc&FzGdD$9$#5*axziO5Wem8&!c*?{~ynDsQTD)uZB$OpSQ^#q2l2$s$ z#MlEkB!stt&7Ua$HgjLXohuJ|ju_XcR4jhOf7$I{aoFy!&5#(;Wn^3J#OHN;*Stq7 z7lpP-!dM{Rx$EBWnRt6U7t2I-Fi0f5_|1vO9{sxIN}8Q2<2$!XjOiZFOgpU~S`279 zpe(ocZytr~_Rw%IKhF3dQa zy>Vs9NjRABR6w56oR#7A$_<(H6PC)sb#^3E{-&3tUsJbz4BDNfL@;ZhJ49seI%Sm+ z5BpYwrBwC)FM~%6!~MgdTex?L4bG&#Z?aBbv$gP^w>PqJ;4E}iiS)(H^j3aoOxF(K zEdmb+9iL7{q`I&+%v$!s#+Dsv#Z%?~s&@r9X%lPw?9IpbWbUmTI-)fLN#3u^09EuP z$nR?n@E`?QaVSv|N8^{=SqL)>kV9Pu%;%w;QxY>xXeE@QQ+-gmZv11>1ZxmSCv6$u z>`L5Ex)S}(tYbx`$kfAEBEx-rVKxQe^f`YpwzgPzM=(t&ABYbJ#?vEDf?}KY^foJy z-s^2j<>n+=Ppo+sC`RLw%*MUNan1^4R z_@avhSv9F3{NG#9A}Ht~92E-cm-chLTCk62#pel`Sxk>^?|8+086!R`Y3;DnWi@?- zIBPKELahTQUZC&ut)-|iu1xD=QGBo0XEraf**L_r-QiQN8w?X89QjJa+PwUNP!#5} z7%nWCW>7HI-=bI84E7f^7vr0$gG*1QV$cYqPtI1o`eYFg2>yO}t9$h|`It-nV{{CT zh2Fv-_>zmszVOz&O=hL7UMte6X#X8QukFin<8?zE+?gqH60MD25c?x#;S-NLP4ZT5 zXWF-R*p!nL{`oQskd)o~DdcpEM}jT>{@5bi(F&XxB`x`Qo7ckXo`Wr>_y_lpZ>kX< z-5O0sEQzy?S?-U$C^teb%->icyNw_A%O;~6I&bd$r>(V)6HbTpsVCH{`s$o$Lu|KH zQxc7D?0gCC0*68iT^$nJT4#HQr9GoVRy^+8s%-cmS#v(u<2;G{LoElARmnbiZWVmI zetcq}!lkjUJ~fQz>|^4iQ!fH=f2=}x?vU*y(4k4Mcf-*whe`#iI|ID4hugf-TJyJc z2$BAycVvcZ$JVkc)IdGsj1edVfPS-%ikW3%ir+%;k>1g?zQCK zOn&k39wgOSw27(8mo-qzBf5>YO9}Xn=jvjw2*yk8*2+{$1iNn_NI$` z!%{kzKgr`HL@@NY1iANCb9`zqIyv#%I-Rhei8$_zsN%q&F)PH@zA?CP#xk6 z1q(6e=XG}Spv|^bi>i9A26E|EP|1=im4v6FVHVGH_n?OYqw&Hv^O`9sY#?5k8>35g zsX-LsI8~T-NPP~Lh+(DtL)3BN%F|q|W2sTrOQin0wd#AJ&(oYDEJFrwkd^6g^<#1pAS)SP{I}iD~(F zb8`+4+&Sxmx^5BTB`F*zk&u34--F-J)w}gpAzBIRZ=Are(1c3?FMk6$a{WCAYL9iG z2q@l{=XQYmixKEg>jtU}OoV6GN$=9%+lShVJHi8WW>QwpVjU-4JrF#DT=hu`D)r~K zUD(#cfolXuu!4UdAk@x2n<*4Pvl@R$XOfQ$XK_OrR4nHrvkRb#@Vj4y$>dQ|HoeG- z(18qLApS#dINz?S_wdmJvbP7HPg?s>BO%&|K@FMXy_Hy{WzE1fbx$_GY0y?x5c~N*)h7qDP&n7@smT31 zAN;AImh%;z*XPgvdX9#2=o<&r;9qY?f{pfDogFJrIK?Hd#QpCavXO(@@5|E>f`6~^ zmpc@8xNi+s68ya*I#>Zlc6*z`KYy?BE)$9(_}d#!qW|8JX(;%y@PCK)AA0_O!nMG| ziq)%?^P;SEUS&@$0E3SW4h~L2^DS;aIy|2Q%*h^0tzR~aN#9ZWEH2KcjJZ7gyEcqC zwsw0X9aRh`vq&=`avQOZ1Io?&qVCZ~LhsQ}f@gU-tDBr9bP;fHbyxv+)5C}?B~&nT z#c9YQOENG{n|fLO)a|m@K&zUfPjPSFop$O@qAev( zEo}+4$b5yGP7}L#JXk9~`FU8E;)XP$VXv;@qs8g-*}v0r^*j=CBja^VB<@>?lD%j8 zlp#D!X1*IV>Jo?^xo_JZ9(0Aj!>GLAK-P z?n%XFf>}0K_xb***}1RV{`TQ38>6ifxZix$~SoG>{`bTp%n<2lVNXE0|1 z;fVP9U{4~qq6DT|0wqGvwxRZ2kCH4vo#AQb6I{7OEpZbi4D-251wv zlQ}$Ke7$tDJQ~yz&gN@Ur*yjoy___6isF0Rn^mBF?5Evf!lQ&h?Ht(C9pu0@mx8!;KYFDw`D6?a|s%UK3+5Wj+q2z!DSN|5~y3TfDw#^`pc!BG) zJ~oEaCf<<6t+ddv&5D?xl2CuDK077dPYJm5Ebw7(H5vUsNCfv0Vxuk#lb}w8FrS7V zQ_zvNz^?!yQOtllPHfA^7Ic%o$N9+^j*l60Rcd#YvTcXiYsg47c zInmCKU&Tw!x~;)A6Q>|8u0=xlcTiE*mI#5zfhXg|2o52G0azn~8xb~8SVu9SLX0Ym zfBEYzoSnv?Z}ABY{Xgtf9_axKz-sRnk;Z@8$F5#Mm*lod?SCj5umTnTLwC6dq`#ds zp;P65T`W*YpRR1Gx*pMzF8*JzT^W_6&wrDX=~YKV#=qcNROya`2td0~(8u%v-$CaZ zB!XJj-t_2{<;+bVr(zU3dVuImFvtdkB8aFe)xmh8WBL=r!Wu4)ScfZi1slz2lS|HW 
zFJe}F_!sod%2Mq9bDzTeyaJIG29^ucp~c9rYr@#_gt&EQ5sMSNL;1V^^o2k#@zVoxxc(zy_<=&)Cmkm&{L{e0j~ zqGebzw{|0Lb*x-ZQdm9CHV@zz1$i*HV&TZ zF46yn4=9biOQTJk@)_!nNxdal*JKD{u@$|&p4*?~_!v`!)Bjk0*Iw_sKqfusTL8Wc zqBg0na1F`OdL&iUm_C;?R~sJm8z^@2Tmx`!>MlIWDe#}FpPs`bl$t6oI>5tXFe`LW zW1n&oYfscEpyVsnG%Kx}#Za6QTx<-@)jpKIc>-ptd%5n7iR6480#pv1*^qXIKXGMt z7A>=dJ)vPJEgj5D0)`g6H)S3%DCW=ahi9+ZZ%K_U?~c6tzj0*m10V087TZeeN7jgn zEMBp!8ptKD(WZ(pNzr%ffYscFiNic~%%Qj8$!5MXd$>pW0>;KWl`HzRT^y1C|r$;jCehq;>h$cEMnGtKvK*^>Ih9y;yq71%)5l znjf++&K7U>Hm-At`?ix~FUD#G(Z>&!{sm;&>Q(^9Ud^nr+}b#qcMVEPf=@W@r^E)% zJ=8H<{cdBmTdzLim@!qffA)aSSQ>w{DP)<^e@||!(|`KRI(Ag1rtNbkj(XNL)^!hS zYlDA?{DJhk-M;Ygvjt_%K7(@Z>r|k{5+1@%KG46Af`xuN36E;@?d3)M3?B46Cg5RI zcgAHqmhxZ^HD|c;BE;J3ElVSl>Pp=?x~{a~sl>7MP9#(`a+95;4 z=P!w8eKc=|9yFm-=ESMzlbT2zV=Ycqo)CC?QRMx!1$1-hh7b5?gCf%qAsNXo z$0wB)T?+$w#4@~6v1pHqZp$f&KT03?GAg{i8fE?0ELiTVX5i>}l%&_qFN<2X6sAk= zeqdSeoNf_6w%2uN^To0CJtF%jJxG=2O~^S73>y}?~hJ-dwr9x8wKsUZEjzOejfH(k%Q7h+b9swq4rwW{)}9@Szo z>Ltt%Lh6hZB^+}87CA{HH8&@&wafWD#xCG%E% z6m=xpty$8+gN;YW%y)7gT;n;xwr;tpANmD~9KuzPU-hq9V$UoXVs#?bY<`cXZPsX1 z=QUmcmbQkVyk%B)Vj6IgMNvMDUjQZIRzsRhTF-m$EO5)TKlT-7x05uft>bqud z_35jPpIG=EMMa_S=dgvHzgt;CjrAL|F`B}R5l>F}%Br#bw>7Lywpt}rGIz2uX>*xg zF%h}E`q>4+z@wpj(v^Ag)4G#q04!n}+(!h>pL^kr>X5z&1U%5N7v`L7>VSnF!R&Ib3G z_)L2SZAf4G4g?Fp4#VpUo(qt()`B5`HqnCvDw1QFWcKkaMtS~f)N9?xZToi1<5gH# zs?jR%My#-?)|lcc$NO^#RybjeNrrmvkmboiYu#0P(wi|tcSfK)sb2XHuXz&58`_2o zU|@2erDIKe?1X(b37H?~>~qeSIZ_Q5;}wKQK)6_4C zx4nfj5D&`03V0j-MBmjyMDnyGH1-W3lTdbk}_3L1Hlo zoG-ikY_-EHbj{c%;Amx0`ak%g%$?!M_{i5 zZW84tZ5<@R0tu3!iuhz#8h6G1%f*+ZU~{9%D3Y^|(ByuZ);`_@PJtcPeinlA@o$~& zK0n=>y>-<)pLNVn~jQq3pt06T+&M0 z+aUN$SN-z@>m>nAFHe^*=8`m*bB|@fxNl@nFg0BmCzD)}xjEw(>xZqk%VojT;crFm z1ZsB&`{w{G@OI*UL(@aM&W7S{gW!TGR!zv>;T3Vq&ZCZdxctGCQTPDAz5W~fnwubB z7ZI1)!LOPnJrV4@HLO1z+kM<9QJX2UTE{uCoCXhF9P#;ieOXL_QwRQ38-q8GFfW&7 z1YQ47h19c*OQ%9Ju9GsfZ1uYlU|U{6)w!%}-8Yg3VCi9Ht<>dpJv^8tN#1E8r+E`Y zQ{RUmL8<4TMm(dh!RT()Ge5*HElO!2KIEq90&54@N*Ub*5{wf)7H=v8on7&zB*P`q z(~yF*b^AyRjTp@E0Et;6$YQ?jm=RbR{umCxyI%Bh$=-Mr%JqYm5UXzqK4w730Qxl5 z5V2D)rEEn;WE38(L$V}9yq6&Cd z-~IqQoL?!}R--GPFU!IT_E*oGGmi(tmX2|@_5F!r-lCTcjANgUA_YU;cGkmQ#NJfg zSS0%}riXH4!?Yp9A`)h%nlHaziCD?3p7;}St({14o~bsW=H*eDjR|ow#8H_8F3;#c z9~1uWxe#GOz|z_>(Q@1>>7R(?;pRnh{O-kD4GvqTV$%H~N`~vO)iKkP^lQb~P@k8vw#;Xd>&LUbMK^1Ml=1b^>9a}pryl)C zO?49|!ua+4Nvjp~+QnNFbZ=ZFkn1vmXn^2>F*yTH4R0wx+q9V&7BB-j2=Ye4b$Dv+s|vacryTB^yM*fddK z+J>9^R+=P7H21a7+Ep9kOOu0a zm22Ak%`$EtsU4#F9~dQnSksAC&x(H}j?JT=sg`q&*jb*oEI63}&ioh{YdtT-0VR;F z=TP#;o^hg1^Rj(uP+v(cK3w-=r#DxJK%L6UHm#p#m5F~4T*=jI(+|PL<|>&w&=!Vg z4?j9RIqZ#E>P{AGZaPIZqqq&$8X7y;bR1+`o2Mp84TnW*y$Iz+(JzUb@d3qh`fY(0 zWp&w{KV?9^2oB8s6l;y29sFZ2jPfbwj$>PiXPa{4{uoe+HQGMAq#FMazg8UjddDnZ zUBB&H%y2?xx%03U51qN!7mV+LUmVWwhUQjJV5& zLrlpM2bz7GJj-R1|ctH$dn|GuPUln`%vEL`!dZYqDPdEP!F~wnebL zW(@w=l{VIj473oJUOH4PJ9S3NFw+_IvTEea4|T7Yy@)q1x7Q3cV6fG)Y}zDsGyRC0 z{Y;CtW_J*rs&>tx!$p6s{h64a70{$&kD8upt`cc8w$2DJex?H^#=KBpd%+_ys6fnw z(@QqMAsTUa*nD$Lo@DanwDn*Gt1wXA6!0dT8k*Lh0 zeLT_gjo6#E_4N&leC-B$%JA%2*c%tn$CCo%*1z>Vbc$K*n%9XI&w47p7TwAt4M3`T&vp$ zYeAVtKriY3Lw*MHMi#lE>kcqbn8Ek$7b|eBK;!k%`f#7k80JKAXuirvTN|3oos}8u z%y`qc&8*$lH=evXW}UAt_x+=k$HLc%q{eyKYLV+a{=>PGD7c#Hu6p9AilVJqY5XWp z?eiAV;n}caR;y%slQQwrA`wHkclkd}xrkBOoM$^q)KWoC!^{V~9h!oNqlB)m=-S82 zXvW_x1vmDIpq#^ho?CC<)O?4|yr~~g>a?ljq^_c~Mu2!l0NX&nc?jP@Iqv&OWVm8P zoaOB1VWC@c-4O{ZByLvu_9KG&SVN_)TeGRAs?0$Kl>F{UxO>#Zt>3cwYm$(hh$BQ4 zE!Y@HyzP8V`3`g@qQO1yzehYCj!mDPaOA^s3z#=N3y)Fc?$5#gxduBv671JZmsb!R*T>x`%}2N{yiRW7$IYrUrnl#&B)Z1CFD zv7YuPdizjx9-g03H3lE{tRblyrl(M2f-_B@6Z^pfIW|QSM-&w6#(8&{F)TaAOX4in 
zm!~|a_GYvB8o_toLAnQ#SAT0bXN&7WUVJLXChD84=EsIVCmX~mr8qs&r@TJ~sn4p(UND$L0P58i7h#vFSJ_s_I$iIc<{ZY? z)PQoFQ-y~%d6mH?g2e^943W|zCxX%x+Ma4IKqFS(|*g3lLI+SFSf7*vYYShgn9Rohs(&+pkOZm2xO@~#Z;O5QoT01UN%Ix)0NRfjjy6*N z00Kb~=Lv=Je<95#!KscWcfZ;`@Te(Llunp_6X55vj6c=3q#yC|ot@-jVEV2^8X{bt zjzsBj6IKuAm7-Cwc@$}~Nw!_%zGJJZUu;Y;pBR5rmAPmDOhuw=<)M1{ww!UeEJG_% zpD{AES?dQ>AJF_PMQ{2Q+1R}(e5ahyZ+)w~aXDiJtIK>_#?Hf9xrTMl)j+cOs2$Hz z9Ks;=$aYI;MTjR2-47rPloh*rKvRP_HVWvWSb(N>afCAAEg;e1 zjf(kqm+tl}AD5Ao^QE8K-G1hLCtc=(TtR5x?sE36%c&X14798bC=B4jVMFw=s&^{p z%~(N)o{$qmxnlHNzlD3OIOg-FX$!Te7g!4aNYCUYj%fx_ddA+MuI%PRGUE0|K*^xS zRF4CId&)P^>m`!o!?H-WCuJacz^KSh{iKe9SKVY>8h^KQHq-K>_TC7(AT62JDT&QO8K$rq^0pgwT3cF2(3G& zQtr5gEG5hH_y%US7+f_q8~F*4sfyL&EDP=Gho*1u()B;6sR9aP*XcBTs#OUs*J!gZ znHp8oCBV~`r2D7(bo~=E!zC`#uk8{N5$xo@A9gxzK$k@wtDZG98fJiGwUpsu>l5e^^Wi}B ztCO~2(YPjm$z_nVxzDCr%2YnDY3K^?0dJ_1kU_v=G=C?Z|L01z)H5-B)$~S{Vbv_Nn5;M{w##yRB89qO z>XYCMH9On$3eHmo4S;J`u!jgq<5^Zep3}60Qy_0(fEpY$Dzt*GtMvkaf4w_7`FVo- zU6aOg`Xa|O{t7?SPE=ou!E-$DL9s|rni~Xn_!?ij>0_XAIlM=A^y46Rk|;gW>j5qW z9QROTnUR`DEl+T*^)g@j1|+?G>?2hC_?&wE>j3nFRs*HYdusfrQ$6Pr+=S2BhkrRr z;xo1ZD?d~?-Zc4q z!<&u4RD|rEquu#-2p!PvlhAO+59r5p6$O4@3~CK^wudnuXlU(qcWUM!^wGKBu>&$^ z1Oy0TAmZ&U7~gE~$(NR@7ZpGlY7Vqy0qZOAy5)u@ry%& zIoj&}nfEYGW(7}3dq5@>2(GMf?XERieifmcHG5(?G4tcB2idwbGTPzEgl+RZ{c@mU z$h46C&gFU@{Aq6!5p?^@YIl|)Dz7I?)Q94KM?Qwa?zO08ZB4VQ=qA#c$>?d)5$lB$Hi!O1Z2yJA8s zGR5*gJ|q4^rcjOm6mFggB^mz*ykCg_i4@dAZ~o?y{)9mW-ay09^hmwlBmaYc!8tT| z4U`4*`LE8rf5PCqq8$gfDY1{b_Pc;K%EaWviOxQCB_&}?UvOwG*=P`v@Gn0y(CYHC=E?xQ*uf0 z!bhPcL?OSuUew|+DA4%ve}z{;v3dIjm_zj6M&_W0)hIOi@WB^RwlNp|9M>A0xaTkR z?s+IjJW!`yF1PdU)s7(LbVmE>sPvPqc7o)!NOU^<*EjNg@t$wbx^#5vn7dtbU=T(e z0`As4jenF3cq3Fln=3Be=Uz55+4U#iKyi8jP*IRwTArfgXv-&h1NxE6c*Hj4$NjP6 zS}12Z;yUA!5r{eC!qKxkJZwKUSF>f|cVj!%%Y+?qeY7wVKO;)1Lta4!iqxId}p#+^BL$@51G#&BpF$?hCL_FE8ng?Tnt=~0cbBoYy7tUd;}0^+1M z-R&G3q(~_-@~hHPr&UyTEsIP`%pYJe;GeM1R8=#S1A-xy%~D{a$zFP;bO&$fl%N4LB#WTdI>tIFaGIJba~> zT(7=fs=9F{J=gjjg$(HMl!W4gp4W3usB29pUidyUCxHm?1Q9SB4Y}`cSA5BMA7+Z& z$}l$uW6Ib3O+RK_>!;6GNyM@7)+2#8Z7^v{KTPv&nzF2hpuZzLoC>tuNX9`8e6`gGCr#jK`s}<=7 z{DbQY@Cm>+4`zYR<;6ZeuF{Tr<}`Nn0%U??JICQ+yo5!}iI#~B1;)hZOn(Be`yXCr zOr-`?_SZasG)z`pSf zUayHJXR&sB-dL#1TjO(={0?M`aXH^&rGN_A$Fr48Rduf-`Rn$^hZnn}3}wpm?F!dl z+{&bjvF(5XpU7?N)=Yr24X2{?1?ZlO4DCZfgesjmF_XR~2v7x6L@w>QvfnhV^zs+c zcHg>K=}6{<@1foKy?M<#Zc$v3hqzMwy%IliCVI>*d%St0k~#d3STc)mztTkP;!3{_ zHce<=rzwB%h@J4}X0LvWnz<}4v%U++(v^kcZq7)X>@S1ne}E{Bw4boF`9}8C3;TYD z2=j|>sG6;;NAEZ^JraMs$Y8F%tfao?)f_#6PzYyXkGUE9f8aOQm-+@o!e6xy9Rnxj}UHKWu7+yGuG<<2=hdQhIwYJ zadd~Lo)8RRJ|WR*jVwB+T`K-?he<2IBwZQDXXi24#xKqJI8z=tkCDLx5?c|UBP(x~ z7YiH@4*+3+r3jr2fSxF|O3T~OkQ zcDQx36G_ar9iZkUrux-PysZsx6*1VLzRWFh%MPG&&UPxG;y88et`L>GgO08YJnJf5 z;OJRIv$Y9jwQ%qmT&D$`vY^l~B3i)v^j15W0~ekCVqET#h$*=oibV;V><_sr zg8};?=Dljg38!_xwv*WJ$?xy+7aF`$H?E}8&xhMcDD!p#(V3 zmzF+N7H?@z@&=1*Q*mymMti=5kI(cE%pFrDZsHQsKUP|@R?M6Q7%jFgpy|@$`I1)p zF*C*tY%Tk1n36sUICbNBqoHYnSQld>41sKItNOi!m~6~wZ(6Z+KbL{m4uStsyi15n zJ=V<+sf;X^8}3DuCyh*&?AwALeh$=BJgkRhBj!5GJ#-&LXbMtZ7L;Jy!J`pl36@T zqRYvkvlJQ`mAwv~p!oPM&;W|VG{GOQ-`rcaoWWxA)C?|=ig!HTj%1_mEOBkfp)NasTbLXuJr}XVd^3*4qf=6UQ}M|F_7Aw zP%)rGMq@S%Q%xhsrYlS5*9GDCSeXEXY18qz2#z-Kyc&2k_!0_5m%CaN#zBVMRfkrr zF9%q!bz6oFCutP3@6Ils(Qk}rLIK<~qhT4Z1Qi)W?)^+jb#?14vS#l$uA~Q=Gg(c7 zvAFHE;b!-y*yRrnl^^tnBLFSmE`-H&)DAaevS>tI!mpp^c%9uQ;w)uSqI*JO;|E@a z+N!DgZ=(47W8=$A!E=|Xbe2$0Wv`9M0aM8wr1fXikd8yMxn)%TzTjF4m8v z(5=<|<*Xh{?A}DyjsB}r%;n>C@?AE7L>5Bmp0xR5KtEl1nX{Oj65dKtFtw{V4KzqK zV&_0H4?seQT@04Pq}<)j>fO8h=?%rBe~fLrljxBHT_i4%W)Axzs>U3tJD?c$4m6Ui z-^z3SDb`iZ6IZo{!l+8rDi^2cv@0fL|VNpEI6!)i}?dzg`L-ci$S{SRiH# 
z!O)fZeTb!9#|f5~;e5#%ECP#0W*J@J#N zxfC5@imQAqB?*MpchU7ee6Mt0`7fbX*T;HnG=dPDa#jH6XOSFv6TVA(-t$+?UE({h z+Sh~%WJ{NK+X%V=W0I8>dxT_fmiZ3&O+z`Mvj67I-IHV2bJzv?&1At*{b#LOny>=3 zrzu5m2qizGg^k(Odn`5C6Ry>@fNdYi)qKQwrp3(O;wfI^8v=?uU7MLZrRL<8iYs-L z9k2TLK8=LY9{v&Es_BpadNho&t)-IF>nXq$KNILeAX&l9EU76M8-@CL<{G$_LIVxD zU^p7a*>QpTTA;S@2oK2k+E~MOPSDjhU6CD?KeyHXB9;!-@i)uUMrY^vH!S?-vr2%I z*5Rwih=FKTKhOG;ldV}BG-UCY&-iWNJ>cs}O~4Yc`t9!csINkpq8W|bYmA1EulQ$< z)`nT*3g}vqvu?eQ0J#@D61xIWqTjTt-yL< zLW_PnMH_^up4rdGn3@9_tzc}%Jvl5&#&dtP*NrBqeE@A20d5Nm*7qSF)q1JTnLE09 z_}H<(=l0^Ueqx8hSKEQ!a8<7936y5iYyTv0f9r+w2FTaS?p=(sWqtH`Zhk=E?J)_3 z^gr`|*}wZm?*p7!bVI3NDl0aSbi5_ouHjt$`S>R@PW^<+H<6+w` z2Cy7&aUOV#R-j6I`6>K$bLp5klVJi_K%$vl2r1=*Lf2_)eTM%EE-iY?WM3eN?wkC; zx~T#MuYq~%vqJDZ5%THONkgMa98V$gT!%Nsfrl{*SFC}wht*ohJ3}n;_cciV?B}2M zVbm;}x<(^T#qd3(LV z7}+*T7^_jB_O!8&q3Vd<&u@mQXmzUVPUdo>4-{XZB0dGo2q-)`4qqE-Lw*?dsH!_9 zfx#|sn;8ojTN|?Dzi9c@XBge?;Rfr}S?bX)%sw<%XEI8JSQ89MF~1h$fySvON{lt5 zMdi(n#1xSj6}0dqGf1q>_23^4p?DCIXX(xVKXkoiRGm$iwVM#!6FfKscMI+o+}+*X z9fG^NySuwXaCZyt?tX5b=j|R}kM6@iU~ed@?y6dAU29G_1R8NBp5MQx$6pEE8;gBL zN&l24JlrrWcV0Vll|QG?)E?KElzO{k&5G?f)cJF!*3Sc$XcnoV^pAQmYETFR-{BST@cK4h@&)wst+}znz@!5 zZpiJyo6&a}KjvhJEj>e?{MO{mAp>sF9Mc3e64ifOFkMqGIf|B(`jZSC+{sND#(Jd| zI)U`g0-wH<0$;^IYcIH8x;R9e4r}JuhkH~5MD3lq1v%SOP3;yXI%JX9%Zu;^#(2>e ze=InNjBv;(>A`$})@YzaTe0#jmoT5XY_<@^yAS;UmAs*fgBQ}r?)5#CESh})6B^(9&ORn6}}=qT)6 z9?N7jv9YxKw^MdiURtuei(e)?0cvMevf-OW5_yPvp~KSatH~A>=i!iumh{$7Gt{{H zu<K#2E9Jc^+vLh)cq<6B$?R{iub5tG%94B|!Ys=4VB(By) zZ{uu~1w9b{tm8ldmRConJ4I&b%W;kA$&36w9S)~VKmiK&SNF{2e@+(c)&VxAyp1(m z!+*~6|9aTr$k5I@5?go0<12xg?@M(nF`lLFV0v|XmaymT0$UzObiX{OFq$@NeE-G$ z>J;dn__+N%T2f{<=|1@pDDxtd(nJ3GVC>IQA!q}EIXidS{}SD~wpV3`Q(6)8+Zp9D zjLM#FU#IP27YfG*f%>6$dOH1)O*k|f`K$&B52#EHz->&6XDaw?}V zgp&y~l*$j-J#tHP?d-SqnlmoWom?&Y`EBmCX8{qQnp)##+@-c0Yy)P~OEE7&$xyRp z`FedBkpSfd?{6bFM$rM#@VMp8Xojv?U1*&OJeitZp2Az1JS9<4HkA8a47-6a@844l z7hHfL4g^fyeqWJc*LcdL>I?bVdc)Zgh7qoB8V!fDO^I`MLGcwkWI)DX{V3Xqr&{f5 z+VDHgI>j(mW~ZFXu|t{FN3Z&lzAG}8PZ?>kWaQoC?qu`)R*3^}*ugGAw_k56I*!r#WN@sNMJ{{b;hSK5i!28Le;-Z06~L^+{pUVqk6R!=RoA zmp~L{YCYFl*{Wko7MnGdS|Zrb?^$lPWlt;|I*fJ4nWK_F%N%)&`n6wt5-n3+A1%_6 z5x}!cpg{zz+63;3ePFXir+^!m3|lg z)SmQj_3KUXeVVXmM92E7s0-`H;%8UQ;+1sn$wxLX>$^;J;r|!gCgZ5tqY%NG4a)eT zxb8=QUIJ+l!x9SHb zc3zNvZH+)<x`F6_x1_@V!R61Vi7?Ifi+_EBo&e?zBwUj zM|!%Nyy#$!{Caj6O%B5fm&X1g8I;d5+W1H)WXWEf8M>q%|jW zXHKjEU0~}*5~&bLe8bO4{Nv);H6A~JSDsRYfc*9r0cjyNq37inz_inlVkIBD-(m`@ zgdtQp(z69Tol8pVV0H`OB1Snu_1iU}umLX8Z1zh}dZdX<7Ck zw*pd0TXgp}pCjKy#Wa`j*z3`#TL-N+jK)E&!9nn>xxQAV>yD!a6uz2d&4YJ(IPw7f zp%?c3aiVv%-2^zdWJqes)gvPuWSWb+%LxV?wVBvNm6%>J4Lg6lz70fQ_CDJl%KPYb-cbAAI_y_G=c$p=Y%pAF zb2*eOo)sww*s&^T#P0%77Yb~ZE}pAQuWIJI9ZKLrmgnsYxI;*8AS626)0};%+@REg zJ;uVg9Eb)iYEUVE7NvZpTHG%=T&ye!PG;(f%{q>J+aa%?!{)y!9IK_zSr^*8oE_N9 z)oQfB0kbQiR>cCj%FFZ}SKE3`jr`58rInh>m6GWZ!xujwuAXO#lHC4{HY?=%Uk>_) z*^hdnpUnMI03}T_5ar*luSYQ~(En|71Oc9V4n(_gTCwy~4f`XUf;GQHxDqNWe53EE z&OqY&667tmk#}!4!1=hDeiOZErM0vtUo=D1l7Al)TUZ?3E&2#_)Zc84=JzFps_-(i zy+tv$ykasO_zun^Qj}Ims&Ks>`AgJ!h1X`+b6#NaMCS(T{?VX`+Ky7|MEg8y^>;0+;a!Lc)AhAOEdA7aRs-+Y|GO5u1LK~)#$Qw zioJB{xu?MD$;{=}ubcAz*S2t^a-&+kLoD2<4QrqMQqt3Qx^RwKjy}QQx^ZY=2Z!pP za?`Q=XKq#?galIm%ZJvq$6!cCWu~&W33V8hSy`=;-Ua8MxwcUZf*>DKU@w9$7>rUn ze8oo7_17b0i||LPQ_b?b?Q+ZJR4JlHGP7beRBA32)$(daz*}RUPpeW>RklRs`<}zf zKa?>GJsEA-IwA1xQ=_aVN~4Y5pkJUJfSS`DU4ewxlybfeBItJXk<~x`j_aq?)k}y8 z+9PLY(X+5cI~hBN9Vy@9qA~Jf(D&fm!4~6Z5yN$_^1hf9Nlt=;JlY?L4GU8~+M_f7DNlwwToi1i+8Q>jXISb{ z2R81GQo?A3L$KP_VeO38emjA&-BSVigAJMnwP;D+YIk|$IdI%IX2~j}t}OJT6kj^R z)vmBBr};lO}9SsxFz0MxqOeLvCc4~b6+$CgkyXgYqr%tT3ym06-S 
[... GIT binary patch data (base85-encoded binary payload) omitted ...]

diff --git a/src/crewai_tools/tools/nl2sql/nl2sql_tool.py b/src/crewai_tools/tools/nl2sql/nl2sql_tool.py
index da70db8ac..ad93848c4 100644
--- a/src/crewai_tools/tools/nl2sql/nl2sql_tool.py
+++ b/src/crewai_tools/tools/nl2sql/nl2sql_tool.py
@@ -6,8 +6,8 @@ from sqlalchemy import create_engine, text
 from sqlalchemy.orm import sessionmaker
 
 
-class NL2SQL(BaseTool):
-    name: str = "NL2SQL"
+class NL2SQLTool(BaseTool):
+    name: str = "NL2SQLTool"
     description: str = "Converts natural language to SQL queries and executes them."
     db_uri: str = Field(
         title="Database URI",

From 225ee060306c67bcf3570209c318c04f475d0e07 Mon Sep 17 00:00:00 2001
From: Dheeraj Unni <116740203+Ryuzaki1415@users.noreply.github.com>
Date: Fri, 26 Jul 2024 17:05:58 +0530
Subject: [PATCH 121/391] Corrected the acronym 'RAG'

---
 src/crewai_tools/tools/github_search_tool/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/crewai_tools/tools/github_search_tool/README.md b/src/crewai_tools/tools/github_search_tool/README.md
index 55e01dd50..c77e494c8 100644
--- a/src/crewai_tools/tools/github_search_tool/README.md
+++ b/src/crewai_tools/tools/github_search_tool/README.md
@@ -1,7 +1,7 @@
 # GithubSearchTool
 
 ## Description
-The GithubSearchTool is a Read, Append, and Generate (RAG) tool specifically designed for conducting semantic searches within GitHub repositories. Utilizing advanced semantic search capabilities, it sifts through code, pull requests, issues, and repositories, making it an essential tool for developers, researchers, or anyone in need of precise information from GitHub.
+The GithubSearchTool is a Retrieval Augmented Generation (RAG) tool specifically designed for conducting semantic searches within GitHub repositories.
Utilizing advanced semantic search capabilities, it sifts through code, pull requests, issues, and repositories, making it an essential tool for developers, researchers, or anyone in need of precise information from GitHub. ## Installation To use the GithubSearchTool, first ensure the crewai_tools package is installed in your Python environment: From b343c71b9bcecf1b77aa8a98eb79e0bf5d7897a4 Mon Sep 17 00:00:00 2001 From: Eduardo Chiarotti Date: Tue, 30 Jul 2024 22:17:23 -0300 Subject: [PATCH 122/391] feat: Add Dall-E tool to generate images --- src/crewai_tools/tools/dalle_tool/README.MD | 0 .../tools/dalle_tool/dalle_tool.py | 48 +++++++++++++++++++ 2 files changed, 48 insertions(+) create mode 100644 src/crewai_tools/tools/dalle_tool/README.MD create mode 100644 src/crewai_tools/tools/dalle_tool/dalle_tool.py diff --git a/src/crewai_tools/tools/dalle_tool/README.MD b/src/crewai_tools/tools/dalle_tool/README.MD new file mode 100644 index 000000000..e69de29bb diff --git a/src/crewai_tools/tools/dalle_tool/dalle_tool.py b/src/crewai_tools/tools/dalle_tool/dalle_tool.py new file mode 100644 index 000000000..a4f7738a6 --- /dev/null +++ b/src/crewai_tools/tools/dalle_tool/dalle_tool.py @@ -0,0 +1,48 @@ +import json +from typing import Type + +from crewai_tools.tools.base_tool import BaseTool +from openai import OpenAI +from pydantic.v1 import BaseModel + + +class ImagePromptSchema(BaseModel): + """Input for Dall-E Tool.""" + + image_description: str = "Description of the image to be generated by Dall-E." + + +class DallETool(BaseTool): + name: str = "Dall-E Tool" + description: str = "Generates images using OpenAI's Dall-E model." + args_schema: Type[BaseModel] = ImagePromptSchema + + model: str = "dall-e-3" + size: str = "1024x1024" + quality: str = "standard" + n: int = 1 + + def _run(self, **kwargs) -> str: + client = OpenAI() + + image_description = kwargs.get("image_description") + + if not image_description: + return "Image description is required." + + response = client.images.generate( + model=self.model, + prompt=image_description, + size=self.size, + quality=self.quality, + n=self.n, + ) + + image_data = json.dumps( + { + "image_url": response.data[0].url, + "image_description": response.data[0].revised_prompt, + } + ) + + return image_data From 0070df7451a1a79b33e655628ab2f0d324d6c3eb Mon Sep 17 00:00:00 2001 From: Eduardo Chiarotti Date: Tue, 30 Jul 2024 22:29:45 -0300 Subject: [PATCH 123/391] docs: Add documentation for the DallETool --- src/crewai_tools/tools/dalle_tool/README.MD | 41 +++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/src/crewai_tools/tools/dalle_tool/README.MD b/src/crewai_tools/tools/dalle_tool/README.MD index e69de29bb..a315c7c10 100644 --- a/src/crewai_tools/tools/dalle_tool/README.MD +++ b/src/crewai_tools/tools/dalle_tool/README.MD @@ -0,0 +1,41 @@ +# DALL-E Tool + +## Description +This tool is used to give the Agent the ability to generate images using the DALL-E model. It is a transformer-based model that generates images from textual descriptions. This tool allows the Agent to generate images based on the text input provided by the user. + +## Installation +Install the crewai_tools package +```shell +pip install 'crewai[tools]' +``` + +## Example + +Remember that when using this tool, the text must be generated by the Agent itself. The text must be a description of the image you want to generate. + +```python +from crewai_tools import DallETool + +Agent( + ... 
+ tools=[DallETool()], +) +``` + +If needed you can also tweak the parameters of the DALL-E model by passing them as arguments to the `DallETool` class. For example: + +```python +from crewai_tools import DallETool + +dalle_tool = DallETool(model: str = "dall-e-3", + size: str = "1024x1024", + quality: str = "standard", + n: int = 1) + +Agent( + ... + tools=[dalle_tool] +) +``` + +The parameter are based on the `client.images.generate` method from the OpenAI API. For more information on the parameters, please refer to the [OpenAI API documentation](https://platform.openai.com/docs/guides/images/introduction?lang=python). From 4835c2bf68bc98bd28e29bee22a4d7d5b6b0cd49 Mon Sep 17 00:00:00 2001 From: Eduardo Chiarotti Date: Wed, 31 Jul 2024 17:10:26 -0300 Subject: [PATCH 124/391] feat: Add Vision tool to the CrewAI tool --- src/crewai_tools/__init__.py | 10 +- src/crewai_tools/tools/__init__.py | 24 +++-- src/crewai_tools/tools/vision_tool/README.md | 30 ++++++ .../tools/vision_tool/vision_tool.py | 93 +++++++++++++++++++ 4 files changed, 146 insertions(+), 11 deletions(-) create mode 100644 src/crewai_tools/tools/vision_tool/README.md create mode 100644 src/crewai_tools/tools/vision_tool/vision_tool.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index a049cdc5b..b049d630d 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -17,23 +17,25 @@ from .tools import ( LlamaIndexTool, MDXSearchTool, MultiOnTool, + NL2SQLTool, PDFSearchTool, PGSearchTool, RagTool, ScrapeElementFromWebsiteTool, - ScrapflyScrapeWebsiteTool, ScrapeWebsiteTool, + ScrapflyScrapeWebsiteTool, SeleniumScrapingTool, SerperDevTool, - SerplyWebSearchTool, + SerplyJobSearchTool, SerplyNewsSearchTool, SerplyScholarSearchTool, SerplyWebpageToMarkdownTool, - SerplyJobSearchTool, + SerplyWebSearchTool, TXTSearchTool, + VisionTool, WebsiteSearchTool, XMLSearchTool, YoutubeChannelSearchTool, - YoutubeVideoSearchTool + YoutubeVideoSearchTool, ) from .tools.base_tool import BaseTool, Tool, tool diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index a72fda277..483ebda21 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -8,14 +8,19 @@ from .directory_search_tool.directory_search_tool import DirectorySearchTool from .docx_search_tool.docx_search_tool import DOCXSearchTool from .exa_tools.exa_search_tool import EXASearchTool from .file_read_tool.file_read_tool import FileReadTool -from .firecrawl_crawl_website_tool.firecrawl_crawl_website_tool import FirecrawlCrawlWebsiteTool -from .firecrawl_scrape_website_tool.firecrawl_scrape_website_tool import FirecrawlScrapeWebsiteTool +from .firecrawl_crawl_website_tool.firecrawl_crawl_website_tool import ( + FirecrawlCrawlWebsiteTool, +) +from .firecrawl_scrape_website_tool.firecrawl_scrape_website_tool import ( + FirecrawlScrapeWebsiteTool, +) from .firecrawl_search_tool.firecrawl_search_tool import FirecrawlSearchTool from .github_search_tool.github_search_tool import GithubSearchTool from .json_search_tool.json_search_tool import JSONSearchTool from .llamaindex_tool.llamaindex_tool import LlamaIndexTool from .mdx_seach_tool.mdx_search_tool import MDXSearchTool from .multion_tool.multion_tool import MultiOnTool +from .nl2sql.nl2sql_tool import NL2SQLTool from .pdf_search_tool.pdf_search_tool import PDFSearchTool from .pg_seach_tool.pg_search_tool import PGSearchTool from .rag.rag_tool import RagTool @@ -23,17 +28,22 @@ from 
.scrape_element_from_website.scrape_element_from_website import ( ScrapeElementFromWebsiteTool, ) from .scrape_website_tool.scrape_website_tool import ScrapeWebsiteTool -from .scrapfly_scrape_website_tool.scrapfly_scrape_website_tool import ScrapflyScrapeWebsiteTool +from .scrapfly_scrape_website_tool.scrapfly_scrape_website_tool import ( + ScrapflyScrapeWebsiteTool, +) from .selenium_scraping_tool.selenium_scraping_tool import SeleniumScrapingTool from .serper_dev_tool.serper_dev_tool import SerperDevTool -from .serply_api_tool.serply_web_search_tool import SerplyWebSearchTool +from .serply_api_tool.serply_job_search_tool import SerplyJobSearchTool from .serply_api_tool.serply_news_search_tool import SerplyNewsSearchTool from .serply_api_tool.serply_scholar_search_tool import SerplyScholarSearchTool +from .serply_api_tool.serply_web_search_tool import SerplyWebSearchTool from .serply_api_tool.serply_webpage_to_markdown_tool import SerplyWebpageToMarkdownTool -from .serply_api_tool.serply_job_search_tool import SerplyJobSearchTool +from .spider_tool.spider_tool import SpiderTool from .txt_search_tool.txt_search_tool import TXTSearchTool +from .vision_tool.vision_tool import VisionTool from .website_search.website_search_tool import WebsiteSearchTool from .xml_search_tool.xml_search_tool import XMLSearchTool -from .youtube_channel_search_tool.youtube_channel_search_tool import YoutubeChannelSearchTool +from .youtube_channel_search_tool.youtube_channel_search_tool import ( + YoutubeChannelSearchTool, +) from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool -from .spider_tool.spider_tool import SpiderTool diff --git a/src/crewai_tools/tools/vision_tool/README.md b/src/crewai_tools/tools/vision_tool/README.md new file mode 100644 index 000000000..bf7ab7486 --- /dev/null +++ b/src/crewai_tools/tools/vision_tool/README.md @@ -0,0 +1,30 @@ +# Vision Tool + +## Description + +This tool is used to extract text from images. When passed to the agent it will extract the text from the image and then use it to generate a response, report or any other output. The URL or the PATH of the image should be passed to the Agent. + + +## Installation +Install the crewai_tools package +```shell +pip install 'crewai[tools]' +``` + +## Usage + +In order to use the VisionTool, the OpenAI API key should be set in the environment variable `OPENAI_API_KEY`. + +```python +from crewai_tools import VisionTool + +vision_tool = VisionTool() + +@agent +def researcher(self) -> Agent: + return Agent( + config=self.agents_config["researcher"], + allow_delegation=False, + tools=[vision_tool] + ) +``` diff --git a/src/crewai_tools/tools/vision_tool/vision_tool.py b/src/crewai_tools/tools/vision_tool/vision_tool.py new file mode 100644 index 000000000..a9abd5c43 --- /dev/null +++ b/src/crewai_tools/tools/vision_tool/vision_tool.py @@ -0,0 +1,93 @@ +import base64 +from typing import Type + +import requests +from crewai_tools.tools.base_tool import BaseTool +from openai import OpenAI +from pydantic.v1 import BaseModel + + +class ImagePromptSchema(BaseModel): + """Input for Vision Tool.""" + + image_path_url: str = "The image path or URL." + + +class VisionTool(BaseTool): + name: str = "Vision Tool" + description: str = ( + "This tool uses OpenAI's Vision API to describe the contents of an image." 
+ ) + args_schema: Type[BaseModel] = ImagePromptSchema + + def _run_web_hosted_images(self, client, image_path_url: str) -> str: + response = client.chat.completions.create( + model="gpt-4o-mini", + messages=[ + { + "role": "user", + "content": [ + {"type": "text", "text": "What's in this image?"}, + { + "type": "image_url", + "image_url": {"url": image_path_url}, + }, + ], + } + ], + max_tokens=300, + ) + + return response.choices[0].message.content + + def _run_local_images(self, client, image_path_url: str) -> str: + base64_image = self._encode_image(image_path_url) + + headers = { + "Content-Type": "application/json", + "Authorization": f"Bearer {client.api_key}", + } + + payload = { + "model": "gpt-4o-mini", + "messages": [ + { + "role": "user", + "content": [ + {"type": "text", "text": "What's in this image?"}, + { + "type": "image_url", + "image_url": { + "url": f"data:image/jpeg;base64,{base64_image}" + }, + }, + ], + } + ], + "max_tokens": 300, + } + + response = requests.post( + "https://api.openai.com/v1/chat/completions", headers=headers, json=payload + ) + + return response.json()["choices"][0]["message"]["content"] + + def _run(self, **kwargs) -> str: + client = OpenAI() + + image_path_url = kwargs.get("image_path_url") + + if not image_path_url: + return "Image Path or URL is required." + + if "http" in image_path_url: + image_description = self._run_web_hosted_images(client, image_path_url) + else: + image_description = self._run_local_images(client, image_path_url) + + return image_description + + def _encode_image(self, image_path: str): + with open(image_path, "rb") as image_file: + return base64.b64encode(image_file.read()).decode("utf-8") From a2e38b61217d5ecb6687b94b648e38543e0ec97c Mon Sep 17 00:00:00 2001 From: rafaelsideguide <150964962+rafaelsideguide@users.noreply.github.com> Date: Tue, 6 Aug 2024 13:56:44 -0300 Subject: [PATCH 125/391] Improvements on default values and description --- .../firecrawl_crawl_website_tool.py | 5 +++++ .../firecrawl_scrape_website_tool.py | 9 ++++++++- .../tools/firecrawl_search_tool/firecrawl_search_tool.py | 5 +++++ 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py index 5c796189a..7c0b4c7fb 100644 --- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py @@ -26,6 +26,11 @@ class FirecrawlCrawlWebsiteTool(BaseTool): self.firecrawl = FirecrawlApp(api_key=api_key) def _run(self, url: str, crawler_options: Optional[Dict[str, Any]] = None, page_options: Optional[Dict[str, Any]] = None): + if (crawler_options is None): + crawler_options = {} + if (page_options is None): + page_options = {} + options = { "crawlerOptions": crawler_options, "pageOptions": page_options diff --git a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py index 8540b13ff..29d6e238b 100644 --- a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py @@ -6,7 +6,7 @@ class FirecrawlScrapeWebsiteToolSchema(BaseModel): url: str = Field(description="Website URL") page_options: Optional[Dict[str, Any]] = Field(default=None, 
description="Options for page scraping") extractor_options: Optional[Dict[str, Any]] = Field(default=None, description="Options for data extraction") - timeout: Optional[int] = Field(default=None, description="Timeout for the scraping operation") + timeout: Optional[int] = Field(default=None, description="Timeout in milliseconds for the scraping operation. The default value is 30000.") class FirecrawlScrapeWebsiteTool(BaseTool): name: str = "Firecrawl web scrape tool" @@ -27,6 +27,13 @@ class FirecrawlScrapeWebsiteTool(BaseTool): self.firecrawl = FirecrawlApp(api_key=api_key) def _run(self, url: str, page_options: Optional[Dict[str, Any]] = None, extractor_options: Optional[Dict[str, Any]] = None, timeout: Optional[int] = None): + if page_options is None: + page_options = {} + if extractor_options is None: + extractor_options = {} + if timeout is None: + timeout = 30000 + options = { "pageOptions": page_options, "extractorOptions": extractor_options, diff --git a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py index 89843f797..5b01ce8c5 100644 --- a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py +++ b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py @@ -26,6 +26,11 @@ class FirecrawlSearchTool(BaseTool): self.firecrawl = FirecrawlApp(api_key=api_key) def _run(self, query: str, page_options: Optional[Dict[str, Any]] = None, result_options: Optional[Dict[str, Any]] = None): + if (page_options is None): + page_options = {} + if (result_options is None): + result_options = {} + options = { "pageOptions": page_options, "resultOptions": result_options From 50dc37ad5be50119ee9dcac4f7f258ce34390672 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Sat, 10 Aug 2024 23:35:39 -0300 Subject: [PATCH 126/391] adding dalle import --- src/crewai_tools/__init__.py | 1 + src/crewai_tools/tools/__init__.py | 1 + 2 files changed, 2 insertions(+) diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index a26e3609b..1dfbc7df5 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -4,6 +4,7 @@ from .tools import ( CodeInterpreterTool, ComposioTool, CSVSearchTool, + DallETool, DirectoryReadTool, DirectorySearchTool, DOCXSearchTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 24fba3ba9..36e904bfd 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -3,6 +3,7 @@ from .code_docs_search_tool.code_docs_search_tool import CodeDocsSearchTool from .code_interpreter_tool.code_interpreter_tool import CodeInterpreterTool from .composio_tool.composio_tool import ComposioTool from .csv_search_tool.csv_search_tool import CSVSearchTool +from .dalle_tool.dalle_tool import DallETool from .directory_read_tool.directory_read_tool import DirectoryReadTool from .directory_search_tool.directory_search_tool import DirectorySearchTool from .docx_search_tool.docx_search_tool import DOCXSearchTool From 5dd49762e3533511b6ab24de8598425615b011e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Sun, 11 Aug 2024 00:46:32 -0300 Subject: [PATCH 127/391] Fixing imports and cutting new patch --- src/crewai_tools/__init__.py | 2 +- src/crewai_tools/tools/__init__.py | 10 +++--- .../file_writer_tool/file_writer_tool.py | 33 ++++++++++++++----- src/crewai_tools/tools/nl2sql/nl2sql_tool.py | 11 ++++++- 4 files changed, 41 insertions(+), 15 deletions(-) 
diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index ac51c0fd1..b5dcc81b9 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -10,7 +10,7 @@ from .tools import ( DOCXSearchTool, EXASearchTool, FileReadTool, - FileWriterTool + FileWriterTool, FirecrawlCrawlWebsiteTool, FirecrawlScrapeWebsiteTool, FirecrawlSearchTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index f90aee951..9016c57fd 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -11,10 +11,10 @@ from .exa_tools.exa_search_tool import EXASearchTool from .file_read_tool.file_read_tool import FileReadTool from .file_writer_tool.file_writer_tool import FileWriterTool from .firecrawl_crawl_website_tool.firecrawl_crawl_website_tool import ( - FirecrawlCrawlWebsiteTool, + FirecrawlCrawlWebsiteTool ) from .firecrawl_scrape_website_tool.firecrawl_scrape_website_tool import ( - FirecrawlScrapeWebsiteTool, + FirecrawlScrapeWebsiteTool ) from .firecrawl_search_tool.firecrawl_search_tool import FirecrawlSearchTool from .github_search_tool.github_search_tool import GithubSearchTool @@ -27,11 +27,11 @@ from .pdf_search_tool.pdf_search_tool import PDFSearchTool from .pg_seach_tool.pg_search_tool import PGSearchTool from .rag.rag_tool import RagTool from .scrape_element_from_website.scrape_element_from_website import ( - ScrapeElementFromWebsiteTool, + ScrapeElementFromWebsiteTool ) from .scrape_website_tool.scrape_website_tool import ScrapeWebsiteTool from .scrapfly_scrape_website_tool.scrapfly_scrape_website_tool import ( - ScrapflyScrapeWebsiteTool, + ScrapflyScrapeWebsiteTool ) from .selenium_scraping_tool.selenium_scraping_tool import SeleniumScrapingTool from .serper_dev_tool.serper_dev_tool import SerperDevTool @@ -46,7 +46,7 @@ from .vision_tool.vision_tool import VisionTool from .website_search.website_search_tool import WebsiteSearchTool from .xml_search_tool.xml_search_tool import XMLSearchTool from .youtube_channel_search_tool.youtube_channel_search_tool import ( - YoutubeChannelSearchTool, + YoutubeChannelSearchTool ) from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool from .mysql_search_tool.mysql_search_tool import MySQLSearchTool diff --git a/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py b/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py index 21db460cc..eb185aa75 100644 --- a/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py +++ b/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py @@ -1,22 +1,39 @@ import os -from crewai_tools import BaseTool +from typing import Optional, Type, Any + +from pydantic.v1 import BaseModel +from ..base_tool import BaseTool + +class FileWriterToolInput(BaseModel): + filename: str + content: str + directory: Optional[str] = None + overwrite: bool = False class FileWriterTool(BaseTool): name: str = "File Writer Tool" - description: str = "A tool to write content to a specified file. Accepts filename, content, and optionally a directory path as input." + description: str = "A tool to write content to a specified file. Accepts filename, content, and optionally a directory path and overwrite flag as input." 
+ args_schema: Type[BaseModel] = FileWriterToolInput - def _run(self, filename: str, content: str, directory: str = '.') -> str: + def _run(self, **kwargs: Any) -> str: try: # Create the directory if it doesn't exist - if directory and not os.path.exists(directory): - os.makedirs(directory) + if kwargs['directory'] and not os.path.exists(kwargs['directory']): + os.makedirs(kwargs['directory']) # Construct the full path - filepath = os.path.join(directory, filename) + filepath = os.path.join(kwargs['directory'] or '', kwargs['filename']) + + # Check if file exists and overwrite is not allowed + if os.path.exists(filepath) and not kwargs['overwrite']: + return f"File {filepath} already exists and overwrite option was not passed." # Write content to the file - with open(filepath, 'w') as file: - file.write(content) + mode = 'w' if kwargs['overwrite'] else 'x' + with open(filepath, mode) as file: + file.write(kwargs['content']) return f"Content successfully written to {filepath}" + except FileExistsError: + return f"File {filepath} already exists and overwrite option was not passed." except Exception as e: return f"An error occurred while writing to the file: {str(e)}" diff --git a/src/crewai_tools/tools/nl2sql/nl2sql_tool.py b/src/crewai_tools/tools/nl2sql/nl2sql_tool.py index ad93848c4..7ddcd090c 100644 --- a/src/crewai_tools/tools/nl2sql/nl2sql_tool.py +++ b/src/crewai_tools/tools/nl2sql/nl2sql_tool.py @@ -1,10 +1,18 @@ from typing import Any, Union -from crewai_tools import BaseTool +from ..base_tool import BaseTool from pydantic import Field from sqlalchemy import create_engine, text from sqlalchemy.orm import sessionmaker +from typing import Optional, Type, Any +from pydantic.v1 import BaseModel + +class NL2SQLToolInput(BaseModel): + sql_query: str = Field( + title="SQL Query", + description="The SQL query to execute.", + ) class NL2SQLTool(BaseTool): name: str = "NL2SQLTool" @@ -15,6 +23,7 @@ class NL2SQLTool(BaseTool): ) tables: list = [] columns: dict = {} + args_schema: Type[BaseModel] = NL2SQLToolInput def model_post_init(self, __context: Any) -> None: data = {} From 64762887f0c36b597166e70cd27be13fc860c8a1 Mon Sep 17 00:00:00 2001 From: Eduardo Chiarotti Date: Tue, 13 Aug 2024 08:39:20 -0300 Subject: [PATCH 128/391] docs: fix issue on Dalle tool docs --- src/crewai_tools/tools/dalle_tool/README.MD | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/crewai_tools/tools/dalle_tool/README.MD b/src/crewai_tools/tools/dalle_tool/README.MD index a315c7c10..5924fb7d1 100644 --- a/src/crewai_tools/tools/dalle_tool/README.MD +++ b/src/crewai_tools/tools/dalle_tool/README.MD @@ -27,10 +27,10 @@ If needed you can also tweak the parameters of the DALL-E model by passing them ```python from crewai_tools import DallETool -dalle_tool = DallETool(model: str = "dall-e-3", - size: str = "1024x1024", - quality: str = "standard", - n: int = 1) +dalle_tool = DallETool(model="dall-e-3", + size="1024x1024", + quality="standard", + n= 1) Agent( ... 
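To make the Dall-E documentation above concrete, here is a minimal, hypothetical sketch of calling the tool directly, outside a crew. It assumes `OPENAI_API_KEY` is exported and that `BaseTool.run` forwards keyword arguments to `_run`; the prompt string is invented for illustration.

```python
import json

from crewai_tools import DallETool

# Minimal sketch, not part of the patch series.
# Assumes OPENAI_API_KEY is set in the environment.
dalle_tool = DallETool(model="dall-e-3", size="1024x1024", quality="standard", n=1)

# DallETool._run returns a JSON string with "image_url" and
# "image_description" (the model's revised prompt).
result = json.loads(
    dalle_tool.run(image_description="A watercolor painting of a lighthouse at dawn")
)
print(result["image_url"])
print(result["image_description"])
```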
From b47926b1d9bc8b9e202c74f6ff9dfba2f5c6e7c1 Mon Sep 17 00:00:00 2001
From: Eduardo Chiarotti
Date: Tue, 13 Aug 2024 08:39:49 -0300
Subject: [PATCH 129/391] docs: fix plural

---
 src/crewai_tools/tools/dalle_tool/README.MD | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/crewai_tools/tools/dalle_tool/README.MD b/src/crewai_tools/tools/dalle_tool/README.MD
index 5924fb7d1..a33cb55af 100644
--- a/src/crewai_tools/tools/dalle_tool/README.MD
+++ b/src/crewai_tools/tools/dalle_tool/README.MD
@@ -38,4 +38,4 @@ Agent(
 )
 ```
 
-The parameter are based on the `client.images.generate` method from the OpenAI API. For more information on the parameters, please refer to the [OpenAI API documentation](https://platform.openai.com/docs/guides/images/introduction?lang=python).
+The parameters are based on the `client.images.generate` method from the OpenAI API. For more information on the parameters, please refer to the [OpenAI API documentation](https://platform.openai.com/docs/guides/images/introduction?lang=python).

From a228732423a46c8504c3a3ec05b8ad2c54518786 Mon Sep 17 00:00:00 2001
From: Eduardo Chiarotti
Date: Tue, 13 Aug 2024 08:40:12 -0300
Subject: [PATCH 130/391] docs: fix

---
 src/crewai_tools/tools/dalle_tool/README.MD | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/crewai_tools/tools/dalle_tool/README.MD b/src/crewai_tools/tools/dalle_tool/README.MD
index a33cb55af..8f65e78e5 100644
--- a/src/crewai_tools/tools/dalle_tool/README.MD
+++ b/src/crewai_tools/tools/dalle_tool/README.MD
@@ -30,7 +30,7 @@ from crewai_tools import DallETool
 dalle_tool = DallETool(model="dall-e-3",
                        size="1024x1024",
                        quality="standard",
-                       n= 1)
+                       n=1)
 
 Agent(
     ...

From e8b185e607c361a84c6a3d097c3546e3a608fa00 Mon Sep 17 00:00:00 2001
From: Thiago Moretto
Date: Wed, 14 Aug 2024 14:05:44 -0300
Subject: [PATCH 131/391] Fix NL2SQL pydantic version conflict

---
 src/crewai_tools/tools/nl2sql/nl2sql_tool.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/src/crewai_tools/tools/nl2sql/nl2sql_tool.py b/src/crewai_tools/tools/nl2sql/nl2sql_tool.py
index 7ddcd090c..22c3a299b 100644
--- a/src/crewai_tools/tools/nl2sql/nl2sql_tool.py
+++ b/src/crewai_tools/tools/nl2sql/nl2sql_tool.py
@@ -1,12 +1,11 @@
 from typing import Any, Union
 
 from ..base_tool import BaseTool
-from pydantic import Field
+from pydantic import BaseModel, Field
 from sqlalchemy import create_engine, text
 from sqlalchemy.orm import sessionmaker
 
-from typing import Optional, Type, Any
-from pydantic.v1 import BaseModel
+from typing import Type, Any
 
 class NL2SQLToolInput(BaseModel):
     sql_query: str = Field(

From 8007938d6b7c68c1314e35260306af1b614c089a Mon Sep 17 00:00:00 2001
From: Eduardo Chiarotti
Date: Tue, 27 Aug 2024 08:35:56 -0300
Subject: [PATCH 132/391] feat: Add volume option

---
 .../tools/code_interpreter_tool/README.md     | 11 +++++
 .../code_interpreter_tool.py                  | 42 ++++++++++++++-----
 2 files changed, 43 insertions(+), 10 deletions(-)

diff --git a/src/crewai_tools/tools/code_interpreter_tool/README.md b/src/crewai_tools/tools/code_interpreter_tool/README.md
index e66a82e39..035f910a4 100644
--- a/src/crewai_tools/tools/code_interpreter_tool/README.md
+++ b/src/crewai_tools/tools/code_interpreter_tool/README.md
@@ -27,3 +27,14 @@ Agent(
     tools=[CodeInterpreterTool()],
 )
 ```
+
+Or if you need to pass your own Dockerfile just do this
+
+```python
+from crewai_tools import CodeInterpreterTool
+
+Agent(
+    ...
+    tools=[CodeInterpreterTool(user_docker_file_path="")],
+)
+```

diff --git a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py
index f341e52d0..2bde9a54b 100644
--- a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py
+++ b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py
@@ -25,7 +25,9 @@ class CodeInterpreterTool(BaseTool):
     name: str = "Code Interpreter"
     description: str = "Interprets Python3 code strings with a final print statement."
     args_schema: Type[BaseModel] = CodeInterpreterSchema
+    default_image_tag: str = "code-interpreter:latest"
     code: Optional[str] = None
+    user_docker_file_path: Optional[str] = None
 
     @staticmethod
     def _get_installed_package_path():
@@ -34,23 +36,31 @@ class CodeInterpreterTool(BaseTool):
 
     def _verify_docker_image(self) -> None:
         """
-        Verify if the Docker image is available
+        Verify if the Docker image is available. Optionally use a user-provided Dockerfile.
         """
-        image_tag = "code-interpreter:latest"
         client = docker.from_env()
 
         try:
-            client.images.get(image_tag)
+            client.images.get(self.default_image_tag)
 
         except docker.errors.ImageNotFound:
-            package_path = self._get_installed_package_path()
-            dockerfile_path = os.path.join(package_path, "tools/code_interpreter_tool")
-            if not os.path.exists(dockerfile_path):
-                raise FileNotFoundError(f"Dockerfile not found in {dockerfile_path}")
+            if self.user_docker_file_path and os.path.exists(
+                self.user_docker_file_path
+            ):
+                dockerfile_path = self.user_docker_file_path
+            else:
+                package_path = self._get_installed_package_path()
+                dockerfile_path = os.path.join(
+                    package_path, "tools/code_interpreter_tool"
+                )
+                if not os.path.exists(dockerfile_path):
+                    raise FileNotFoundError(
+                        f"Dockerfile not found in {dockerfile_path}"
+                    )
 
             client.images.build(
                 path=dockerfile_path,
-                tag=image_tag,
+                tag=self.default_image_tag,
                 rm=True,
             )
 
@@ -69,13 +79,25 @@ class CodeInterpreterTool(BaseTool):
             container.exec_run(f"pip install {library}")
 
     def _init_docker_container(self) -> docker.models.containers.Container:
+        container_name = "code-interpreter"
         client = docker.from_env()
+        current_path = os.getcwd()
+
+        # Check if the container is already running
+        try:
+            existing_container = client.containers.get(container_name)
+            existing_container.stop()
+            existing_container.remove()
+        except docker.errors.NotFound:
+            pass  # Container does not exist, no need to remove
+
         return client.containers.run(
-            "code-interpreter",
+            self.default_image_tag,
             detach=True,
             tty=True,
             working_dir="/workspace",
-            name="code-interpreter",
+            name=container_name,
+            volumes={current_path: {"bind": "/workspace", "mode": "rw"}},  # type: ignore
         )
 
     def run_code_in_docker(self, code: str, libraries_used: List[str]) -> str:

From d146e4a961812a5bae25279f2438fab918350eef Mon Sep 17 00:00:00 2001
From: Eduardo Chiarotti
Date: Tue, 27 Aug 2024 09:01:09 -0300
Subject: [PATCH 133/391] feat: change the name of user dockerfile path

---
 src/crewai_tools/tools/code_interpreter_tool/README.md    | 2 +-
 .../tools/code_interpreter_tool/code_interpreter_tool.py  | 8 +++-----
 2 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/src/crewai_tools/tools/code_interpreter_tool/README.md b/src/crewai_tools/tools/code_interpreter_tool/README.md
index 035f910a4..bc73df7a4 100644
--- a/src/crewai_tools/tools/code_interpreter_tool/README.md
+++ b/src/crewai_tools/tools/code_interpreter_tool/README.md
@@ -35,6 +35,6 @@ from crewai_tools import
CodeInterpreterTool Agent( ... - tools=[CodeInterpreterTool(user_docker_file_path="")], + tools=[CodeInterpreterTool(user_dockerfile_path="")], ) ``` diff --git a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py index 2bde9a54b..6f427e440 100644 --- a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py +++ b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py @@ -27,7 +27,7 @@ class CodeInterpreterTool(BaseTool): args_schema: Type[BaseModel] = CodeInterpreterSchema default_image_tag: str = "code-interpreter:latest" code: Optional[str] = None - user_docker_file_path: Optional[str] = None + user_dockerfile_path: Optional[str] = None @staticmethod def _get_installed_package_path(): @@ -44,10 +44,8 @@ class CodeInterpreterTool(BaseTool): client.images.get(self.default_image_tag) except docker.errors.ImageNotFound: - if self.user_docker_file_path and os.path.exists( - self.user_docker_file_path - ): - dockerfile_path = self.user_docker_file_path + if self.user_dockerfile_path and os.path.exists(self.user_dockerfile_path): + dockerfile_path = self.user_dockerfile_path else: package_path = self._get_installed_package_path() dockerfile_path = os.path.join( From 595757362556b5a7cdecf288b0dcf9992aa2246b Mon Sep 17 00:00:00 2001 From: theCyberTech <84775494+theCyberTech@users.noreply.github.com> Date: Thu, 29 Aug 2024 17:41:43 +0800 Subject: [PATCH 134/391] Updated github repo links _ readme.md --- README.md | 36 ++++++++++++++++++++++++++++-------- 1 file changed, 28 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 6af85df02..f622ec49d 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@
# **crewAI Tools** -This document provides a comprehensive guide for setting up sophisticated tools for [crewAI](https://github.com/joaomdmoura/crewai) agents, facilitating the creation of bespoke tooling to empower your AI solutions. +Welcome to crewAI Tools! This repository provides a comprehensive guide for setting up sophisticated tools for [crewAI](https://github.com/crewAIInc/crewAI) agents, empowering your AI solutions with bespoke tooling. In the realm of CrewAI agents, tools are pivotal for enhancing functionality. This guide outlines the steps to equip your agents with an arsenal of ready-to-use tools and the methodology to craft your own. @@ -13,7 +13,7 @@ In the realm of CrewAI agents, tools are pivotal for enhancing functionality. Th

-[Homepage](https://www.crewai.io/) | [Documentation](https://docs.crewai.com/) | [Chat with Docs](https://chatg.pt/DWjSBZn) | [Examples](https://github.com/joaomdmoura/crewai-examples) | [Discord](https://discord.com/invite/X4JWnZnxPb) +[Homepage](https://www.crewai.io/) | [Documentation](https://docs.crewai.com/) | [Chat with Docs](https://chatg.pt/DWjSBZn) | [Examples](https://github.com/crewAIInc/crewAI-examples) | [Discord](https://discord.com/invite/X4JWnZnxPb)

@@ -27,6 +27,19 @@ In the realm of CrewAI agents, tools are pivotal for enhancing functionality. Th - [Contribution Guidelines](#contribution-guidelines) - [Development Setup](#development-setup) +## Available Tools + +crewAI Tools provides a wide range of pre-built tools, including: + +- File operations (FileWriterTool, FileReadTool) +- Web scraping (ScrapeWebsiteTool, SeleniumScrapingTool) +- Database interactions (PGSearchTool, MySQLSearchTool) +- API integrations (SerperApiTool, EXASearchTool) +- AI-powered tools (DallETool, VisionTool) +- And many more! + +For a complete list and detailed documentation of each tool, please refer to the individual tool README files in the repository. + ## Creating Your Tools Tools are always expect to return strings, as they are meant to be used by the agents to generate responses. @@ -68,14 +81,15 @@ The `tool` decorator simplifies the process, transforming functions into tools w ## Contribution Guidelines -We eagerly welcome contributions to enrich this toolset. To contribute: +We welcome contributions! Here's how you can help: -1. **Fork the Repository:** Begin with forking the repository to your GitHub account. -2. **Feature Branch:** Create a new branch in your fork for the feature or improvement. -3. **Implement Your Feature:** Add your contribution to the new branch. -4. **Pull Request:** Submit a pull request from your feature branch to the main repository. +1. Fork the repository +2. Create a feature branch (`git checkout -b feature/AmazingFeature`) +3. Commit your changes (`git commit -m 'Add some AmazingFeature'`) +4. Push to the branch (`git push origin feature/AmazingFeature`) +5. Open a Pull Request -Your contributions are greatly appreciated and will help enhance this project. +Please ensure your code adheres to our coding standards and includes appropriate tests. ## **Development Setup** @@ -122,3 +136,9 @@ pip install dist/*.tar.gz ``` Thank you for your interest in enhancing the capabilities of AI agents through advanced tooling. Your contributions make a significant impact. + +## Contact + +For questions or support, please join our [Discord community](https://discord.com/invite/X4JWnZnxPb) or open an issue in this repository. 
+ + From 35fe222ca11de7b2eec25f30a4a3fce30f38f3fa Mon Sep 17 00:00:00 2001 From: Brandon Hancock Date: Tue, 3 Sep 2024 15:57:29 -0400 Subject: [PATCH 135/391] Move off v1 --- src/crewai_tools/tools/base_tool.py | 16 ++- .../browserbase_load_tool.py | 12 +- .../code_docs_search_tool.py | 2 +- .../code_interpreter_tool.py | 3 +- .../tools/csv_search_tool/csv_search_tool.py | 2 +- .../tools/dalle_tool/dalle_tool.py | 5 +- .../directory_read_tool.py | 68 +++++---- .../directory_search_tool.py | 2 +- .../docx_search_tool/docx_search_tool.py | 14 +- .../tools/exa_tools/exa_base_tool.py | 67 +++++---- .../tools/file_read_tool/file_read_tool.py | 23 ++- .../file_writer_tool/file_writer_tool.py | 27 ++-- .../firecrawl_crawl_website_tool.py | 42 +++--- .../firecrawl_scrape_website_tool.py | 41 ++++-- .../firecrawl_search_tool.py | 40 ++++-- .../github_search_tool/github_search_tool.py | 6 +- .../json_search_tool/json_search_tool.py | 2 +- .../tools/llamaindex_tool/llamaindex_tool.py | 51 +++---- .../tools/mdx_seach_tool/mdx_search_tool.py | 2 +- .../mysql_search_tool/mysql_search_tool.py | 6 +- .../tools/pdf_search_tool/pdf_search_tool.py | 3 +- .../tools/pg_seach_tool/pg_search_tool.py | 6 +- .../scrape_element_from_website.py | 109 ++++++++------ .../scrape_website_tool.py | 104 ++++++++------ .../scrapfly_scrape_website_tool.py | 32 +++-- .../selenium_scraping_tool.py | 131 +++++++++-------- .../tools/serper_dev_tool/serper_dev_tool.py | 134 ++++++++++-------- .../serply_api_tool/serply_job_search_tool.py | 55 ++++--- .../serply_news_search_tool.py | 58 ++++---- .../serply_scholar_search_tool.py | 71 +++++----- .../serply_api_tool/serply_web_search_tool.py | 67 +++++---- .../serply_webpage_to_markdown_tool.py | 42 +++--- .../tools/spider_tool/spider_tool.py | 32 +++-- .../tools/txt_search_tool/txt_search_tool.py | 2 +- .../tools/vision_tool/vision_tool.py | 5 +- .../website_search/website_search_tool.py | 6 +- .../tools/xml_search_tool/xml_search_tool.py | 2 +- .../youtube_channel_search_tool.py | 6 +- .../youtube_video_search_tool.py | 6 +- 39 files changed, 752 insertions(+), 550 deletions(-) diff --git a/src/crewai_tools/tools/base_tool.py b/src/crewai_tools/tools/base_tool.py index 4b60d93d4..dee3b9317 100644 --- a/src/crewai_tools/tools/base_tool.py +++ b/src/crewai_tools/tools/base_tool.py @@ -3,11 +3,11 @@ from typing import Any, Callable, Optional, Type from langchain_core.tools import StructuredTool from pydantic import BaseModel, ConfigDict, Field, validator -from pydantic.v1 import BaseModel as V1BaseModel +from pydantic import BaseModel as PydanticBaseModel class BaseTool(BaseModel, ABC): - class _ArgsSchemaPlaceholder(V1BaseModel): + class _ArgsSchemaPlaceholder(PydanticBaseModel): pass model_config = ConfigDict() @@ -16,7 +16,7 @@ class BaseTool(BaseModel, ABC): """The unique name of the tool that clearly communicates its purpose.""" description: str """Used to tell the model how/when/why to use the tool.""" - args_schema: Type[V1BaseModel] = Field(default_factory=_ArgsSchemaPlaceholder) + args_schema: Type[PydanticBaseModel] = Field(default_factory=_ArgsSchemaPlaceholder) """The schema for the arguments that the tool accepts.""" description_updated: bool = False """Flag to check if the description has been updated.""" @@ -26,13 +26,15 @@ class BaseTool(BaseModel, ABC): """Flag to check if the tool should be the final agent answer.""" @validator("args_schema", always=True, pre=True) - def _default_args_schema(cls, v: Type[V1BaseModel]) -> Type[V1BaseModel]: + def 
_default_args_schema( + cls, v: Type[PydanticBaseModel] + ) -> Type[PydanticBaseModel]: if not isinstance(v, cls._ArgsSchemaPlaceholder): return v return type( f"{cls.__name__}Schema", - (V1BaseModel,), + (PydanticBaseModel,), { "__annotations__": { k: v for k, v in cls._run.__annotations__.items() if k != "return" @@ -75,7 +77,7 @@ class BaseTool(BaseModel, ABC): class_name = f"{self.__class__.__name__}Schema" self.args_schema = type( class_name, - (V1BaseModel,), + (PydanticBaseModel,), { "__annotations__": { k: v @@ -127,7 +129,7 @@ def tool(*args): class_name = "".join(tool_name.split()).title() args_schema = type( class_name, - (V1BaseModel,), + (PydanticBaseModel,), { "__annotations__": { k: v for k, v in f.__annotations__.items() if k != "return" diff --git a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py index 52722520d..514664557 100644 --- a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py +++ b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py @@ -1,13 +1,19 @@ -from typing import Optional, Any, Type -from pydantic.v1 import BaseModel, Field +from typing import Any, Optional, Type + +from pydantic import BaseModel, Field + from crewai_tools.tools.base_tool import BaseTool + class BrowserbaseLoadToolSchema(BaseModel): url: str = Field(description="Website URL") + class BrowserbaseLoadTool(BaseTool): name: str = "Browserbase web load tool" - description: str = "Load webpages url in a headless browser using Browserbase and return the contents" + description: str = ( + "Load webpages url in a headless browser using Browserbase and return the contents" + ) args_schema: Type[BaseModel] = BrowserbaseLoadToolSchema api_key: Optional[str] = None project_id: Optional[str] = None diff --git a/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py b/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py index 899943511..49ef2cb3d 100644 --- a/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py +++ b/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py @@ -1,7 +1,7 @@ from typing import Any, Optional, Type from embedchain.models.data_type import DataType -from pydantic.v1 import BaseModel, Field +from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool diff --git a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py index 6f427e440..f333a676d 100644 --- a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py +++ b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py @@ -3,8 +3,9 @@ import os from typing import List, Optional, Type import docker +from pydantic import BaseModel, Field + from crewai_tools.tools.base_tool import BaseTool -from pydantic.v1 import BaseModel, Field class CodeInterpreterSchema(BaseModel): diff --git a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py index 9d0509f88..dde9d4673 100644 --- a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py +++ b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py @@ -1,7 +1,7 @@ from typing import Any, Optional, Type from embedchain.models.data_type import DataType -from pydantic.v1 import BaseModel, Field +from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool diff --git 
a/src/crewai_tools/tools/dalle_tool/dalle_tool.py b/src/crewai_tools/tools/dalle_tool/dalle_tool.py index a4f7738a6..da6adb2b1 100644 --- a/src/crewai_tools/tools/dalle_tool/dalle_tool.py +++ b/src/crewai_tools/tools/dalle_tool/dalle_tool.py @@ -1,9 +1,10 @@ import json from typing import Type -from crewai_tools.tools.base_tool import BaseTool from openai import OpenAI -from pydantic.v1 import BaseModel +from pydantic import BaseModel + +from crewai_tools.tools.base_tool import BaseTool class ImagePromptSchema(BaseModel): diff --git a/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py b/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py index 8b569e5f6..3d308ba45 100644 --- a/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py +++ b/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py @@ -1,38 +1,50 @@ import os -from typing import Optional, Type, Any -from pydantic.v1 import BaseModel, Field +from typing import Any, Optional, Type + +from pydantic import BaseModel, Field + from ..base_tool import BaseTool + class FixedDirectoryReadToolSchema(BaseModel): - """Input for DirectoryReadTool.""" - pass + """Input for DirectoryReadTool.""" + + pass + class DirectoryReadToolSchema(FixedDirectoryReadToolSchema): - """Input for DirectoryReadTool.""" - directory: str = Field(..., description="Mandatory directory to list content") + """Input for DirectoryReadTool.""" + + directory: str = Field(..., description="Mandatory directory to list content") + class DirectoryReadTool(BaseTool): - name: str = "List files in directory" - description: str = "A tool that can be used to recursively list a directory's content." - args_schema: Type[BaseModel] = DirectoryReadToolSchema - directory: Optional[str] = None + name: str = "List files in directory" + description: str = ( + "A tool that can be used to recursively list a directory's content." + ) + args_schema: Type[BaseModel] = DirectoryReadToolSchema + directory: Optional[str] = None - def __init__(self, directory: Optional[str] = None, **kwargs): - super().__init__(**kwargs) - if directory is not None: - self.directory = directory - self.description = f"A tool that can be used to list {directory}'s content." - self.args_schema = FixedDirectoryReadToolSchema - self._generate_description() - - def _run( - self, - **kwargs: Any, - ) -> Any: - directory = kwargs.get('directory', self.directory) - if directory[-1] == "/": - directory = directory[:-1] - files_list = [f"{directory}/{(os.path.join(root, filename).replace(directory, '').lstrip(os.path.sep))}" for root, dirs, files in os.walk(directory) for filename in files] - files = "\n- ".join(files_list) - return f"File paths: \n-{files}" + def __init__(self, directory: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + if directory is not None: + self.directory = directory + self.description = f"A tool that can be used to list {directory}'s content." 
+ self.args_schema = FixedDirectoryReadToolSchema + self._generate_description() + def _run( + self, + **kwargs: Any, + ) -> Any: + directory = kwargs.get("directory", self.directory) + if directory[-1] == "/": + directory = directory[:-1] + files_list = [ + f"{directory}/{(os.path.join(root, filename).replace(directory, '').lstrip(os.path.sep))}" + for root, dirs, files in os.walk(directory) + for filename in files + ] + files = "\n- ".join(files_list) + return f"File paths: \n-{files}" diff --git a/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py b/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py index a06229081..e0938007b 100644 --- a/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py +++ b/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py @@ -1,7 +1,7 @@ from typing import Any, Optional, Type from embedchain.loaders.directory_loader import DirectoryLoader -from pydantic.v1 import BaseModel, Field +from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool diff --git a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py index b60dfd0f5..8be327af3 100644 --- a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py +++ b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py @@ -1,26 +1,32 @@ from typing import Any, Optional, Type from embedchain.models.data_type import DataType -from pydantic.v1 import BaseModel, Field +from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool class FixedDOCXSearchToolSchema(BaseModel): """Input for DOCXSearchTool.""" - docx: Optional[str] = Field(..., description="Mandatory docx path you want to search") + + docx: Optional[str] = Field( + ..., description="Mandatory docx path you want to search" + ) search_query: str = Field( ..., description="Mandatory search query you want to use to search the DOCX's content", ) + class DOCXSearchToolSchema(FixedDOCXSearchToolSchema): """Input for DOCXSearchTool.""" + search_query: str = Field( ..., description="Mandatory search query you want to use to search the DOCX's content", ) + class DOCXSearchTool(RagTool): name: str = "Search a DOCX's content" description: str = ( @@ -56,9 +62,9 @@ class DOCXSearchTool(RagTool): self, **kwargs: Any, ) -> Any: - search_query = kwargs.get('search_query') + search_query = kwargs.get("search_query") if search_query is None: - search_query = kwargs.get('query') + search_query = kwargs.get("query") docx = kwargs.get("docx") if docx is not None: diff --git a/src/crewai_tools/tools/exa_tools/exa_base_tool.py b/src/crewai_tools/tools/exa_tools/exa_base_tool.py index 237af8f84..6273c5f7a 100644 --- a/src/crewai_tools/tools/exa_tools/exa_base_tool.py +++ b/src/crewai_tools/tools/exa_tools/exa_base_tool.py @@ -1,36 +1,49 @@ import os from typing import Type -from pydantic.v1 import BaseModel, Field + +from pydantic import BaseModel, Field + from crewai_tools.tools.base_tool import BaseTool + class EXABaseToolToolSchema(BaseModel): - """Input for EXABaseTool.""" - search_query: str = Field(..., description="Mandatory search query you want to use to search the internet") + """Input for EXABaseTool.""" + + search_query: str = Field( + ..., description="Mandatory search query you want to use to search the internet" + ) + class EXABaseTool(BaseTool): - name: str = "Search the internet" - description: str = "A tool that can be used to search the internet from a search_query" - args_schema: Type[BaseModel] = 
EXABaseToolToolSchema
-    search_url: str = "https://api.exa.ai/search"
-    n_results: int = None
-    headers: dict = {
-        "accept": "application/json",
-        "content-type": "application/json",
-    }
+    name: str = "Search the internet"
+    description: str = (
+        "A tool that can be used to search the internet from a search_query"
+    )
+    args_schema: Type[BaseModel] = EXABaseToolToolSchema
+    search_url: str = "https://api.exa.ai/search"
+    n_results: int = None
+    headers: dict = {
+        "accept": "application/json",
+        "content-type": "application/json",
+    }
 
-    def _parse_results(self, results):
-        stirng = []
-        for result in results:
-            try:
-                stirng.append('\n'.join([
-                    f"Title: {result['title']}",
-                    f"Score: {result['score']}",
-                    f"Url: {result['url']}",
-                    f"ID: {result['id']}",
-                    "---"
-                ]))
-            except KeyError:
-                next
+    def _parse_results(self, results):
+        string = []
+        for result in results:
+            try:
+                string.append(
+                    "\n".join(
+                        [
+                            f"Title: {result['title']}",
+                            f"Score: {result['score']}",
+                            f"Url: {result['url']}",
+                            f"ID: {result['id']}",
+                            "---",
+                        ]
+                    )
+                )
+            except KeyError:
+                continue
 
-        content = '\n'.join(stirng)
-        return f"\nSearch results: {content}\n"
+        content = "\n".join(string)
+        return f"\nSearch results: {content}\n"
diff --git a/src/crewai_tools/tools/file_read_tool/file_read_tool.py b/src/crewai_tools/tools/file_read_tool/file_read_tool.py
index 38aeeeb2e..265dca54a 100644
--- a/src/crewai_tools/tools/file_read_tool/file_read_tool.py
+++ b/src/crewai_tools/tools/file_read_tool/file_read_tool.py
@@ -1,19 +1,20 @@
-from typing import Optional, Type, Any
-from pydantic.v1 import BaseModel, Field
+from typing import Any, Optional, Type
+
+from pydantic import BaseModel, Field
+
 from ..base_tool import BaseTool
 
 
 class FixedFileReadToolSchema(BaseModel):
     """Input for FileReadTool."""
+
     pass
 
 
 class FileReadToolSchema(FixedFileReadToolSchema):
     """Input for FileReadTool."""
-    file_path: str = Field(
-        ...,
-        description="Mandatory file full path to read the file"
-    )
+
+    file_path: str = Field(..., description="Mandatory file full path to read the file")
 
 
 class FileReadTool(BaseTool):
@@ -22,11 +23,7 @@ class FileReadTool(BaseTool):
     args_schema: Type[BaseModel] = FileReadToolSchema
     file_path: Optional[str] = None
 
-    def __init__(
-        self,
-        file_path: Optional[str] = None,
-        **kwargs
-    ):
+    def __init__(self, file_path: Optional[str] = None, **kwargs):
         super().__init__(**kwargs)
         if file_path is not None:
             self.file_path = file_path
@@ -39,8 +36,8 @@ class FileReadTool(BaseTool):
         **kwargs: Any,
     ) -> Any:
         try:
-            file_path = kwargs.get('file_path', self.file_path)
-            with open(file_path, 'r') as file:
+            file_path = kwargs.get("file_path", self.file_path)
+            with open(file_path, "r") as file:
                 return file.read()
         except Exception as e:
             return f"Failed to read the file {file_path}. 
Error: {e}" diff --git a/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py b/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py index eb185aa75..ce0c4ebd9 100644 --- a/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py +++ b/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py @@ -1,39 +1,46 @@ import os -from typing import Optional, Type, Any +from typing import Any, Optional, Type + +from pydantic import BaseModel -from pydantic.v1 import BaseModel from ..base_tool import BaseTool + class FileWriterToolInput(BaseModel): filename: str content: str directory: Optional[str] = None overwrite: bool = False + class FileWriterTool(BaseTool): name: str = "File Writer Tool" - description: str = "A tool to write content to a specified file. Accepts filename, content, and optionally a directory path and overwrite flag as input." + description: str = ( + "A tool to write content to a specified file. Accepts filename, content, and optionally a directory path and overwrite flag as input." + ) args_schema: Type[BaseModel] = FileWriterToolInput def _run(self, **kwargs: Any) -> str: try: # Create the directory if it doesn't exist - if kwargs['directory'] and not os.path.exists(kwargs['directory']): - os.makedirs(kwargs['directory']) + if kwargs["directory"] and not os.path.exists(kwargs["directory"]): + os.makedirs(kwargs["directory"]) # Construct the full path - filepath = os.path.join(kwargs['directory'] or '', kwargs['filename']) + filepath = os.path.join(kwargs["directory"] or "", kwargs["filename"]) # Check if file exists and overwrite is not allowed - if os.path.exists(filepath) and not kwargs['overwrite']: + if os.path.exists(filepath) and not kwargs["overwrite"]: return f"File {filepath} already exists and overwrite option was not passed." # Write content to the file - mode = 'w' if kwargs['overwrite'] else 'x' + mode = "w" if kwargs["overwrite"] else "x" with open(filepath, mode) as file: - file.write(kwargs['content']) + file.write(kwargs["content"]) return f"Content successfully written to {filepath}" except FileExistsError: - return f"File {filepath} already exists and overwrite option was not passed." + return ( + f"File {filepath} already exists and overwrite option was not passed." 
+ ) except Exception as e: return f"An error occurred while writing to the file: {str(e)}" diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py index 7c0b4c7fb..80a8392e9 100644 --- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py @@ -1,11 +1,19 @@ -from typing import Optional, Any, Type, Dict, List -from pydantic.v1 import BaseModel, Field +from typing import Any, Dict, List, Optional, Type + +from pydantic import BaseModel, Field + from crewai_tools.tools.base_tool import BaseTool + class FirecrawlCrawlWebsiteToolSchema(BaseModel): url: str = Field(description="Website URL") - crawler_options: Optional[Dict[str, Any]] = Field(default=None, description="Options for crawling") - page_options: Optional[Dict[str, Any]] = Field(default=None, description="Options for page") + crawler_options: Optional[Dict[str, Any]] = Field( + default=None, description="Options for crawling" + ) + page_options: Optional[Dict[str, Any]] = Field( + default=None, description="Options for page" + ) + class FirecrawlCrawlWebsiteTool(BaseTool): name: str = "Firecrawl web crawl tool" @@ -17,22 +25,24 @@ class FirecrawlCrawlWebsiteTool(BaseTool): def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) try: - from firecrawl import FirecrawlApp # type: ignore + from firecrawl import FirecrawlApp # type: ignore except ImportError: - raise ImportError( - "`firecrawl` package not found, please run `pip install firecrawl-py`" - ) + raise ImportError( + "`firecrawl` package not found, please run `pip install firecrawl-py`" + ) self.firecrawl = FirecrawlApp(api_key=api_key) - def _run(self, url: str, crawler_options: Optional[Dict[str, Any]] = None, page_options: Optional[Dict[str, Any]] = None): - if (crawler_options is None): + def _run( + self, + url: str, + crawler_options: Optional[Dict[str, Any]] = None, + page_options: Optional[Dict[str, Any]] = None, + ): + if crawler_options is None: crawler_options = {} - if (page_options is None): + if page_options is None: page_options = {} - options = { - "crawlerOptions": crawler_options, - "pageOptions": page_options - } - return self.firecrawl.crawl_url(url, options) \ No newline at end of file + options = {"crawlerOptions": crawler_options, "pageOptions": page_options} + return self.firecrawl.crawl_url(url, options) diff --git a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py index 29d6e238b..0de3335c7 100644 --- a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py @@ -1,12 +1,23 @@ -from typing import Optional, Any, Type, Dict -from pydantic.v1 import BaseModel, Field +from typing import Any, Dict, Optional, Type + +from pydantic import BaseModel, Field + from crewai_tools.tools.base_tool import BaseTool + class FirecrawlScrapeWebsiteToolSchema(BaseModel): url: str = Field(description="Website URL") - page_options: Optional[Dict[str, Any]] = Field(default=None, description="Options for page scraping") - extractor_options: Optional[Dict[str, Any]] = Field(default=None, description="Options for data extraction") - timeout: Optional[int] = 
Field(default=None, description="Timeout in milliseconds for the scraping operation. The default value is 30000.") + page_options: Optional[Dict[str, Any]] = Field( + default=None, description="Options for page scraping" + ) + extractor_options: Optional[Dict[str, Any]] = Field( + default=None, description="Options for data extraction" + ) + timeout: Optional[int] = Field( + default=None, + description="Timeout in milliseconds for the scraping operation. The default value is 30000.", + ) + class FirecrawlScrapeWebsiteTool(BaseTool): name: str = "Firecrawl web scrape tool" @@ -18,15 +29,21 @@ class FirecrawlScrapeWebsiteTool(BaseTool): def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) try: - from firecrawl import FirecrawlApp # type: ignore + from firecrawl import FirecrawlApp # type: ignore except ImportError: - raise ImportError( - "`firecrawl` package not found, please run `pip install firecrawl-py`" - ) + raise ImportError( + "`firecrawl` package not found, please run `pip install firecrawl-py`" + ) self.firecrawl = FirecrawlApp(api_key=api_key) - def _run(self, url: str, page_options: Optional[Dict[str, Any]] = None, extractor_options: Optional[Dict[str, Any]] = None, timeout: Optional[int] = None): + def _run( + self, + url: str, + page_options: Optional[Dict[str, Any]] = None, + extractor_options: Optional[Dict[str, Any]] = None, + timeout: Optional[int] = None, + ): if page_options is None: page_options = {} if extractor_options is None: @@ -37,6 +54,6 @@ class FirecrawlScrapeWebsiteTool(BaseTool): options = { "pageOptions": page_options, "extractorOptions": extractor_options, - "timeout": timeout + "timeout": timeout, } - return self.firecrawl.scrape_url(url, options) \ No newline at end of file + return self.firecrawl.scrape_url(url, options) diff --git a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py index 5b01ce8c5..ad92e2661 100644 --- a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py +++ b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py @@ -1,11 +1,19 @@ -from typing import Optional, Any, Type, Dict, List -from pydantic.v1 import BaseModel, Field +from typing import Any, Dict, List, Optional, Type + +from pydantic import BaseModel, Field + from crewai_tools.tools.base_tool import BaseTool + class FirecrawlSearchToolSchema(BaseModel): query: str = Field(description="Search query") - page_options: Optional[Dict[str, Any]] = Field(default=None, description="Options for result formatting") - search_options: Optional[Dict[str, Any]] = Field(default=None, description="Options for searching") + page_options: Optional[Dict[str, Any]] = Field( + default=None, description="Options for result formatting" + ) + search_options: Optional[Dict[str, Any]] = Field( + default=None, description="Options for searching" + ) + class FirecrawlSearchTool(BaseTool): name: str = "Firecrawl web search tool" @@ -17,22 +25,24 @@ class FirecrawlSearchTool(BaseTool): def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) try: - from firecrawl import FirecrawlApp # type: ignore + from firecrawl import FirecrawlApp # type: ignore except ImportError: - raise ImportError( - "`firecrawl` package not found, please run `pip install firecrawl-py`" - ) + raise ImportError( + "`firecrawl` package not found, please run `pip install firecrawl-py`" + ) self.firecrawl = FirecrawlApp(api_key=api_key) - def 
_run(self, query: str, page_options: Optional[Dict[str, Any]] = None, result_options: Optional[Dict[str, Any]] = None): - if (page_options is None): + def _run( + self, + query: str, + page_options: Optional[Dict[str, Any]] = None, + result_options: Optional[Dict[str, Any]] = None, + ): + if page_options is None: page_options = {} - if (result_options is None): + if result_options is None: result_options = {} - options = { - "pageOptions": page_options, - "resultOptions": result_options - } + options = {"pageOptions": page_options, "resultOptions": result_options} return self.firecrawl.search(query, options) diff --git a/src/crewai_tools/tools/github_search_tool/github_search_tool.py b/src/crewai_tools/tools/github_search_tool/github_search_tool.py index 2ec39c8c0..f1b372d8e 100644 --- a/src/crewai_tools/tools/github_search_tool/github_search_tool.py +++ b/src/crewai_tools/tools/github_search_tool/github_search_tool.py @@ -1,7 +1,7 @@ from typing import Any, List, Optional, Type from embedchain.loaders.github import GithubLoader -from pydantic.v1 import BaseModel, Field +from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool @@ -27,7 +27,9 @@ class GithubSearchToolSchema(FixedGithubSearchToolSchema): class GithubSearchTool(RagTool): name: str = "Search a github repo's content" - description: str = "A tool that can be used to semantic search a query from a github repo's content. This is not the GitHub API, but instead a tool that can provide semantic search capabilities." + description: str = ( + "A tool that can be used to semantic search a query from a github repo's content. This is not the GitHub API, but instead a tool that can provide semantic search capabilities." + ) summarize: bool = False gh_token: str args_schema: Type[BaseModel] = GithubSearchToolSchema diff --git a/src/crewai_tools/tools/json_search_tool/json_search_tool.py b/src/crewai_tools/tools/json_search_tool/json_search_tool.py index 930438c88..68dee653f 100644 --- a/src/crewai_tools/tools/json_search_tool/json_search_tool.py +++ b/src/crewai_tools/tools/json_search_tool/json_search_tool.py @@ -1,7 +1,7 @@ from typing import Any, Optional, Type from embedchain.models.data_type import DataType -from pydantic.v1 import BaseModel, Field +from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool diff --git a/src/crewai_tools/tools/llamaindex_tool/llamaindex_tool.py b/src/crewai_tools/tools/llamaindex_tool/llamaindex_tool.py index 5aac51052..af5c93e1f 100644 --- a/src/crewai_tools/tools/llamaindex_tool/llamaindex_tool.py +++ b/src/crewai_tools/tools/llamaindex_tool/llamaindex_tool.py @@ -1,50 +1,48 @@ -import os -import json -import requests +from typing import Any, Optional, Type, cast + +from pydantic import BaseModel, Field -from typing import Type, Any, cast, Optional -from pydantic.v1 import BaseModel, Field from crewai_tools.tools.base_tool import BaseTool + class LlamaIndexTool(BaseTool): """Tool to wrap LlamaIndex tools/query engines.""" + llama_index_tool: Any def _run( - self, + self, *args: Any, - **kwargs: Any, - ) -> Any: + **kwargs: Any, + ) -> Any: """Run tool.""" from llama_index.core.tools import BaseTool as LlamaBaseTool + tool = cast(LlamaBaseTool, self.llama_index_tool) return tool(*args, **kwargs) - + @classmethod - def from_tool( - cls, - tool: Any, - **kwargs: Any - ) -> "LlamaIndexTool": + def from_tool(cls, tool: Any, **kwargs: Any) -> "LlamaIndexTool": from llama_index.core.tools import BaseTool as LlamaBaseTool - + if not isinstance(tool, LlamaBaseTool): raise 
ValueError(f"Expected a LlamaBaseTool, got {type(tool)}") tool = cast(LlamaBaseTool, tool) if tool.metadata.fn_schema is None: - raise ValueError("The LlamaIndex tool does not have an fn_schema specified.") + raise ValueError( + "The LlamaIndex tool does not have an fn_schema specified." + ) args_schema = cast(Type[BaseModel], tool.metadata.fn_schema) - + return cls( name=tool.metadata.name, description=tool.metadata.description, args_schema=args_schema, llama_index_tool=tool, - **kwargs + **kwargs, ) - @classmethod def from_query_engine( cls, @@ -52,7 +50,7 @@ class LlamaIndexTool(BaseTool): name: Optional[str] = None, description: Optional[str] = None, return_direct: bool = False, - **kwargs: Any + **kwargs: Any, ) -> "LlamaIndexTool": from llama_index.core.query_engine import BaseQueryEngine from llama_index.core.tools import QueryEngineTool @@ -60,10 +58,11 @@ class LlamaIndexTool(BaseTool): if not isinstance(query_engine, BaseQueryEngine): raise ValueError(f"Expected a BaseQueryEngine, got {type(query_engine)}") - # NOTE: by default the schema expects an `input` variable. However this + # NOTE: by default the schema expects an `input` variable. However this # confuses crewAI so we are renaming to `query`. class QueryToolSchema(BaseModel): """Schema for query tool.""" + query: str = Field(..., description="Search query for the query tool.") # NOTE: setting `resolve_input_errors` to True is important because the schema expects `input` but we are using `query` @@ -72,13 +71,9 @@ class LlamaIndexTool(BaseTool): name=name, description=description, return_direct=return_direct, - resolve_input_errors=True, + resolve_input_errors=True, ) # HACK: we are replacing the schema with our custom schema query_engine_tool.metadata.fn_schema = QueryToolSchema - - return cls.from_tool( - query_engine_tool, - **kwargs - ) - \ No newline at end of file + + return cls.from_tool(query_engine_tool, **kwargs) diff --git a/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py b/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py index 69572140b..832ab1166 100644 --- a/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py +++ b/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py @@ -1,7 +1,7 @@ from typing import Any, Optional, Type from embedchain.models.data_type import DataType -from pydantic.v1 import BaseModel, Field +from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool diff --git a/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py b/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py index 372a02f38..02f6ec3f4 100644 --- a/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py +++ b/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py @@ -1,7 +1,7 @@ from typing import Any, Type from embedchain.loaders.mysql import MySQLLoader -from pydantic.v1 import BaseModel, Field +from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool @@ -17,7 +17,9 @@ class MySQLSearchToolSchema(BaseModel): class MySQLSearchTool(RagTool): name: str = "Search a database's table content" - description: str = "A tool that can be used to semantic search a query from a database table's content." + description: str = ( + "A tool that can be used to semantic search a query from a database table's content." 
+ ) args_schema: Type[BaseModel] = MySQLSearchToolSchema db_uri: str = Field(..., description="Mandatory database URI") diff --git a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py index 48df8e966..23e4af4d9 100644 --- a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py +++ b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py @@ -1,8 +1,7 @@ from typing import Any, Optional, Type from embedchain.models.data_type import DataType -from pydantic import model_validator -from pydantic.v1 import BaseModel, Field +from pydantic import BaseModel, Field, model_validator from ..rag.rag_tool import RagTool diff --git a/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py b/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py index 6f9ea2901..ff478a542 100644 --- a/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py +++ b/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py @@ -1,7 +1,7 @@ from typing import Any, Type from embedchain.loaders.postgres import PostgresLoader -from pydantic.v1 import BaseModel, Field +from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool @@ -17,7 +17,9 @@ class PGSearchToolSchema(BaseModel): class PGSearchTool(RagTool): name: str = "Search a database's table content" - description: str = "A tool that can be used to semantic search a query from a database table's content." + description: str = ( + "A tool that can be used to semantic search a query from a database table's content." + ) args_schema: Type[BaseModel] = PGSearchToolSchema db_uri: str = Field(..., description="Mandatory database URI") diff --git a/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py b/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py index 36bc088e5..56bb27195 100644 --- a/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py +++ b/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py @@ -1,57 +1,76 @@ import os +from typing import Any, Optional, Type + import requests from bs4 import BeautifulSoup -from typing import Optional, Type, Any -from pydantic.v1 import BaseModel, Field +from pydantic import BaseModel, Field + from ..base_tool import BaseTool + class FixedScrapeElementFromWebsiteToolSchema(BaseModel): - """Input for ScrapeElementFromWebsiteTool.""" - pass + """Input for ScrapeElementFromWebsiteTool.""" + + pass + class ScrapeElementFromWebsiteToolSchema(FixedScrapeElementFromWebsiteToolSchema): - """Input for ScrapeElementFromWebsiteTool.""" - website_url: str = Field(..., description="Mandatory website url to read the file") - css_element: str = Field(..., description="Mandatory css reference for element to scrape from the website") + """Input for ScrapeElementFromWebsiteTool.""" + + website_url: str = Field(..., description="Mandatory website url to read the file") + css_element: str = Field( + ..., + description="Mandatory css reference for element to scrape from the website", + ) + class ScrapeElementFromWebsiteTool(BaseTool): - name: str = "Read a website content" - description: str = "A tool that can be used to read a website content." 
- args_schema: Type[BaseModel] = ScrapeElementFromWebsiteToolSchema - website_url: Optional[str] = None - cookies: Optional[dict] = None - css_element: Optional[str] = None - headers: Optional[dict] = { - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36', - 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', - 'Accept-Language': 'en-US,en;q=0.9', - 'Referer': 'https://www.google.com/', - 'Connection': 'keep-alive', - 'Upgrade-Insecure-Requests': '1', - 'Accept-Encoding': 'gzip, deflate, br' - } - - def __init__(self, website_url: Optional[str] = None, cookies: Optional[dict] = None, css_element: Optional[str] = None, **kwargs): - super().__init__(**kwargs) - if website_url is not None: - self.website_url = website_url - self.css_element = css_element - self.description = f"A tool that can be used to read {website_url}'s content." - self.args_schema = FixedScrapeElementFromWebsiteToolSchema - self._generate_description() - if cookies is not None: - self.cookies = {cookies["name"]: os.getenv(cookies["value"])} - - def _run( - self, - **kwargs: Any, - ) -> Any: - website_url = kwargs.get('website_url', self.website_url) - css_element = kwargs.get('css_element', self.css_element) - page = requests.get(website_url, headers=self.headers, cookies=self.cookies if self.cookies else {}) - parsed = BeautifulSoup(page.content, "html.parser") - elements = parsed.select(css_element) - return "\n".join([element.get_text() for element in elements]) - + name: str = "Read a website content" + description: str = "A tool that can be used to read a website content." + args_schema: Type[BaseModel] = ScrapeElementFromWebsiteToolSchema + website_url: Optional[str] = None + cookies: Optional[dict] = None + css_element: Optional[str] = None + headers: Optional[dict] = { + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36", + "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9", + "Accept-Language": "en-US,en;q=0.9", + "Referer": "https://www.google.com/", + "Connection": "keep-alive", + "Upgrade-Insecure-Requests": "1", + "Accept-Encoding": "gzip, deflate, br", + } + def __init__( + self, + website_url: Optional[str] = None, + cookies: Optional[dict] = None, + css_element: Optional[str] = None, + **kwargs, + ): + super().__init__(**kwargs) + if website_url is not None: + self.website_url = website_url + self.css_element = css_element + self.description = ( + f"A tool that can be used to read {website_url}'s content." 
+ ) + self.args_schema = FixedScrapeElementFromWebsiteToolSchema + self._generate_description() + if cookies is not None: + self.cookies = {cookies["name"]: os.getenv(cookies["value"])} + def _run( + self, + **kwargs: Any, + ) -> Any: + website_url = kwargs.get("website_url", self.website_url) + css_element = kwargs.get("css_element", self.css_element) + page = requests.get( + website_url, + headers=self.headers, + cookies=self.cookies if self.cookies else {}, + ) + parsed = BeautifulSoup(page.content, "html.parser") + elements = parsed.select(css_element) + return "\n".join([element.get_text() for element in elements]) diff --git a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py index 92f84cba9..7173c2156 100644 --- a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py +++ b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py @@ -1,59 +1,73 @@ import os +from typing import Any, Optional, Type + import requests from bs4 import BeautifulSoup -from typing import Optional, Type, Any -from pydantic.v1 import BaseModel, Field +from pydantic import BaseModel, Field + from ..base_tool import BaseTool + class FixedScrapeWebsiteToolSchema(BaseModel): - """Input for ScrapeWebsiteTool.""" - pass + """Input for ScrapeWebsiteTool.""" + + pass + class ScrapeWebsiteToolSchema(FixedScrapeWebsiteToolSchema): - """Input for ScrapeWebsiteTool.""" - website_url: str = Field(..., description="Mandatory website url to read the file") + """Input for ScrapeWebsiteTool.""" + + website_url: str = Field(..., description="Mandatory website url to read the file") + class ScrapeWebsiteTool(BaseTool): - name: str = "Read website content" - description: str = "A tool that can be used to read a website content." - args_schema: Type[BaseModel] = ScrapeWebsiteToolSchema - website_url: Optional[str] = None - cookies: Optional[dict] = None - headers: Optional[dict] = { - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36', - 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', - 'Accept-Language': 'en-US,en;q=0.9', - 'Referer': 'https://www.google.com/', - 'Connection': 'keep-alive', - 'Upgrade-Insecure-Requests': '1' - } + name: str = "Read website content" + description: str = "A tool that can be used to read a website content." + args_schema: Type[BaseModel] = ScrapeWebsiteToolSchema + website_url: Optional[str] = None + cookies: Optional[dict] = None + headers: Optional[dict] = { + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36", + "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9", + "Accept-Language": "en-US,en;q=0.9", + "Referer": "https://www.google.com/", + "Connection": "keep-alive", + "Upgrade-Insecure-Requests": "1", + } - def __init__(self, website_url: Optional[str] = None, cookies: Optional[dict] = None, **kwargs): - super().__init__(**kwargs) - if website_url is not None: - self.website_url = website_url - self.description = f"A tool that can be used to read {website_url}'s content." 
- self.args_schema = FixedScrapeWebsiteToolSchema - self._generate_description() - if cookies is not None: - self.cookies = {cookies["name"]: os.getenv(cookies["value"])} + def __init__( + self, + website_url: Optional[str] = None, + cookies: Optional[dict] = None, + **kwargs, + ): + super().__init__(**kwargs) + if website_url is not None: + self.website_url = website_url + self.description = ( + f"A tool that can be used to read {website_url}'s content." + ) + self.args_schema = FixedScrapeWebsiteToolSchema + self._generate_description() + if cookies is not None: + self.cookies = {cookies["name"]: os.getenv(cookies["value"])} - def _run( - self, - **kwargs: Any, - ) -> Any: - website_url = kwargs.get('website_url', self.website_url) - page = requests.get( - website_url, - timeout=15, - headers=self.headers, - cookies=self.cookies if self.cookies else {} - ) + def _run( + self, + **kwargs: Any, + ) -> Any: + website_url = kwargs.get("website_url", self.website_url) + page = requests.get( + website_url, + timeout=15, + headers=self.headers, + cookies=self.cookies if self.cookies else {}, + ) - page.encoding = page.apparent_encoding - parsed = BeautifulSoup(page.text, "html.parser") + page.encoding = page.apparent_encoding + parsed = BeautifulSoup(page.text, "html.parser") - text = parsed.get_text() - text = '\n'.join([i for i in text.split('\n') if i.strip() != '']) - text = ' '.join([i for i in text.split(' ') if i.strip() != '']) - return text + text = parsed.get_text() + text = "\n".join([i for i in text.split("\n") if i.strip() != ""]) + text = " ".join([i for i in text.split(" ") if i.strip() != ""]) + return text diff --git a/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py b/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py index b0bfa7ee6..5800e223c 100644 --- a/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py +++ b/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py @@ -1,20 +1,31 @@ import logging +from typing import Any, Dict, Literal, Optional, Type + +from pydantic import BaseModel, Field -from typing import Optional, Any, Type, Dict, Literal -from pydantic.v1 import BaseModel, Field from crewai_tools.tools.base_tool import BaseTool logger = logging.getLogger(__file__) + class ScrapflyScrapeWebsiteToolSchema(BaseModel): url: str = Field(description="Webpage URL") - scrape_format: Optional[Literal["raw", "markdown", "text"]] = Field(default="markdown", description="Webpage extraction format") - scrape_config: Optional[Dict[str, Any]] = Field(default=None, description="Scrapfly request scrape config") - ignore_scrape_failures: Optional[bool] = Field(default=None, description="whether to ignore failures") + scrape_format: Optional[Literal["raw", "markdown", "text"]] = Field( + default="markdown", description="Webpage extraction format" + ) + scrape_config: Optional[Dict[str, Any]] = Field( + default=None, description="Scrapfly request scrape config" + ) + ignore_scrape_failures: Optional[bool] = Field( + default=None, description="whether to ignore failures" + ) + class ScrapflyScrapeWebsiteTool(BaseTool): name: str = "Scrapfly web scraping API tool" - description: str = "Scrape a webpage url using Scrapfly and return its content as markdown or text" + description: str = ( + "Scrape a webpage url using Scrapfly and return its content as markdown or text" + ) args_schema: Type[BaseModel] = ScrapflyScrapeWebsiteToolSchema api_key: str = None 
scrapfly: Optional[Any] = None @@ -29,7 +40,13 @@ class ScrapflyScrapeWebsiteTool(BaseTool): ) self.scrapfly = ScrapflyClient(key=api_key) - def _run(self, url: str, scrape_format: str = "markdown", scrape_config: Optional[Dict[str, Any]] = None, ignore_scrape_failures: Optional[bool] = None): + def _run( + self, + url: str, + scrape_format: str = "markdown", + scrape_config: Optional[Dict[str, Any]] = None, + ignore_scrape_failures: Optional[bool] = None, + ): from scrapfly import ScrapeApiResponse, ScrapeConfig scrape_config = scrape_config if scrape_config is not None else {} @@ -44,4 +61,3 @@ class ScrapflyScrapeWebsiteTool(BaseTool): return None else: raise e - \ No newline at end of file diff --git a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py index 6bf8ff5f1..970cde7ca 100644 --- a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py +++ b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py @@ -1,77 +1,94 @@ -from typing import Optional, Type, Any import time -from pydantic.v1 import BaseModel, Field +from typing import Any, Optional, Type from bs4 import BeautifulSoup +from pydantic import BaseModel, Field from selenium import webdriver -from selenium.webdriver.common.by import By from selenium.webdriver.chrome.options import Options +from selenium.webdriver.common.by import By from ..base_tool import BaseTool + class FixedSeleniumScrapingToolSchema(BaseModel): - """Input for SeleniumScrapingTool.""" - pass + """Input for SeleniumScrapingTool.""" + + pass + class SeleniumScrapingToolSchema(FixedSeleniumScrapingToolSchema): - """Input for SeleniumScrapingTool.""" - website_url: str = Field(..., description="Mandatory website url to read the file") - css_element: str = Field(..., description="Mandatory css reference for element to scrape from the website") + """Input for SeleniumScrapingTool.""" + + website_url: str = Field(..., description="Mandatory website url to read the file") + css_element: str = Field( + ..., + description="Mandatory css reference for element to scrape from the website", + ) + class SeleniumScrapingTool(BaseTool): - name: str = "Read a website content" - description: str = "A tool that can be used to read a website content." - args_schema: Type[BaseModel] = SeleniumScrapingToolSchema - website_url: Optional[str] = None - driver: Optional[Any] = webdriver.Chrome - cookie: Optional[dict] = None - wait_time: Optional[int] = 3 - css_element: Optional[str] = None + name: str = "Read a website content" + description: str = "A tool that can be used to read a website content." 
+ args_schema: Type[BaseModel] = SeleniumScrapingToolSchema + website_url: Optional[str] = None + driver: Optional[Any] = webdriver.Chrome + cookie: Optional[dict] = None + wait_time: Optional[int] = 3 + css_element: Optional[str] = None - def __init__(self, website_url: Optional[str] = None, cookie: Optional[dict] = None, css_element: Optional[str] = None, **kwargs): - super().__init__(**kwargs) - if cookie is not None: - self.cookie = cookie + def __init__( + self, + website_url: Optional[str] = None, + cookie: Optional[dict] = None, + css_element: Optional[str] = None, + **kwargs, + ): + super().__init__(**kwargs) + if cookie is not None: + self.cookie = cookie - if css_element is not None: - self.css_element = css_element + if css_element is not None: + self.css_element = css_element - if website_url is not None: - self.website_url = website_url - self.description = f"A tool that can be used to read {website_url}'s content." - self.args_schema = FixedSeleniumScrapingToolSchema + if website_url is not None: + self.website_url = website_url + self.description = ( + f"A tool that can be used to read {website_url}'s content." + ) + self.args_schema = FixedSeleniumScrapingToolSchema - self._generate_description() - def _run( - self, - **kwargs: Any, - ) -> Any: - website_url = kwargs.get('website_url', self.website_url) - css_element = kwargs.get('css_element', self.css_element) - driver = self._create_driver(website_url, self.cookie, self.wait_time) + self._generate_description() - content = [] - if css_element is None or css_element.strip() == "": - body_text = driver.find_element(By.TAG_NAME, "body").text - content.append(body_text) - else: - for element in driver.find_elements(By.CSS_SELECTOR, css_element): - content.append(element.text) - driver.close() - return "\n".join(content) + def _run( + self, + **kwargs: Any, + ) -> Any: + website_url = kwargs.get("website_url", self.website_url) + css_element = kwargs.get("css_element", self.css_element) + driver = self._create_driver(website_url, self.cookie, self.wait_time) - def _create_driver(self, url, cookie, wait_time): - options = Options() - options.add_argument("--headless") - driver = self.driver(options=options) - driver.get(url) - time.sleep(wait_time) - if cookie: - driver.add_cookie(cookie) - time.sleep(wait_time) - driver.get(url) - time.sleep(wait_time) - return driver + content = [] + if css_element is None or css_element.strip() == "": + body_text = driver.find_element(By.TAG_NAME, "body").text + content.append(body_text) + else: + for element in driver.find_elements(By.CSS_SELECTOR, css_element): + content.append(element.text) + driver.close() + return "\n".join(content) - def close(self): - self.driver.close() \ No newline at end of file + def _create_driver(self, url, cookie, wait_time): + options = Options() + options.add_argument("--headless") + driver = self.driver(options=options) + driver.get(url) + time.sleep(wait_time) + if cookie: + driver.add_cookie(cookie) + time.sleep(wait_time) + driver.get(url) + time.sleep(wait_time) + return driver + + def close(self): + self.driver.close() diff --git a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py index f89768064..ca118326e 100644 --- a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py +++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py @@ -1,80 +1,94 @@ import datetime -import os import json -import requests +import os +from typing import Any, Optional, Type + +import requests 
+from pydantic import BaseModel, Field
 
-from typing import Optional, Type, Any
-from pydantic.v1 import BaseModel, Field
 from crewai_tools.tools.base_tool import BaseTool
 
+
 def _save_results_to_file(content: str) -> None:
-    """Saves the search results to a file."""
-    filename = f"search_results_{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt"
-    with open(filename, 'w') as file:
-        file.write(content)
-    print(f"Results saved to {filename}")
+    """Saves the search results to a file."""
+    filename = f"search_results_{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt"
+    with open(filename, "w") as file:
+        file.write(content)
+    print(f"Results saved to {filename}")
 
 
 class SerperDevToolSchema(BaseModel):
-    """Input for SerperDevTool."""
-    search_query: str = Field(..., description="Mandatory search query you want to use to search the internet")
+    """Input for SerperDevTool."""
+
+    search_query: str = Field(
+        ..., description="Mandatory search query you want to use to search the internet"
+    )
+
 
 class SerperDevTool(BaseTool):
-    name: str = "Search the internet"
-    description: str = "A tool that can be used to search the internet with a search_query."
-    args_schema: Type[BaseModel] = SerperDevToolSchema
-    search_url: str = "https://google.serper.dev/search"
-    country: Optional[str] = ''
-    location: Optional[str] = ''
-    locale: Optional[str] = ''
-    n_results: int = 10
-    save_file: bool = False
+    name: str = "Search the internet"
+    description: str = (
+        "A tool that can be used to search the internet with a search_query."
+    )
+    args_schema: Type[BaseModel] = SerperDevToolSchema
+    search_url: str = "https://google.serper.dev/search"
+    country: Optional[str] = ""
+    location: Optional[str] = ""
+    locale: Optional[str] = ""
+    n_results: int = 10
+    save_file: bool = False
 
-    def _run(
-        self,
-        **kwargs: Any,
-    ) -> Any:
+    def _run(
+        self,
+        **kwargs: Any,
+    ) -> Any:
 
-        search_query = kwargs.get('search_query') or kwargs.get('query')
-        save_file = kwargs.get('save_file', self.save_file)
-        n_results = kwargs.get('n_results', self.n_results)
+        search_query = kwargs.get("search_query") or kwargs.get("query")
+        save_file = kwargs.get("save_file", self.save_file)
+        n_results = kwargs.get("n_results", self.n_results)
 
-        payload = { "q": search_query, "num": n_results }
+        payload = {"q": search_query, "num": n_results}
 
-        if self.country != '':
-            payload["gl"] = self.country
-        if self.location != '':
-            payload["location"] = self.location
-        if self.locale != '':
-            payload["hl"] = self.locale
+        if self.country != "":
+            payload["gl"] = self.country
+        if self.location != "":
+            payload["location"] = self.location
+        if self.locale != "":
+            payload["hl"] = self.locale
 
-        payload = json.dumps(payload)
+        payload = json.dumps(payload)
 
-        headers = {
-            'X-API-KEY': os.environ['SERPER_API_KEY'],
-            'content-type': 'application/json'
-        }
+        headers = {
+            "X-API-KEY": os.environ["SERPER_API_KEY"],
+            "content-type": "application/json",
+        }
 
-        response = requests.request("POST", self.search_url, headers=headers, data=payload)
-        results = response.json()
+        response = requests.request(
+            "POST", self.search_url, headers=headers, data=payload
+        )
+        results = response.json()
 
-        if 'organic' in results:
-            results = results['organic'][:self.n_results]
-            string = []
-            for result in results:
-                try:
-                    string.append('\n'.join([
-                        f"Title: {result['title']}",
-                        f"Link: {result['link']}",
-                        f"Snippet: {result['snippet']}",
-                        "---"
-                    ]))
-                except KeyError:
-                    continue
+        if "organic" in results:
+            results = results["organic"][: self.n_results]
+            string = []
+            for result in results:
+                try:
+                    string.append(
+                        "\n".join(
+                            [
+                                f"Title: {result['title']}",
+                                f"Link: {result['link']}",
+                                f"Snippet: {result['snippet']}",
+                                "---",
+                            ]
+                        )
+                    )
+                except KeyError:
+                    continue
 
-            content = '\n'.join(string)
-            if save_file:
-                _save_results_to_file(content)
-            return f"\nSearch results: {content}\n"
-        else:
-            return results
+            content = "\n".join(string)
+            if save_file:
+                _save_results_to_file(content)
+            return f"\nSearch results: {content}\n"
+        else:
+            return results
diff --git a/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py
index a69ff3de6..b0474262a 100644
--- a/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py
+++ b/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py
@@ -1,19 +1,27 @@
 import os
-import requests
+from typing import Any, Optional, Type
 from urllib.parse import urlencode
-from typing import Type, Any, Optional
-from pydantic.v1 import BaseModel, Field
+
+import requests
+from pydantic import BaseModel, Field
+
 from crewai_tools.tools.rag.rag_tool import RagTool
 
 
 class SerplyJobSearchToolSchema(BaseModel):
     """Input for Job Search."""
-    search_query: str = Field(..., description="Mandatory search query you want to use to fetch jobs postings.")
+
+    search_query: str = Field(
+        ...,
+        description="Mandatory search query you want to use to fetch jobs postings.",
+    )
 
 
 class SerplyJobSearchTool(RagTool):
     name: str = "Job Search"
-    description: str = "A tool to perform to perform a job search in the US with a search_query."
+    description: str = (
+        "A tool to perform a job search in the US with a search_query."
+    )
     args_schema: Type[BaseModel] = SerplyJobSearchToolSchema
     request_url: str = "https://api.serply.io/v1/job/search/"
     proxy_location: Optional[str] = "US"
@@ -23,20 +31,17 @@ class SerplyJobSearchTool(RagTool):
     """
     headers: Optional[dict] = {}
 
-    def __init__(
-        self,
-        **kwargs
-    ):
+    def __init__(self, **kwargs):
         super().__init__(**kwargs)
         self.headers = {
             "X-API-KEY": os.environ["SERPLY_API_KEY"],
             "User-Agent": "crew-tools",
-            "X-Proxy-Location": self.proxy_location
+            "X-Proxy-Location": self.proxy_location,
         }
 
     def _run(
-        self,
-        **kwargs: Any,
+        self,
+        **kwargs: Any,
     ) -> Any:
         query_payload = {}
 
@@ -58,18 +63,22 @@ class SerplyJobSearchTool(RagTool):
         string = []
         for job in jobs:
             try:
-                string.append('\n'.join([
-                    f"Position: {job['position']}",
-                    f"Employer: {job['employer']}",
-                    f"Location: {job['location']}",
-                    f"Link: {job['link']}",
-                    f"""Highest: {', '.join([h for h in job['highlights']])}""",
-                    f"Is Remote: {job['is_remote']}",
-                    f"Is Hybrid: {job['is_remote']}",
-                    "---"
-                ]))
+                string.append(
+                    "\n".join(
+                        [
+                            f"Position: {job['position']}",
+                            f"Employer: {job['employer']}",
+                            f"Location: {job['location']}",
+                            f"Link: {job['link']}",
+                            f"""Highlights: {', '.join([h for h in job['highlights']])}""",
+                            f"Is Remote: {job['is_remote']}",
+                            f"Is Hybrid: {job['is_remote']}",
+                            "---",
+                        ]
+                    )
+                )
             except KeyError:
                 continue
 
-        content = '\n'.join(string)
+        content = "\n".join(string)
 
         return f"\nSearch results: {content}\n"
diff --git a/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py
index f1127246e..21e6e9872 100644
--- a/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py
+++ b/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py
@@ -1,14 +1,19 @@
 import os
-import requests
+from typing import Any, 
Optional, Type from urllib.parse import urlencode -from typing import Type, Any, Optional -from pydantic.v1 import BaseModel, Field + +import requests +from pydantic import BaseModel, Field + from crewai_tools.tools.base_tool import BaseTool class SerplyNewsSearchToolSchema(BaseModel): """Input for Serply News Search.""" - search_query: str = Field(..., description="Mandatory search query you want to use to fetch news articles") + + search_query: str = Field( + ..., description="Mandatory search query you want to use to fetch news articles" + ) class SerplyNewsSearchTool(BaseTool): @@ -21,15 +26,12 @@ class SerplyNewsSearchTool(BaseTool): limit: Optional[int] = 10 def __init__( - self, - limit: Optional[int] = 10, - proxy_location: Optional[str] = "US", - **kwargs + self, limit: Optional[int] = 10, proxy_location: Optional[str] = "US", **kwargs ): """ - param: limit (int): The maximum number of results to return [10-100, defaults to 10] - proxy_location: (str): Where to get news, specifically for a specific country results. - ['US', 'CA', 'IE', 'GB', 'FR', 'DE', 'SE', 'IN', 'JP', 'KR', 'SG', 'AU', 'BR'] (defaults to US) + param: limit (int): The maximum number of results to return [10-100, defaults to 10] + proxy_location: (str): Where to get news, specifically for a specific country results. + ['US', 'CA', 'IE', 'GB', 'FR', 'DE', 'SE', 'IN', 'JP', 'KR', 'SG', 'AU', 'BR'] (defaults to US) """ super().__init__(**kwargs) self.limit = limit @@ -37,12 +39,12 @@ class SerplyNewsSearchTool(BaseTool): self.headers = { "X-API-KEY": os.environ["SERPLY_API_KEY"], "User-Agent": "crew-tools", - "X-Proxy-Location": proxy_location + "X-Proxy-Location": proxy_location, } def _run( - self, - **kwargs: Any, + self, + **kwargs: Any, ) -> Any: # build query parameters query_payload = {} @@ -58,24 +60,28 @@ class SerplyNewsSearchTool(BaseTool): response = requests.request("GET", url, headers=self.headers) results = response.json() if "entries" in results: - results = results['entries'] + results = results["entries"] string = [] - for result in results[:self.limit]: + for result in results[: self.limit]: try: # follow url - r = requests.get(result['link']) - final_link = r.history[-1].headers['Location'] - string.append('\n'.join([ - f"Title: {result['title']}", - f"Link: {final_link}", - f"Source: {result['source']['title']}", - f"Published: {result['published']}", - "---" - ])) + r = requests.get(result["link"]) + final_link = r.history[-1].headers["Location"] + string.append( + "\n".join( + [ + f"Title: {result['title']}", + f"Link: {final_link}", + f"Source: {result['source']['title']}", + f"Published: {result['published']}", + "---", + ] + ) + ) except KeyError: continue - content = '\n'.join(string) + content = "\n".join(string) return f"\nSearch results: {content}\n" else: return results diff --git a/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py index a37c36e5f..1ac6337f6 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py @@ -1,36 +1,39 @@ import os -import requests +from typing import Any, Optional, Type from urllib.parse import urlencode -from typing import Type, Any, Optional -from pydantic.v1 import BaseModel, Field + +import requests +from pydantic import BaseModel, Field + from crewai_tools.tools.base_tool import BaseTool class SerplyScholarSearchToolSchema(BaseModel): """Input for Serply Scholar 
Search.""" - search_query: str = Field(..., description="Mandatory search query you want to use to fetch scholarly literature") + + search_query: str = Field( + ..., + description="Mandatory search query you want to use to fetch scholarly literature", + ) class SerplyScholarSearchTool(BaseTool): name: str = "Scholar Search" - description: str = "A tool to perform scholarly literature search with a search_query." + description: str = ( + "A tool to perform scholarly literature search with a search_query." + ) args_schema: Type[BaseModel] = SerplyScholarSearchToolSchema search_url: str = "https://api.serply.io/v1/scholar/" hl: Optional[str] = "us" proxy_location: Optional[str] = "US" headers: Optional[dict] = {} - def __init__( - self, - hl: str = "us", - proxy_location: Optional[str] = "US", - **kwargs - ): + def __init__(self, hl: str = "us", proxy_location: Optional[str] = "US", **kwargs): """ - param: hl (str): host Language code to display results in - (reference https://developers.google.com/custom-search/docs/xml_results?hl=en#wsInterfaceLanguages) - proxy_location: (str): Specify the proxy location for the search, specifically for a specific country results. - ['US', 'CA', 'IE', 'GB', 'FR', 'DE', 'SE', 'IN', 'JP', 'KR', 'SG', 'AU', 'BR'] (defaults to US) + param: hl (str): host Language code to display results in + (reference https://developers.google.com/custom-search/docs/xml_results?hl=en#wsInterfaceLanguages) + proxy_location: (str): Specify the proxy location for the search, specifically for a specific country results. + ['US', 'CA', 'IE', 'GB', 'FR', 'DE', 'SE', 'IN', 'JP', 'KR', 'SG', 'AU', 'BR'] (defaults to US) """ super().__init__(**kwargs) self.hl = hl @@ -38,16 +41,14 @@ class SerplyScholarSearchTool(BaseTool): self.headers = { "X-API-KEY": os.environ["SERPLY_API_KEY"], "User-Agent": "crew-tools", - "X-Proxy-Location": proxy_location + "X-Proxy-Location": proxy_location, } def _run( - self, - **kwargs: Any, + self, + **kwargs: Any, ) -> Any: - query_payload = { - "hl": self.hl - } + query_payload = {"hl": self.hl} if "query" in kwargs: query_payload["q"] = kwargs["query"] @@ -67,20 +68,24 @@ class SerplyScholarSearchTool(BaseTool): for article in articles: try: if "doc" in article: - link = article['doc']['link'] + link = article["doc"]["link"] else: - link = article['link'] - authors = [author['name'] for author in article['author']['authors']] - string.append('\n'.join([ - f"Title: {article['title']}", - f"Link: {link}", - f"Description: {article['description']}", - f"Cite: {article['cite']}", - f"Authors: {', '.join(authors)}", - "---" - ])) + link = article["link"] + authors = [author["name"] for author in article["author"]["authors"]] + string.append( + "\n".join( + [ + f"Title: {article['title']}", + f"Link: {link}", + f"Description: {article['description']}", + f"Cite: {article['cite']}", + f"Authors: {', '.join(authors)}", + "---", + ] + ) + ) except KeyError: continue - content = '\n'.join(string) + content = "\n".join(string) return f"\nSearch results: {content}\n" diff --git a/src/crewai_tools/tools/serply_api_tool/serply_web_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_web_search_tool.py index 894c24741..b65fa21d1 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_web_search_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_web_search_tool.py @@ -1,14 +1,19 @@ import os -import requests +from typing import Any, Optional, Type from urllib.parse import urlencode -from typing import Type, Any, Optional -from pydantic.v1 import 
BaseModel, Field + +import requests +from pydantic import BaseModel, Field + from crewai_tools.tools.base_tool import BaseTool class SerplyWebSearchToolSchema(BaseModel): """Input for Serply Web Search.""" - search_query: str = Field(..., description="Mandatory search query you want to use to Google search") + + search_query: str = Field( + ..., description="Mandatory search query you want to use to Google search" + ) class SerplyWebSearchTool(BaseTool): @@ -24,21 +29,21 @@ class SerplyWebSearchTool(BaseTool): headers: Optional[dict] = {} def __init__( - self, - hl: str = "us", - limit: int = 10, - device_type: str = "desktop", - proxy_location: str = "US", - **kwargs + self, + hl: str = "us", + limit: int = 10, + device_type: str = "desktop", + proxy_location: str = "US", + **kwargs, ): """ - param: query (str): The query to search for - param: hl (str): host Language code to display results in - (reference https://developers.google.com/custom-search/docs/xml_results?hl=en#wsInterfaceLanguages) - param: limit (int): The maximum number of results to return [10-100, defaults to 10] - param: device_type (str): desktop/mobile results (defaults to desktop) - proxy_location: (str): Where to perform the search, specifically for local/regional results. - ['US', 'CA', 'IE', 'GB', 'FR', 'DE', 'SE', 'IN', 'JP', 'KR', 'SG', 'AU', 'BR'] (defaults to US) + param: query (str): The query to search for + param: hl (str): host Language code to display results in + (reference https://developers.google.com/custom-search/docs/xml_results?hl=en#wsInterfaceLanguages) + param: limit (int): The maximum number of results to return [10-100, defaults to 10] + param: device_type (str): desktop/mobile results (defaults to desktop) + proxy_location: (str): Where to perform the search, specifically for local/regional results. 
+ ['US', 'CA', 'IE', 'GB', 'FR', 'DE', 'SE', 'IN', 'JP', 'KR', 'SG', 'AU', 'BR'] (defaults to US) """ super().__init__(**kwargs) @@ -50,18 +55,18 @@ class SerplyWebSearchTool(BaseTool): self.query_payload = { "num": limit, "gl": proxy_location.upper(), - "hl": hl.lower() + "hl": hl.lower(), } self.headers = { "X-API-KEY": os.environ["SERPLY_API_KEY"], "X-User-Agent": device_type, "User-Agent": "crew-tools", - "X-Proxy-Location": proxy_location + "X-Proxy-Location": proxy_location, } def _run( - self, - **kwargs: Any, + self, + **kwargs: Any, ) -> Any: if "query" in kwargs: self.query_payload["q"] = kwargs["query"] @@ -74,20 +79,24 @@ class SerplyWebSearchTool(BaseTool): response = requests.request("GET", url, headers=self.headers) results = response.json() if "results" in results: - results = results['results'] + results = results["results"] string = [] for result in results: try: - string.append('\n'.join([ - f"Title: {result['title']}", - f"Link: {result['link']}", - f"Description: {result['description'].strip()}", - "---" - ])) + string.append( + "\n".join( + [ + f"Title: {result['title']}", + f"Link: {result['link']}", + f"Description: {result['description'].strip()}", + "---", + ] + ) + ) except KeyError: continue - content = '\n'.join(string) + content = "\n".join(string) return f"\nSearch results: {content}\n" else: return results diff --git a/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py index 5049826c5..e09a36fd9 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py @@ -1,48 +1,50 @@ import os +from typing import Any, Optional, Type + import requests -from typing import Type, Any, Optional -from pydantic.v1 import BaseModel, Field +from pydantic import BaseModel, Field + from crewai_tools.tools.rag.rag_tool import RagTool class SerplyWebpageToMarkdownToolSchema(BaseModel): """Input for Serply Search.""" - url: str = Field(..., description="Mandatory url you want to use to fetch and convert to markdown") + + url: str = Field( + ..., + description="Mandatory url you want to use to fetch and convert to markdown", + ) class SerplyWebpageToMarkdownTool(RagTool): name: str = "Webpage to Markdown" - description: str = "A tool to perform convert a webpage to markdown to make it easier for LLMs to understand" + description: str = ( + "A tool to perform convert a webpage to markdown to make it easier for LLMs to understand" + ) args_schema: Type[BaseModel] = SerplyWebpageToMarkdownToolSchema request_url: str = "https://api.serply.io/v1/request" proxy_location: Optional[str] = "US" headers: Optional[dict] = {} - def __init__( - self, - proxy_location: Optional[str] = "US", - **kwargs - ): + def __init__(self, proxy_location: Optional[str] = "US", **kwargs): """ - proxy_location: (str): Where to perform the search, specifically for a specific country results. - ['US', 'CA', 'IE', 'GB', 'FR', 'DE', 'SE', 'IN', 'JP', 'KR', 'SG', 'AU', 'BR'] (defaults to US) + proxy_location: (str): Where to perform the search, specifically for a specific country results. 
+ ['US', 'CA', 'IE', 'GB', 'FR', 'DE', 'SE', 'IN', 'JP', 'KR', 'SG', 'AU', 'BR'] (defaults to US) """ super().__init__(**kwargs) self.proxy_location = proxy_location self.headers = { "X-API-KEY": os.environ["SERPLY_API_KEY"], "User-Agent": "crew-tools", - "X-Proxy-Location": proxy_location + "X-Proxy-Location": proxy_location, } def _run( - self, - **kwargs: Any, + self, + **kwargs: Any, ) -> Any: - data = { - "url": kwargs["url"], - "method": "GET", - "response_type": "markdown" - } - response = requests.request("POST", self.request_url, headers=self.headers, json=data) + data = {"url": kwargs["url"], "method": "GET", "response_type": "markdown"} + response = requests.request( + "POST", self.request_url, headers=self.headers, json=data + ) return response.text diff --git a/src/crewai_tools/tools/spider_tool/spider_tool.py b/src/crewai_tools/tools/spider_tool/spider_tool.py index b4b230c8e..c01b5e2a3 100644 --- a/src/crewai_tools/tools/spider_tool/spider_tool.py +++ b/src/crewai_tools/tools/spider_tool/spider_tool.py @@ -1,21 +1,25 @@ -from typing import Optional, Any, Type, Dict, Literal -from pydantic.v1 import BaseModel, Field +from typing import Any, Dict, Literal, Optional, Type + +from pydantic import BaseModel, Field + from crewai_tools.tools.base_tool import BaseTool + class SpiderToolSchema(BaseModel): url: str = Field(description="Website URL") params: Optional[Dict[str, Any]] = Field( description="Set additional params. Options include:\n" - "- `limit`: Optional[int] - The maximum number of pages allowed to crawl per website. Remove the value or set it to `0` to crawl all pages.\n" - "- `depth`: Optional[int] - The crawl limit for maximum depth. If `0`, no limit will be applied.\n" - "- `metadata`: Optional[bool] - Boolean to include metadata or not. Defaults to `False` unless set to `True`. If the user wants metadata, include params.metadata = True.\n" - "- `query_selector`: Optional[str] - The CSS query selector to use when extracting content from the markup.\n" + "- `limit`: Optional[int] - The maximum number of pages allowed to crawl per website. Remove the value or set it to `0` to crawl all pages.\n" + "- `depth`: Optional[int] - The crawl limit for maximum depth. If `0`, no limit will be applied.\n" + "- `metadata`: Optional[bool] - Boolean to include metadata or not. Defaults to `False` unless set to `True`. If the user wants metadata, include params.metadata = True.\n" + "- `query_selector`: Optional[str] - The CSS query selector to use when extracting content from the markup.\n" ) mode: Literal["scrape", "crawl"] = Field( default="scrape", - description="Mode, the only two allowed modes are `scrape` or `crawl`. Use `scrape` to scrape a single page and `crawl` to crawl the entire website following subpages. These modes are the only allowed values even when ANY params is set." + description="Mode, the only two allowed modes are `scrape` or `crawl`. Use `scrape` to scrape a single page and `crawl` to crawl the entire website following subpages. These modes are the only allowed values even when ANY params is set.", ) + class SpiderTool(BaseTool): name: str = "Spider scrape & crawl tool" description: str = "Scrape & Crawl any url and return LLM-ready data." 
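For orientation, here is a minimal usage sketch of the tool being reformatted in this diff. The URL and API key are placeholders, and it assumes the `spider-client` package is installed, as the import guard in the next hunk requires:

```python
from crewai_tools.tools.spider_tool.spider_tool import SpiderTool

# Placeholder key; the constructor forwards it to the Spider client.
tool = SpiderTool(api_key="your-spider-api-key")

# mode="scrape" fetches a single page; mode="crawl" also follows subpages.
# With no params given, the tool defaults to {"return_format": "markdown"}.
result = tool.run(url="https://example.com", mode="scrape")
print(result)
```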
@@ -26,11 +30,11 @@ class SpiderTool(BaseTool): def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) try: - from spider import Spider # type: ignore + from spider import Spider # type: ignore except ImportError: - raise ImportError( - "`spider-client` package not found, please run `pip install spider-client`" - ) + raise ImportError( + "`spider-client` package not found, please run `pip install spider-client`" + ) self.spider = Spider(api_key=api_key) @@ -38,7 +42,7 @@ class SpiderTool(BaseTool): self, url: str, params: Optional[Dict[str, Any]] = None, - mode: Optional[Literal["scrape", "crawl"]] = "scrape" + mode: Optional[Literal["scrape", "crawl"]] = "scrape", ): if mode not in ["scrape", "crawl"]: raise ValueError( @@ -51,9 +55,7 @@ class SpiderTool(BaseTool): else: params = {"return_format": "markdown"} - action = ( - self.spider.scrape_url if mode == "scrape" else self.spider.crawl_url - ) + action = self.spider.scrape_url if mode == "scrape" else self.spider.crawl_url spider_docs = action(url=url, params=params) return spider_docs diff --git a/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py b/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py index 5dbaed4d4..f50085eee 100644 --- a/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py +++ b/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py @@ -1,7 +1,7 @@ from typing import Any, Optional, Type from embedchain.models.data_type import DataType -from pydantic.v1 import BaseModel, Field +from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool diff --git a/src/crewai_tools/tools/vision_tool/vision_tool.py b/src/crewai_tools/tools/vision_tool/vision_tool.py index a9abd5c43..6b7a21dbd 100644 --- a/src/crewai_tools/tools/vision_tool/vision_tool.py +++ b/src/crewai_tools/tools/vision_tool/vision_tool.py @@ -2,9 +2,10 @@ import base64 from typing import Type import requests -from crewai_tools.tools.base_tool import BaseTool from openai import OpenAI -from pydantic.v1 import BaseModel +from pydantic import BaseModel + +from crewai_tools.tools.base_tool import BaseTool class ImagePromptSchema(BaseModel): diff --git a/src/crewai_tools/tools/website_search/website_search_tool.py b/src/crewai_tools/tools/website_search/website_search_tool.py index 1ff587f00..d20c4bf23 100644 --- a/src/crewai_tools/tools/website_search/website_search_tool.py +++ b/src/crewai_tools/tools/website_search/website_search_tool.py @@ -1,7 +1,7 @@ from typing import Any, Optional, Type from embedchain.models.data_type import DataType -from pydantic.v1 import BaseModel, Field +from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool @@ -25,7 +25,9 @@ class WebsiteSearchToolSchema(FixedWebsiteSearchToolSchema): class WebsiteSearchTool(RagTool): name: str = "Search in a specific website" - description: str = "A tool that can be used to semantic search a query from a specific URL content." + description: str = ( + "A tool that can be used to semantic search a query from a specific URL content." 
+ ) args_schema: Type[BaseModel] = WebsiteSearchToolSchema def __init__(self, website: Optional[str] = None, **kwargs): diff --git a/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py b/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py index 0346d484e..ada37d766 100644 --- a/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py +++ b/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py @@ -1,7 +1,7 @@ from typing import Any, Optional, Type from embedchain.models.data_type import DataType -from pydantic.v1 import BaseModel, Field +from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool diff --git a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py index 2edc0026b..1a1c521d0 100644 --- a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py +++ b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py @@ -1,7 +1,7 @@ from typing import Any, Optional, Type from embedchain.models.data_type import DataType -from pydantic.v1 import BaseModel, Field +from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool @@ -25,7 +25,9 @@ class YoutubeChannelSearchToolSchema(FixedYoutubeChannelSearchToolSchema): class YoutubeChannelSearchTool(RagTool): name: str = "Search a Youtube Channels content" - description: str = "A tool that can be used to semantic search a query from a Youtube Channels content." + description: str = ( + "A tool that can be used to semantic search a query from a Youtube Channels content." + ) args_schema: Type[BaseModel] = YoutubeChannelSearchToolSchema def __init__(self, youtube_channel_handle: Optional[str] = None, **kwargs): diff --git a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py index 77d25752e..9feb2b941 100644 --- a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py +++ b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py @@ -1,7 +1,7 @@ from typing import Any, Optional, Type from embedchain.models.data_type import DataType -from pydantic.v1 import BaseModel, Field +from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool @@ -25,7 +25,9 @@ class YoutubeVideoSearchToolSchema(FixedYoutubeVideoSearchToolSchema): class YoutubeVideoSearchTool(RagTool): name: str = "Search a Youtube Video content" - description: str = "A tool that can be used to semantic search a query from a Youtube Video content." + description: str = ( + "A tool that can be used to semantic search a query from a Youtube Video content." 
+ ) args_schema: Type[BaseModel] = YoutubeVideoSearchToolSchema def __init__(self, youtube_video_url: Optional[str] = None, **kwargs): From 1cc8966e2eeb24a0ca4f2052329b67874bfc083b Mon Sep 17 00:00:00 2001 From: Mike Sorensen <12532193+masorensen@users.noreply.github.com> Date: Mon, 23 Sep 2024 22:18:22 -0500 Subject: [PATCH 136/391] fixed scoping issue causing error in RAG tools --- .../tools/code_docs_search_tool/code_docs_search_tool.py | 2 +- src/crewai_tools/tools/csv_search_tool/csv_search_tool.py | 2 +- .../tools/directory_search_tool/directory_search_tool.py | 2 +- src/crewai_tools/tools/docx_search_tool/docx_search_tool.py | 2 +- .../tools/github_search_tool/github_search_tool.py | 5 +++-- src/crewai_tools/tools/json_search_tool/json_search_tool.py | 2 +- src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py | 2 +- .../tools/mysql_search_tool/mysql_search_tool.py | 4 ++-- src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py | 2 +- src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py | 4 ++-- src/crewai_tools/tools/txt_search_tool/txt_search_tool.py | 2 +- src/crewai_tools/tools/website_search/website_search_tool.py | 2 +- src/crewai_tools/tools/xml_search_tool/xml_search_tool.py | 2 +- .../youtube_channel_search_tool.py | 3 +-- .../youtube_video_search_tool/youtube_video_search_tool.py | 2 +- 15 files changed, 19 insertions(+), 19 deletions(-) diff --git a/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py b/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py index 49ef2cb3d..ae51adf54 100644 --- a/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py +++ b/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py @@ -31,6 +31,7 @@ class CodeDocsSearchTool(RagTool): def __init__(self, docs_url: Optional[str] = None, **kwargs): super().__init__(**kwargs) if docs_url is not None: + kwargs["data_type"] = DataType.DOCS_SITE self.add(docs_url) self.description = f"A tool that can be used to semantic search a query the {docs_url} Code Docs content." self.args_schema = FixedCodeDocsSearchToolSchema @@ -41,7 +42,6 @@ class CodeDocsSearchTool(RagTool): *args: Any, **kwargs: Any, ) -> None: - kwargs["data_type"] = DataType.DOCS_SITE super().add(*args, **kwargs) def _before_run( diff --git a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py index dde9d4673..e255df6b5 100644 --- a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py +++ b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py @@ -31,6 +31,7 @@ class CSVSearchTool(RagTool): def __init__(self, csv: Optional[str] = None, **kwargs): super().__init__(**kwargs) if csv is not None: + kwargs["data_type"] = DataType.CSV self.add(csv) self.description = f"A tool that can be used to semantic search a query the {csv} CSV's content." 
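As a usage sketch for the pattern these RAG tools share — the CSV path is illustrative, and the tool is handed to an agent rather than called directly:

```python
from crewai import Agent
from crewai_tools import CSVSearchTool

# Passing the source at construction time scopes the tool to this one file.
csv_tool = CSVSearchTool(csv="data/orders.csv")

analyst = Agent(
    role="Data Analyst",
    goal="Answer questions about the orders data",
    backstory="You rely on the attached knowledge tools for every answer.",
    tools=[csv_tool],
)
```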
self.args_schema = FixedCSVSearchToolSchema @@ -41,7 +42,6 @@ class CSVSearchTool(RagTool): *args: Any, **kwargs: Any, ) -> None: - kwargs["data_type"] = DataType.CSV super().add(*args, **kwargs) def _before_run( diff --git a/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py b/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py index e0938007b..ffd132c0e 100644 --- a/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py +++ b/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py @@ -31,6 +31,7 @@ class DirectorySearchTool(RagTool): def __init__(self, directory: Optional[str] = None, **kwargs): super().__init__(**kwargs) if directory is not None: + kwargs["loader"] = DirectoryLoader(config=dict(recursive=True)) self.add(directory) self.description = f"A tool that can be used to semantic search a query the {directory} directory's content." self.args_schema = FixedDirectorySearchToolSchema @@ -41,7 +42,6 @@ class DirectorySearchTool(RagTool): *args: Any, **kwargs: Any, ) -> None: - kwargs["loader"] = DirectoryLoader(config=dict(recursive=True)) super().add(*args, **kwargs) def _before_run( diff --git a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py index 8be327af3..9f6c49df7 100644 --- a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py +++ b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py @@ -37,6 +37,7 @@ class DOCXSearchTool(RagTool): def __init__(self, docx: Optional[str] = None, **kwargs): super().__init__(**kwargs) if docx is not None: + kwargs["data_type"] = DataType.DOCX self.add(docx) self.description = f"A tool that can be used to semantic search a query the {docx} DOCX's content." self.args_schema = FixedDOCXSearchToolSchema @@ -47,7 +48,6 @@ class DOCXSearchTool(RagTool): *args: Any, **kwargs: Any, ) -> None: - kwargs["data_type"] = DataType.DOCX super().add(*args, **kwargs) def _before_run( diff --git a/src/crewai_tools/tools/github_search_tool/github_search_tool.py b/src/crewai_tools/tools/github_search_tool/github_search_tool.py index f1b372d8e..4bf8b9e05 100644 --- a/src/crewai_tools/tools/github_search_tool/github_search_tool.py +++ b/src/crewai_tools/tools/github_search_tool/github_search_tool.py @@ -38,6 +38,9 @@ class GithubSearchTool(RagTool): def __init__(self, github_repo: Optional[str] = None, **kwargs): super().__init__(**kwargs) if github_repo is not None: + kwargs["data_type"] = "github" + kwargs["loader"] = GithubLoader(config={"token": self.gh_token}) + self.add(repo=github_repo) self.description = f"A tool that can be used to semantic search a query the {github_repo} github repo's content. This is not the GitHub API, but instead a tool that can provide semantic search capabilities." 
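A construction sketch for this tool's setup — the repository slug and token are placeholders, and it assumes `gh_token` and `content_types` are keyword fields, which the loader config above implies:

```python
from crewai_tools import GithubSearchTool

github_tool = GithubSearchTool(
    github_repo="owner/repo",         # placeholder repository slug
    gh_token="ghp_xxxxxxxxxxxx",      # placeholder personal access token
    content_types=["code", "issue"],  # narrows what gets embedded and searched
)
```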
self.args_schema = FixedGithubSearchToolSchema @@ -51,8 +54,6 @@ class GithubSearchTool(RagTool): ) -> None: content_types = content_types or self.content_types - kwargs["data_type"] = "github" - kwargs["loader"] = GithubLoader(config={"token": self.gh_token}) super().add(f"repo:{repo} type:{','.join(content_types)}", **kwargs) def _before_run( diff --git a/src/crewai_tools/tools/json_search_tool/json_search_tool.py b/src/crewai_tools/tools/json_search_tool/json_search_tool.py index 68dee653f..422f2f175 100644 --- a/src/crewai_tools/tools/json_search_tool/json_search_tool.py +++ b/src/crewai_tools/tools/json_search_tool/json_search_tool.py @@ -31,6 +31,7 @@ class JSONSearchTool(RagTool): def __init__(self, json_path: Optional[str] = None, **kwargs): super().__init__(**kwargs) if json_path is not None: + kwargs["data_type"] = DataType.JSON self.add(json_path) self.description = f"A tool that can be used to semantic search a query the {json_path} JSON's content." self.args_schema = FixedJSONSearchToolSchema @@ -41,7 +42,6 @@ class JSONSearchTool(RagTool): *args: Any, **kwargs: Any, ) -> None: - kwargs["data_type"] = DataType.JSON super().add(*args, **kwargs) def _before_run( diff --git a/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py b/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py index 832ab1166..dd1c2bb9d 100644 --- a/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py +++ b/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py @@ -31,6 +31,7 @@ class MDXSearchTool(RagTool): def __init__(self, mdx: Optional[str] = None, **kwargs): super().__init__(**kwargs) if mdx is not None: + kwargs["data_type"] = DataType.MDX self.add(mdx) self.description = f"A tool that can be used to semantic search a query the {mdx} MDX's content." self.args_schema = FixedMDXSearchToolSchema @@ -41,7 +42,6 @@ class MDXSearchTool(RagTool): *args: Any, **kwargs: Any, ) -> None: - kwargs["data_type"] = DataType.MDX super().add(*args, **kwargs) def _before_run( diff --git a/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py b/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py index 02f6ec3f4..f931a006b 100644 --- a/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py +++ b/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py @@ -25,6 +25,8 @@ class MySQLSearchTool(RagTool): def __init__(self, table_name: str, **kwargs): super().__init__(**kwargs) + kwargs["data_type"] = "mysql" + kwargs["loader"] = MySQLLoader(config=dict(url=self.db_uri)) self.add(table_name) self.description = f"A tool that can be used to semantic search a query the {table_name} database table's content." self._generate_description() @@ -34,8 +36,6 @@ class MySQLSearchTool(RagTool): table_name: str, **kwargs: Any, ) -> None: - kwargs["data_type"] = "mysql" - kwargs["loader"] = MySQLLoader(config=dict(url=self.db_uri)) super().add(f"SELECT * FROM {table_name};", **kwargs) def _run( diff --git a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py index 23e4af4d9..fc11306ce 100644 --- a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py +++ b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py @@ -30,6 +30,7 @@ class PDFSearchTool(RagTool): def __init__(self, pdf: Optional[str] = None, **kwargs): super().__init__(**kwargs) if pdf is not None: + kwargs["data_type"] = DataType.PDF_FILE self.add(pdf) self.description = f"A tool that can be used to semantic search a query the {pdf} PDF's content." 
self.args_schema = FixedPDFSearchToolSchema @@ -56,7 +57,6 @@ class PDFSearchTool(RagTool): *args: Any, **kwargs: Any, ) -> None: - kwargs["data_type"] = DataType.PDF_FILE super().add(*args, **kwargs) def _before_run( diff --git a/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py b/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py index ff478a542..dc75470a2 100644 --- a/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py +++ b/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py @@ -25,6 +25,8 @@ class PGSearchTool(RagTool): def __init__(self, table_name: str, **kwargs): super().__init__(**kwargs) + kwargs["data_type"] = "postgres" + kwargs["loader"] = PostgresLoader(config=dict(url=self.db_uri)) self.add(table_name) self.description = f"A tool that can be used to semantic search a query the {table_name} database table's content." self._generate_description() @@ -34,8 +36,6 @@ class PGSearchTool(RagTool): table_name: str, **kwargs: Any, ) -> None: - kwargs["data_type"] = "postgres" - kwargs["loader"] = PostgresLoader(config=dict(url=self.db_uri)) super().add(f"SELECT * FROM {table_name};", **kwargs) def _run( diff --git a/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py b/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py index f50085eee..95b353f45 100644 --- a/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py +++ b/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py @@ -31,6 +31,7 @@ class TXTSearchTool(RagTool): def __init__(self, txt: Optional[str] = None, **kwargs): super().__init__(**kwargs) if txt is not None: + kwargs["data_type"] = DataType.TEXT_FILE self.add(txt) self.description = f"A tool that can be used to semantic search a query the {txt} txt's content." self.args_schema = FixedTXTSearchToolSchema @@ -41,7 +42,6 @@ class TXTSearchTool(RagTool): *args: Any, **kwargs: Any, ) -> None: - kwargs["data_type"] = DataType.TEXT_FILE super().add(*args, **kwargs) def _before_run( diff --git a/src/crewai_tools/tools/website_search/website_search_tool.py b/src/crewai_tools/tools/website_search/website_search_tool.py index d20c4bf23..faa1a02e8 100644 --- a/src/crewai_tools/tools/website_search/website_search_tool.py +++ b/src/crewai_tools/tools/website_search/website_search_tool.py @@ -33,6 +33,7 @@ class WebsiteSearchTool(RagTool): def __init__(self, website: Optional[str] = None, **kwargs): super().__init__(**kwargs) if website is not None: + kwargs["data_type"] = DataType.WEB_PAGE self.add(website) self.description = f"A tool that can be used to semantic search a query from {website} website content." self.args_schema = FixedWebsiteSearchToolSchema @@ -43,7 +44,6 @@ class WebsiteSearchTool(RagTool): *args: Any, **kwargs: Any, ) -> None: - kwargs["data_type"] = DataType.WEB_PAGE super().add(*args, **kwargs) def _before_run( diff --git a/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py b/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py index ada37d766..95a382299 100644 --- a/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py +++ b/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py @@ -31,6 +31,7 @@ class XMLSearchTool(RagTool): def __init__(self, xml: Optional[str] = None, **kwargs): super().__init__(**kwargs) if xml is not None: + kwargs["data_type"] = DataType.XML self.add(xml) self.description = f"A tool that can be used to semantic search a query the {xml} XML's content." 
self.args_schema = FixedXMLSearchToolSchema @@ -41,7 +42,6 @@ class XMLSearchTool(RagTool): *args: Any, **kwargs: Any, ) -> None: - kwargs["data_type"] = DataType.XML super().add(*args, **kwargs) def _before_run( diff --git a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py index 1a1c521d0..b0c6209f1 100644 --- a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py +++ b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py @@ -33,6 +33,7 @@ class YoutubeChannelSearchTool(RagTool): def __init__(self, youtube_channel_handle: Optional[str] = None, **kwargs): super().__init__(**kwargs) if youtube_channel_handle is not None: + kwargs["data_type"] = DataType.YOUTUBE_CHANNEL self.add(youtube_channel_handle) self.description = f"A tool that can be used to semantic search a query the {youtube_channel_handle} Youtube Channels content." self.args_schema = FixedYoutubeChannelSearchToolSchema @@ -45,8 +46,6 @@ class YoutubeChannelSearchTool(RagTool): ) -> None: if not youtube_channel_handle.startswith("@"): youtube_channel_handle = f"@{youtube_channel_handle}" - - kwargs["data_type"] = DataType.YOUTUBE_CHANNEL super().add(youtube_channel_handle, **kwargs) def _before_run( diff --git a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py index 9feb2b941..6852fafb4 100644 --- a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py +++ b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py @@ -33,6 +33,7 @@ class YoutubeVideoSearchTool(RagTool): def __init__(self, youtube_video_url: Optional[str] = None, **kwargs): super().__init__(**kwargs) if youtube_video_url is not None: + kwargs["data_type"] = DataType.YOUTUBE_VIDEO self.add(youtube_video_url) self.description = f"A tool that can be used to semantic search a query the {youtube_video_url} Youtube Video content." 
self.args_schema = FixedYoutubeVideoSearchToolSchema @@ -43,7 +44,6 @@ class YoutubeVideoSearchTool(RagTool): *args: Any, **kwargs: Any, ) -> None: - kwargs["data_type"] = DataType.YOUTUBE_VIDEO super().add(*args, **kwargs) def _before_run( From 90a13cb6f741c26d677c7389cdad027686285b80 Mon Sep 17 00:00:00 2001 From: Eduardo Chiarotti Date: Tue, 15 Oct 2024 09:16:17 -0300 Subject: [PATCH 137/391] feat: change to uv on docs --- README.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index f622ec49d..491890877 100644 --- a/README.md +++ b/README.md @@ -96,13 +96,14 @@ Please ensure your code adheres to our coding standards and includes appropriate **Installing Dependencies:** ```bash -poetry install +uv sync ``` **Activating Virtual Environment:** ```bash -poetry shell +uv venv +source .venv/bin/activate ``` **Setting Up Pre-commit Hooks:** @@ -114,19 +115,19 @@ pre-commit install **Running Tests:** ```bash -poetry run pytest +uv run pytest ``` **Static Type Checking:** ```bash -poetry run pyright +uv run pyright ``` **Packaging:** ```bash -poetry build +uv build ``` **Local Installation:** From fe172cb4de7998c812a711cb5b5f12f2768fce4b Mon Sep 17 00:00:00 2001 From: Aoki Haruhito <53058630+11bluetree@users.noreply.github.com> Date: Thu, 17 Oct 2024 10:03:37 +0900 Subject: [PATCH 138/391] fix: SpiderTool import --- src/crewai_tools/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index b5dcc81b9..6bd8dfd71 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -33,6 +33,7 @@ from .tools import ( SerplyScholarSearchTool, SerplyWebpageToMarkdownTool, SerplyWebSearchTool, + SpiderTool, TXTSearchTool, VisionTool, WebsiteSearchTool, From 96429040de61a9bb65692a660c8d6a82110ab371 Mon Sep 17 00:00:00 2001 From: Adan Butto Date: Sun, 20 Oct 2024 18:54:01 +0300 Subject: [PATCH 139/391] Fixed File Writer tool errors for better interaction with the agents --- .../file_writer_tool/file_writer_tool.py | 28 ++++++++++--------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py b/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py index ce0c4ebd9..428502d46 100644 --- a/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py +++ b/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py @@ -1,19 +1,16 @@ import os -from typing import Any, Optional, Type - +from typing import Any, Type from pydantic import BaseModel - -from ..base_tool import BaseTool - +from crewai_tools import BaseTool +from distutils.util import strtobool class FileWriterToolInput(BaseModel): filename: str content: str - directory: Optional[str] = None - overwrite: bool = False - - -class FileWriterTool(BaseTool): + directory: str = "./" + overwrite: str = "False" + +class FileWriterTool2(BaseTool): name: str = "File Writer Tool" description: str = ( "A tool to write content to a specified file. Accepts filename, content, and optionally a directory path and overwrite flag as input." 
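One caveat worth flagging on the `distutils` import added above: `distutils` was deprecated by PEP 632 and removed from the standard library in Python 3.12, so a small local helper is a safer drop-in — a sketch:

```python
def strtobool(value: str) -> bool:
    # Mirrors the spellings distutils.util.strtobool accepted.
    v = value.strip().lower()
    if v in ("y", "yes", "t", "true", "on", "1"):
        return True
    if v in ("n", "no", "f", "false", "off", "0"):
        return False
    raise ValueError(f"invalid truth value {value!r}")
```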
@@ -23,11 +20,14 @@ class FileWriterTool(BaseTool): def _run(self, **kwargs: Any) -> str: try: # Create the directory if it doesn't exist - if kwargs["directory"] and not os.path.exists(kwargs["directory"]): + if kwargs.get("directory") and not os.path.exists(kwargs["directory"]): os.makedirs(kwargs["directory"]) # Construct the full path - filepath = os.path.join(kwargs["directory"] or "", kwargs["filename"]) + filepath = os.path.join(kwargs.get("directory") or "", kwargs["filename"]) + + # Convert overwrite to boolean + kwargs["overwrite"] = bool(strtobool(kwargs["overwrite"])) # Check if file exists and overwrite is not allowed if os.path.exists(filepath) and not kwargs["overwrite"]: @@ -42,5 +42,7 @@ class FileWriterTool(BaseTool): return ( f"File {filepath} already exists and overwrite option was not passed." ) + except KeyError as e: + return f"An error occurred while accessing key: {str(e)}" except Exception as e: - return f"An error occurred while writing to the file: {str(e)}" + return f"An error occurred while writing to the file: {str(e)}" \ No newline at end of file From 857d6c135c2b7786211434140c3d66a3ef4b7ada Mon Sep 17 00:00:00 2001 From: Adan Butto Date: Sun, 20 Oct 2024 20:37:59 +0300 Subject: [PATCH 140/391] reverted directory variable back to optional --- .../tools/file_writer_tool/file_writer_tool.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py b/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py index 428502d46..b3ca13c55 100644 --- a/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py +++ b/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py @@ -1,16 +1,17 @@ import os -from typing import Any, Type +from typing import Any, Optional, Type from pydantic import BaseModel -from crewai_tools import BaseTool +from ..base_tool import BaseTool from distutils.util import strtobool + class FileWriterToolInput(BaseModel): filename: str content: str - directory: str = "./" + directory: Optional[str] = "./" overwrite: str = "False" -class FileWriterTool2(BaseTool): +class FileWriterTool(BaseTool): name: str = "File Writer Tool" description: str = ( "A tool to write content to a specified file. Accepts filename, content, and optionally a directory path and overwrite flag as input." From 6a7e917e1d9bd766945b390a07d1ccd9927a4c29 Mon Sep 17 00:00:00 2001 From: Adan Butto Date: Tue, 22 Oct 2024 18:47:52 +0300 Subject: [PATCH 141/391] Changed order of the arguments, placing 'content' last. It tends to forget context when it gets to filling other arguments when content is on the longer side. 
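For reference, a minimal sketch of the resulting call pattern — filename, directory, and content are illustrative, and `overwrite` is deliberately passed as a string because the tool coerces it with `strtobool`:

```python
from crewai_tools import FileWriterTool

tool = FileWriterTool()
result = tool.run(
    filename="report.md",
    directory="output",
    overwrite="true",  # string on purpose; coerced to a boolean internally
    content="# Q3 Report\nEverything shipped on time.",
)
print(result)
```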
--- src/crewai_tools/tools/file_writer_tool/file_writer_tool.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py b/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py index b3ca13c55..a008e4a75 100644 --- a/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py +++ b/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py @@ -6,10 +6,10 @@ from distutils.util import strtobool class FileWriterToolInput(BaseModel): - filename: str - content: str + filename: str directory: Optional[str] = "./" overwrite: str = "False" + content: str class FileWriterTool(BaseTool): name: str = "File Writer Tool" From 8e15bc63869588ff20aea4c4740fac719a76a08f Mon Sep 17 00:00:00 2001 From: Brandon Hancock Date: Wed, 23 Oct 2024 10:36:29 -0400 Subject: [PATCH 142/391] add support for unsafe code execution --- .../code_interpreter_tool.py | 23 ++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py index f333a676d..a4488b35f 100644 --- a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py +++ b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py @@ -29,6 +29,7 @@ class CodeInterpreterTool(BaseTool): default_image_tag: str = "code-interpreter:latest" code: Optional[str] = None user_dockerfile_path: Optional[str] = None + unsafe_mode: bool = False @staticmethod def _get_installed_package_path(): @@ -66,7 +67,11 @@ class CodeInterpreterTool(BaseTool): def _run(self, **kwargs) -> str: code = kwargs.get("code", self.code) libraries_used = kwargs.get("libraries_used", []) - return self.run_code_in_docker(code, libraries_used) + + if self.unsafe_mode: + return self.run_code_unsafe(code, libraries_used) + else: + return self.run_code_in_docker(code, libraries_used) def _install_libraries( self, container: docker.models.containers.Container, libraries: List[str] @@ -113,3 +118,19 @@ class CodeInterpreterTool(BaseTool): if exec_result.exit_code != 0: return f"Something went wrong while running the code: \n{exec_result.output.decode('utf-8')}" return exec_result.output.decode("utf-8") + + def run_code_unsafe(self, code: str, libraries_used: List[str]) -> str: + """ + Run the code directly on the host machine (unsafe mode). 
+ """ + # Install libraries on the host machine + for library in libraries_used: + os.system(f"pip install {library}") + + # Execute the code + try: + exec_locals = {} + exec(code, {}, exec_locals) + return exec_locals.get("result", "No result variable found.") + except Exception as e: + return f"An error occurred: {str(e)}" From 96e52767ad417738c684e6ff7470ce25a458634e Mon Sep 17 00:00:00 2001 From: Ernest Poletaev Date: Fri, 25 Oct 2024 22:03:59 +0700 Subject: [PATCH 143/391] fix: web scraper concatenate words --- .../tools/scrape_website_tool/scrape_website_tool.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py index 7173c2156..3cfb67bae 100644 --- a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py +++ b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py @@ -1,4 +1,5 @@ import os +import re from typing import Any, Optional, Type import requests @@ -67,7 +68,6 @@ class ScrapeWebsiteTool(BaseTool): page.encoding = page.apparent_encoding parsed = BeautifulSoup(page.text, "html.parser") - text = parsed.get_text() - text = "\n".join([i for i in text.split("\n") if i.strip() != ""]) - text = " ".join([i for i in text.split(" ") if i.strip() != ""]) + text = parsed.get_text(" ") + text = re.sub('\s+', ' ', text) return text From 1f8791953e41194fe0c34761076096824c844bf8 Mon Sep 17 00:00:00 2001 From: Ernest Poletaev Date: Fri, 25 Oct 2024 22:33:24 +0700 Subject: [PATCH 144/391] fix: retain line breaks --- .../tools/scrape_website_tool/scrape_website_tool.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py index 3cfb67bae..99df1d2dd 100644 --- a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py +++ b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py @@ -69,5 +69,6 @@ class ScrapeWebsiteTool(BaseTool): parsed = BeautifulSoup(page.text, "html.parser") text = parsed.get_text(" ") - text = re.sub('\s+', ' ', text) + text = re.sub('[ \t]+', ' ', text) + text = re.sub('\\s+\n\\s+', '\n', text) return text From 2061f8ca4121d77ca6be9d1b0e7813a08f75ff56 Mon Sep 17 00:00:00 2001 From: Brandon Hancock Date: Fri, 1 Nov 2024 17:19:54 -0400 Subject: [PATCH 145/391] Improve firecrawl tool --- .../firecrawl_crawl_website_tool.py | 8 ++++++-- .../firecrawl_scrape_website_tool.py | 8 ++++++-- .../firecrawl_search_tool/firecrawl_search_tool.py | 10 +++++++--- 3 files changed, 19 insertions(+), 7 deletions(-) diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py index 80a8392e9..a535b6c63 100644 --- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py @@ -1,9 +1,13 @@ -from typing import Any, Dict, List, Optional, Type +from typing import TYPE_CHECKING, Any, Dict, Optional, Type from pydantic import BaseModel, Field from crewai_tools.tools.base_tool import BaseTool +# Type checking import +if TYPE_CHECKING: + from firecrawl import FirecrawlApp + class FirecrawlCrawlWebsiteToolSchema(BaseModel): url: str = Field(description="Website URL") @@ -20,7 +24,7 @@ class FirecrawlCrawlWebsiteTool(BaseTool): 
description: str = "Crawl webpages using Firecrawl and return the contents" args_schema: Type[BaseModel] = FirecrawlCrawlWebsiteToolSchema api_key: Optional[str] = None - firecrawl: Optional[Any] = None + firecrawl: Optional["FirecrawlApp"] = None def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) diff --git a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py index 0de3335c7..ee8e592ca 100644 --- a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py @@ -1,9 +1,13 @@ -from typing import Any, Dict, Optional, Type +from typing import TYPE_CHECKING, Any, Dict, Optional, Type from pydantic import BaseModel, Field from crewai_tools.tools.base_tool import BaseTool +# Type checking import +if TYPE_CHECKING: + from firecrawl import FirecrawlApp + class FirecrawlScrapeWebsiteToolSchema(BaseModel): url: str = Field(description="Website URL") @@ -24,7 +28,7 @@ class FirecrawlScrapeWebsiteTool(BaseTool): description: str = "Scrape webpages url using Firecrawl and return the contents" args_schema: Type[BaseModel] = FirecrawlScrapeWebsiteToolSchema api_key: Optional[str] = None - firecrawl: Optional[Any] = None + firecrawl: Optional["FirecrawlApp"] = None # Updated to use TYPE_CHECKING def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) diff --git a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py index ad92e2661..13c3b82ee 100644 --- a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py +++ b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py @@ -1,9 +1,13 @@ -from typing import Any, Dict, List, Optional, Type +from typing import TYPE_CHECKING, Any, Dict, Optional, Type from pydantic import BaseModel, Field from crewai_tools.tools.base_tool import BaseTool +# Type checking import +if TYPE_CHECKING: + from firecrawl import FirecrawlApp + class FirecrawlSearchToolSchema(BaseModel): query: str = Field(description="Search query") @@ -20,7 +24,7 @@ class FirecrawlSearchTool(BaseTool): description: str = "Search webpages using Firecrawl and return the results" args_schema: Type[BaseModel] = FirecrawlSearchToolSchema api_key: Optional[str] = None - firecrawl: Optional[Any] = None + firecrawl: Optional["FirecrawlApp"] = None def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) @@ -45,4 +49,4 @@ class FirecrawlSearchTool(BaseTool): result_options = {} options = {"pageOptions": page_options, "resultOptions": result_options} - return self.firecrawl.search(query, options) + return self.firecrawl.search(query, **options) From 10639d877514e50008c9474339e8bd681e7de4bf Mon Sep 17 00:00:00 2001 From: Piotr Mardziel Date: Tue, 5 Nov 2024 16:22:35 -0800 Subject: [PATCH 146/391] Update base_tool.py --- src/crewai_tools/tools/base_tool.py | 95 +---------------------------- 1 file changed, 1 insertion(+), 94 deletions(-) diff --git a/src/crewai_tools/tools/base_tool.py b/src/crewai_tools/tools/base_tool.py index dee3b9317..60c477c41 100644 --- a/src/crewai_tools/tools/base_tool.py +++ b/src/crewai_tools/tools/base_tool.py @@ -5,100 +5,7 @@ from langchain_core.tools import StructuredTool from pydantic import BaseModel, ConfigDict, Field, validator 
from pydantic import BaseModel as PydanticBaseModel - -class BaseTool(BaseModel, ABC): - class _ArgsSchemaPlaceholder(PydanticBaseModel): - pass - - model_config = ConfigDict() - - name: str - """The unique name of the tool that clearly communicates its purpose.""" - description: str - """Used to tell the model how/when/why to use the tool.""" - args_schema: Type[PydanticBaseModel] = Field(default_factory=_ArgsSchemaPlaceholder) - """The schema for the arguments that the tool accepts.""" - description_updated: bool = False - """Flag to check if the description has been updated.""" - cache_function: Optional[Callable] = lambda _args, _result: True - """Function that will be used to determine if the tool should be cached, should return a boolean. If None, the tool will be cached.""" - result_as_answer: bool = False - """Flag to check if the tool should be the final agent answer.""" - - @validator("args_schema", always=True, pre=True) - def _default_args_schema( - cls, v: Type[PydanticBaseModel] - ) -> Type[PydanticBaseModel]: - if not isinstance(v, cls._ArgsSchemaPlaceholder): - return v - - return type( - f"{cls.__name__}Schema", - (PydanticBaseModel,), - { - "__annotations__": { - k: v for k, v in cls._run.__annotations__.items() if k != "return" - }, - }, - ) - - def model_post_init(self, __context: Any) -> None: - self._generate_description() - - super().model_post_init(__context) - - def run( - self, - *args: Any, - **kwargs: Any, - ) -> Any: - print(f"Using Tool: {self.name}") - return self._run(*args, **kwargs) - - @abstractmethod - def _run( - self, - *args: Any, - **kwargs: Any, - ) -> Any: - """Here goes the actual implementation of the tool.""" - - def to_langchain(self) -> StructuredTool: - self._set_args_schema() - return StructuredTool( - name=self.name, - description=self.description, - args_schema=self.args_schema, - func=self._run, - ) - - def _set_args_schema(self): - if self.args_schema is None: - class_name = f"{self.__class__.__name__}Schema" - self.args_schema = type( - class_name, - (PydanticBaseModel,), - { - "__annotations__": { - k: v - for k, v in self._run.__annotations__.items() - if k != "return" - }, - }, - ) - - def _generate_description(self): - args = [] - args_description = [] - for arg, attribute in self.args_schema.schema()["properties"].items(): - if "type" in attribute: - args.append(f"{arg}: '{attribute['type']}'") - if "description" in attribute: - args_description.append(f"{arg}: '{attribute['description']}'") - - description = self.description.replace("\n", " ") - self.description = f"{self.name}({', '.join(args)}) - {description} {', '.join(args_description)}" - +from crewai.tools.base_tool import BaseTool class Tool(BaseTool): func: Callable From 1c37158208552f3ec84d4a8f269643c3a7eeb61c Mon Sep 17 00:00:00 2001 From: theCyberTech <84775494+theCyberTech@users.noreply.github.com> Date: Wed, 20 Nov 2024 18:58:58 +0800 Subject: [PATCH 147/391] fix: correct variable name typo in exa_base_tool --- src/crewai_tools/tools/exa_tools/exa_base_tool.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/crewai_tools/tools/exa_tools/exa_base_tool.py b/src/crewai_tools/tools/exa_tools/exa_base_tool.py index 6273c5f7a..d2fe6217c 100644 --- a/src/crewai_tools/tools/exa_tools/exa_base_tool.py +++ b/src/crewai_tools/tools/exa_tools/exa_base_tool.py @@ -28,10 +28,10 @@ class EXABaseTool(BaseTool): } def _parse_results(self, results): - stirng = [] + string = [] for result in results: try: - stirng.append( + string.append( "\n".join( [ 
f"Title: {result['title']}", @@ -43,7 +43,7 @@ class EXABaseTool(BaseTool): ) ) except KeyError: - next + continue - content = "\n".join(stirng) + content = "\n".join(string) return f"\nSearch results: {content}\n" From 488782fb4ad26411649df5d16d848e5001e69bf8 Mon Sep 17 00:00:00 2001 From: Eduardo Chiarotti Date: Mon, 25 Nov 2024 16:14:49 -0300 Subject: [PATCH 148/391] feat: remove langchain from code and change to CrewStructuredTool and update Dockerilf CodeInterpreter --- src/crewai_tools/tools/base_tool.py | 13 ++++++------- .../tools/code_interpreter_tool/Dockerfile | 12 ++---------- 2 files changed, 8 insertions(+), 17 deletions(-) diff --git a/src/crewai_tools/tools/base_tool.py b/src/crewai_tools/tools/base_tool.py index 60c477c41..674e33030 100644 --- a/src/crewai_tools/tools/base_tool.py +++ b/src/crewai_tools/tools/base_tool.py @@ -1,11 +1,10 @@ -from abc import ABC, abstractmethod -from typing import Any, Callable, Optional, Type +from typing import Any, Callable -from langchain_core.tools import StructuredTool -from pydantic import BaseModel, ConfigDict, Field, validator from pydantic import BaseModel as PydanticBaseModel from crewai.tools.base_tool import BaseTool +from crewai.tools.structured_tool import CrewStructuredTool + class Tool(BaseTool): func: Callable @@ -16,9 +15,9 @@ class Tool(BaseTool): def to_langchain( - tools: list[BaseTool | StructuredTool], -) -> list[StructuredTool]: - return [t.to_langchain() if isinstance(t, BaseTool) else t for t in tools] + tools: list[BaseTool | CrewStructuredTool], +) -> list[CrewStructuredTool]: + return [t.to_structured_tool() if isinstance(t, BaseTool) else t for t in tools] def tool(*args): diff --git a/src/crewai_tools/tools/code_interpreter_tool/Dockerfile b/src/crewai_tools/tools/code_interpreter_tool/Dockerfile index ae9b2ffd6..4df22ca58 100644 --- a/src/crewai_tools/tools/code_interpreter_tool/Dockerfile +++ b/src/crewai_tools/tools/code_interpreter_tool/Dockerfile @@ -1,14 +1,6 @@ -FROM python:3.11-slim +FROM python:3.12-alpine -# Install common utilities -RUN apt-get update && apt-get install -y \ - build-essential \ - curl \ - wget \ - software-properties-common - -# Clean up -RUN apt-get clean && rm -rf /var/lib/apt/lists/* +RUN pip install requests beautifulsoup4 # Set the working directory WORKDIR /workspace From 15970734e3690c81198ea238543d6a08000dd242 Mon Sep 17 00:00:00 2001 From: Mike Plachta Date: Wed, 27 Nov 2024 17:52:56 -0800 Subject: [PATCH 149/391] Jina Website Scraper v1 --- .../tools/jina_scrape_website_tool/README.md | 38 ++++++++++++++ .../jina_scrape_website_tool.py | 52 +++++++++++++++++++ 2 files changed, 90 insertions(+) create mode 100644 src/crewai_tools/tools/jina_scrape_website_tool/README.md create mode 100644 src/crewai_tools/tools/jina_scrape_website_tool/jina_scrape_website_tool.py diff --git a/src/crewai_tools/tools/jina_scrape_website_tool/README.md b/src/crewai_tools/tools/jina_scrape_website_tool/README.md new file mode 100644 index 000000000..0278e5aa0 --- /dev/null +++ b/src/crewai_tools/tools/jina_scrape_website_tool/README.md @@ -0,0 +1,38 @@ +# JinaScrapeWebsiteTool + +## Description +A tool designed to extract and read the content of a specified website by using Jina.ai reader. It is capable of handling various types of web pages by making HTTP requests and parsing the received HTML content. This tool can be particularly useful for web scraping tasks, data collection, or extracting specific information from websites. 
+ +## Installation +Install the crewai_tools package +```shell +pip install 'crewai[tools]' +``` + +## Example +```python +from crewai_tools import JinaScrapeWebsiteTool + +# To enable scraping any website it finds during its execution +tool = JinaScrapeWebsiteTool(api_key='YOUR_API_KEY') + +# Initialize the tool with the website URL, so the agent can only scrape the content of the specified website +tool = JinaScrapeWebsiteTool(website_url='https://www.example.com') + +# With custom headers +tool = JinaScrapeWebsiteTool( + website_url='https://www.example.com', + custom_headers={'X-Target-Selector': 'body, .class, #id'} +) +``` + +## Authentication +The tool uses Jina.ai's reader service. While it can work without an API key, Jina.ai may apply rate limiting or blocking to unauthenticated requests. For production use, it's recommended to provide an API key. + +## Arguments +- `website_url`: Mandatory website URL to read the file. This is the primary input for the tool, specifying which website's content should be scraped and read. +- `api_key`: Optional Jina.ai API key for authenticated access to the reader service. +- `custom_headers`: Optional dictionary of HTTP headers to use when making requests. + +## Note +This tool is an alternative to the standard `ScrapeWebsiteTool` that specifically uses Jina.ai's reader service for enhanced content extraction. Choose this tool when you need more sophisticated content parsing capabilities. \ No newline at end of file diff --git a/src/crewai_tools/tools/jina_scrape_website_tool/jina_scrape_website_tool.py b/src/crewai_tools/tools/jina_scrape_website_tool/jina_scrape_website_tool.py new file mode 100644 index 000000000..7fec77938 --- /dev/null +++ b/src/crewai_tools/tools/jina_scrape_website_tool/jina_scrape_website_tool.py @@ -0,0 +1,52 @@ +import requests +from typing import Type, Optional +from crewai_tools import BaseTool +from pydantic import BaseModel, Field + + +class JinaScrapeWebsiteToolInput(BaseModel): + """Input schema for JinaScrapeWebsiteTool.""" + website_url: str = Field(..., description="Mandatory website url to read the file") + + +class JinaScrapeWebsiteTool(BaseTool): + name: str = "JinaScrapeWebsiteTool" + description: str = "A tool that can be used to read a website content using Jina.ai reader and return markdown content." + args_schema: Type[BaseModel] = JinaScrapeWebsiteToolInput + website_url: Optional[str] = None + api_key: Optional[str] = None + headers: dict = {} + + def __init__( + self, + website_url: Optional[str] = None, + api_key: Optional[str] = None, + custom_headers: Optional[dict] = None, + **kwargs + ): + super().__init__(**kwargs) + if website_url is not None: + self.website_url = website_url + self.description = ( + f"A tool that can be used to read {website_url}'s content and return markdown content." 
+ ) + self._generate_description() + + if custom_headers is not None: + self.headers = custom_headers + + if api_key is not None: + self.headers["Authorization"] = f"Bearer {api_key}" + + def _run(self, website_url: Optional[str] = None) -> str: + url = website_url or self.website_url + if not url: + raise ValueError("Website URL must be provided either during initialization or execution") + + response = requests.get( + f"https://r.jina.ai/{url}", + headers=self.headers, + timeout=15 + ) + response.raise_for_status() + return response.text From cbec6d5cd797f36088ab6f3c4f8f8957c79d68b1 Mon Sep 17 00:00:00 2001 From: theCyberTech <84775494+theCyberTech@users.noreply.github.com> Date: Thu, 28 Nov 2024 11:15:18 +0800 Subject: [PATCH 150/391] docs: fix API key reference in MultiOnTool README --- src/crewai_tools/tools/multion_tool/README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/multion_tool/README.md b/src/crewai_tools/tools/multion_tool/README.md index ea530037f..da92a0682 100644 --- a/src/crewai_tools/tools/multion_tool/README.md +++ b/src/crewai_tools/tools/multion_tool/README.md @@ -41,7 +41,7 @@ crew.kickoff() ## Arguments -- `api_key`: Specifies Browserbase API key. Defaults is the `BROWSERBASE_API_KEY` environment variable. +- `api_key`: Specifies MultiOn API key. Default is the `MULTION_API_KEY` environment variable. - `local`: Use the local flag set as "true" to run the agent locally on your browser. Make sure the multion browser extension is installed and API Enabled is checked. - `max_steps`: Optional. Set the max_steps the multion agent can take for a command @@ -51,4 +51,3 @@ To effectively use the `MultiOnTool`, follow these steps: 1. **Install CrewAI**: Confirm that the `crewai[tools]` package is installed in your Python environment. 2. **Install and use MultiOn**: Follow MultiOn documentation for installing the MultiOn Browser Extension (https://docs.multion.ai/learn/browser-extension). 3. **Enable API Usage**: Click on the MultiOn extension in the extensions folder of your browser (not the hovering MultiOn icon on the web page) to open the extension configurations. 
Click the API Enabled toggle to enable the API - From dd18c59a9bbb2d73842f041ee7c0717f76658d3d Mon Sep 17 00:00:00 2001 From: Mike Plachta Date: Wed, 27 Nov 2024 19:57:52 -0800 Subject: [PATCH 151/391] Update jina_scrape_website_tool.py --- .../tools/jina_scrape_website_tool/jina_scrape_website_tool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/jina_scrape_website_tool/jina_scrape_website_tool.py b/src/crewai_tools/tools/jina_scrape_website_tool/jina_scrape_website_tool.py index 7fec77938..d887c085d 100644 --- a/src/crewai_tools/tools/jina_scrape_website_tool/jina_scrape_website_tool.py +++ b/src/crewai_tools/tools/jina_scrape_website_tool/jina_scrape_website_tool.py @@ -1,6 +1,6 @@ import requests from typing import Type, Optional -from crewai_tools import BaseTool +from ..base_tool import BaseTool from pydantic import BaseModel, Field From a94470772fbfad0e538858b8d245c89cfab1dde8 Mon Sep 17 00:00:00 2001 From: Carlos Souza Date: Thu, 28 Nov 2024 19:09:44 -0500 Subject: [PATCH 152/391] Fix pydantic related errors on FirecrawlScrapeWebsiteTool --- .../firecrawl_scrape_website_tool.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py index ee8e592ca..89478976d 100644 --- a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING, Any, Dict, Optional, Type -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, ConfigDict from crewai_tools.tools.base_tool import BaseTool @@ -24,6 +24,7 @@ class FirecrawlScrapeWebsiteToolSchema(BaseModel): class FirecrawlScrapeWebsiteTool(BaseTool): + model_config = ConfigDict(arbitrary_types_allowed=True) name: str = "Firecrawl web scrape tool" description: str = "Scrape webpages url using Firecrawl and return the contents" args_schema: Type[BaseModel] = FirecrawlScrapeWebsiteToolSchema @@ -61,3 +62,11 @@ class FirecrawlScrapeWebsiteTool(BaseTool): "timeout": timeout, } return self.firecrawl.scrape_url(url, options) + +try: + from firecrawl import FirecrawlApp + FirecrawlScrapeWebsiteTool.model_rebuild() +except ImportError: + raise ImportError( + "`firecrawl` package not found, please run `pip install firecrawl-py`" + ) \ No newline at end of file From 49ad43ff083c924e3ed510b84999efd3ba0144c8 Mon Sep 17 00:00:00 2001 From: Carlos Souza Date: Fri, 29 Nov 2024 05:51:55 -0500 Subject: [PATCH 153/391] Do not reraise exception Tool use is optional and missing dependency should not raise error --- .../firecrawl_scrape_website_tool.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py index 89478976d..cb00a46c6 100644 --- a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py @@ -65,8 +65,10 @@ class FirecrawlScrapeWebsiteTool(BaseTool): try: from firecrawl import FirecrawlApp + # Must rebuild model after class is defined FirecrawlScrapeWebsiteTool.model_rebuild() except ImportError: - raise ImportError( - 
"`firecrawl` package not found, please run `pip install firecrawl-py`" - ) \ No newline at end of file + """ + When this tool is not used, then exception can be ignored. + """ + pass \ No newline at end of file From e677a271e53afa265a55f5ad77f85b4cceba9ee4 Mon Sep 17 00:00:00 2001 From: Carlos Souza Date: Fri, 29 Nov 2024 05:57:09 -0500 Subject: [PATCH 154/391] More explicit model config --- .../firecrawl_scrape_website_tool.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py index cb00a46c6..84b61209b 100644 --- a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py @@ -24,7 +24,11 @@ class FirecrawlScrapeWebsiteToolSchema(BaseModel): class FirecrawlScrapeWebsiteTool(BaseTool): - model_config = ConfigDict(arbitrary_types_allowed=True) + model_config = ConfigDict( + arbitrary_types_allowed=True, + validate_assignment=True, + frozen=False + ) name: str = "Firecrawl web scrape tool" description: str = "Scrape webpages url using Firecrawl and return the contents" args_schema: Type[BaseModel] = FirecrawlScrapeWebsiteToolSchema @@ -63,6 +67,7 @@ class FirecrawlScrapeWebsiteTool(BaseTool): } return self.firecrawl.scrape_url(url, options) + try: from firecrawl import FirecrawlApp # Must rebuild model after class is defined @@ -71,4 +76,4 @@ except ImportError: """ When this tool is not used, then exception can be ignored. """ - pass \ No newline at end of file + pass From ec9951e28a8ce5eb66fcabf33b93cc84779b9a2d Mon Sep 17 00:00:00 2001 From: theCyberTech <84775494+theCyberTech@users.noreply.github.com> Date: Sat, 30 Nov 2024 00:34:04 +0800 Subject: [PATCH 155/391] docs: improve CodeDocsSearchTool README - Fix tool name in custom model example --- src/crewai_tools/tools/code_docs_search_tool/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/code_docs_search_tool/README.md b/src/crewai_tools/tools/code_docs_search_tool/README.md index 879461427..f90398a11 100644 --- a/src/crewai_tools/tools/code_docs_search_tool/README.md +++ b/src/crewai_tools/tools/code_docs_search_tool/README.md @@ -32,7 +32,7 @@ Note: Substitute 'https://docs.example.com/reference' with your target documenta By default, the tool uses OpenAI for both embeddings and summarization. To customize the model, you can use a config dictionary as follows: ```python -tool = YoutubeVideoSearchTool( +tool = CodeDocsSearchTool( config=dict( llm=dict( provider="ollama", # or google, openai, anthropic, llama2, ... From eed6a38ea4060e44d55a072cef4a47dfcb0ff266 Mon Sep 17 00:00:00 2001 From: Carlos Souza Date: Fri, 29 Nov 2024 11:41:47 -0500 Subject: [PATCH 156/391] Fix pydantic related errors. 
--- .../firecrawl_crawl_website_tool.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py index a535b6c63..672656fc9 100644 --- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING, Any, Dict, Optional, Type -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, ConfigDict from crewai_tools.tools.base_tool import BaseTool @@ -20,6 +20,11 @@ class FirecrawlCrawlWebsiteToolSchema(BaseModel): class FirecrawlCrawlWebsiteTool(BaseTool): + model_config = ConfigDict( + arbitrary_types_allowed=True, + validate_assignment=True, + frozen=False + ) name: str = "Firecrawl web crawl tool" description: str = "Crawl webpages using Firecrawl and return the contents" args_schema: Type[BaseModel] = FirecrawlCrawlWebsiteToolSchema @@ -50,3 +55,14 @@ class FirecrawlCrawlWebsiteTool(BaseTool): options = {"crawlerOptions": crawler_options, "pageOptions": page_options} return self.firecrawl.crawl_url(url, options) + + +try: + from firecrawl import FirecrawlApp + # Must rebuild model after class is defined + FirecrawlCrawlWebsiteTool.model_rebuild() +except ImportError: + """ + When this tool is not used, then exception can be ignored. + """ + pass \ No newline at end of file From 945ed7aaaa3962e5412e8b5476a476b39c0c36d9 Mon Sep 17 00:00:00 2001 From: theCyberTech <84775494+theCyberTech@users.noreply.github.com> Date: Sat, 30 Nov 2024 10:52:57 +0800 Subject: [PATCH 157/391] docs: fix typos and formatting in NL2SQL tool README --- src/crewai_tools/tools/nl2sql/README.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/crewai_tools/tools/nl2sql/README.md b/src/crewai_tools/tools/nl2sql/README.md index d0bb82271..932867c90 100644 --- a/src/crewai_tools/tools/nl2sql/README.md +++ b/src/crewai_tools/tools/nl2sql/README.md @@ -2,9 +2,9 @@ ## Description -This tool is used to convert natural language to SQL queries. When passsed to the agent it will generate queries and then use them to interact with the database. +This tool is used to convert natural language to SQL queries. When passed to the agent it will generate queries and then use them to interact with the database. -This enables multiple workflows like having an Agent to access the database fetch information based on the goal and then use the information to generate a response, report or any other output. Along with that proivdes the ability for the Agent to update the database based on its goal. +This enables multiple workflows like having an Agent to access the database fetch information based on the goal and then use the information to generate a response, report or any other output. Along with that provides the ability for the Agent to update the database based on its goal. **Attention**: Make sure that the Agent has access to a Read-Replica or that is okay for the Agent to run insert/update queries on the database. @@ -23,7 +23,6 @@ pip install 'crewai[tools]' In order to use the NL2SQLTool, you need to pass the database URI to the tool. The URI should be in the format `dialect+driver://username:password@host:port/database`. 
-
 
 ```python
 from crewai_tools import NL2SQLTool
 
@@ -43,7 +42,7 @@ def researcher(self) -> Agent:
 
 The primary task goal was:
 
-"Retrieve the average, maximum, and minimum monthly revenue for each city, but only include cities that have more than one user. Also, count the number of user in each city and sort the results by the average monthly revenue in descending order"
+"Retrieve the average, maximum, and minimum monthly revenue for each city, but only include cities that have more than one user. Also, count the number of users in each city and sort the results by the average monthly revenue in descending order"
 
 So the Agent tried to get information from the DB, the first one is wrong so the Agent tries again and gets the correct information and passes to the next agent.
 
From 6c242ef3bbfe722d4159e60c931d20e2a38a0570 Mon Sep 17 00:00:00 2001
From: siddas27
Date: Sat, 30 Nov 2024 14:04:06 -0600
Subject: [PATCH 158/391] add brave search tool

---
 .../tools/brave_search_tool/README.md         | 30 +++++++
 .../brave_search_tool/brave_search_tool.py    | 82 +++++++++++++++++++
 tests/tools/brave_search_tool_test.py         | 13 +++
 3 files changed, 125 insertions(+)
 create mode 100644 src/crewai_tools/tools/brave_search_tool/README.md
 create mode 100644 src/crewai_tools/tools/brave_search_tool/brave_search_tool.py
 create mode 100644 tests/tools/brave_search_tool_test.py

diff --git a/src/crewai_tools/tools/brave_search_tool/README.md b/src/crewai_tools/tools/brave_search_tool/README.md
new file mode 100644
index 000000000..a66210491
--- /dev/null
+++ b/src/crewai_tools/tools/brave_search_tool/README.md
@@ -0,0 +1,30 @@
+# BraveSearchTool Documentation
+
+## Description
+This tool is designed to perform a web search for a specified query across the internet. It utilizes the Brave Web Search API, a REST API for querying Brave Search and retrieving results from the web. Requests are sent with query parameters and headers, and the API returns search results as JSON.
+
+## Installation
+To incorporate this tool into your project, follow the installation instructions below:
+```shell
+pip install 'crewai[tools]'
+```
+
+## Example
+The following example demonstrates how to initialize the tool and execute a search with a given query:
+
+```python
+from crewai_tools import BraveSearchTool
+
+# Initialize the tool for internet searching capabilities
+tool = BraveSearchTool()
+```
+
+## Steps to Get Started
+To effectively use the `BraveSearchTool`, follow these steps:
+
+1. **Package Installation**: Confirm that the `crewai[tools]` package is installed in your Python environment.
+2. **API Key Acquisition**: Acquire an API key [here](https://api.search.brave.com/app/keys).
+3. **Environment Configuration**: Store your obtained API key in an environment variable named `BRAVE_API_KEY` to facilitate its use by the tool.
+
+## Conclusion
+By integrating the `BraveSearchTool` into Python projects, users gain the ability to conduct real-time, relevant searches across the internet directly from their applications. By adhering to the setup and usage guidelines provided, incorporating this tool into projects is streamlined and straightforward.
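As a quick sanity check of the README above, here is a minimal usage sketch of the tool exactly as added in this patch; the import path mirrors the accompanying test file, and the API key value is a placeholder:

```python
import os

from crewai_tools.tools.brave_search_tool.brave_search_tool import BraveSearchTool

# The tool reads the key from the environment when it issues the request.
# Placeholder value; create a real key at https://api.search.brave.com/app/keys
os.environ["BRAVE_API_KEY"] = "your-api-key"

# Ask the Brave API for at most two results per query.
tool = BraveSearchTool(n_results=2)

# In this first version, save_file=False (the default) returns the parsed
# result entries; save_file=True writes a formatted "Title / Link / Snippet"
# block to a file and returns it instead.
print(tool.run(search_query="CrewAI agents"))
```

Note that `_save_results_to_file` as written calls `datetime.now()` on the `datetime` module rather than the `datetime` class, so the `save_file=True` path would raise an `AttributeError` until that import is adjusted; later patches in this series also layer error handling, rate limiting, and a top-level `crewai_tools` export on top of this version.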
diff --git a/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py b/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py new file mode 100644 index 000000000..54f546f1e --- /dev/null +++ b/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py @@ -0,0 +1,82 @@ +import datetime +import os +from typing import Any, Optional, Type + +import requests +from pydantic import BaseModel, Field + +from crewai_tools.tools.base_tool import BaseTool + + +def _save_results_to_file(content: str) -> None: + """Saves the search results to a file.""" + filename = f"search_results_{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt" + with open(filename, "w") as file: + file.write(content) + print(f"Results saved to {filename}") + + +class BraveSearchToolSchema(BaseModel): + """Input for BraveSearchTool.""" + + search_query: str = Field( + ..., description="Mandatory search query you want to use to search the internet" + ) + + +class BraveSearchTool(BaseTool): + name: str = "Search the internet" + description: str = ( + "A tool that can be used to search the internet with a search_query." + ) + args_schema: Type[BaseModel] = BraveSearchToolSchema + search_url: str = "https://api.search.brave.com/res/v1/web/search" + country: Optional[str] = "" + n_results: int = 10 + save_file: bool = False + + def _run( + self, + **kwargs: Any, + ) -> Any: + search_query = kwargs.get("search_query") or kwargs.get("query") + save_file = kwargs.get("save_file", self.save_file) + n_results = kwargs.get("n_results", self.n_results) + + payload = {"q": search_query, "count": n_results} + + if self.country != "": + payload["country"] = self.country + + headers = { + "X-Subscription-Token": os.environ["BRAVE_API_KEY"], + "Accept": "application/json", + } + + response = requests.get(self.search_url, headers=headers, params=payload) + results = response.json() + + if "web" in results: + results = results["web"]["results"] + string = [] + for result in results: + try: + string.append( + "\n".join( + [ + f"Title: {result['title']}", + f"Link: {result['url']}", + f"Snippet: {result['description']}", + "---", + ] + ) + ) + except KeyError: + continue + + content = "\n".join(string) + if save_file: + _save_results_to_file(content) + return f"\nSearch results: {content}\n" + else: + return results diff --git a/tests/tools/brave_search_tool_test.py b/tests/tools/brave_search_tool_test.py new file mode 100644 index 000000000..16c1bcb92 --- /dev/null +++ b/tests/tools/brave_search_tool_test.py @@ -0,0 +1,13 @@ +from crewai_tools.tools.brave_search_tool.brave_search_tool import BraveSearchTool + + +def test_brave_tool(): + tool = BraveSearchTool( + n_results=2, + ) + + print(tool.run(search_query="ChatGPT")) + + +if __name__ == "__main__": + test_brave_tool() From d168b8e24554e37a706d0af18c4b82af483fd442 Mon Sep 17 00:00:00 2001 From: siddas27 Date: Sat, 30 Nov 2024 21:36:28 -0600 Subject: [PATCH 159/391] add error handling --- .../tools/brave_search_tool/__init__.py | 0 .../brave_search_tool/brave_search_tool.py | 90 ++++++++++++------- tests/tools/brave_search_tool_test.py | 37 ++++++++ 3 files changed, 96 insertions(+), 31 deletions(-) create mode 100644 src/crewai_tools/tools/brave_search_tool/__init__.py diff --git a/src/crewai_tools/tools/brave_search_tool/__init__.py b/src/crewai_tools/tools/brave_search_tool/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py b/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py index 
54f546f1e..6a8818d75 100644 --- a/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py +++ b/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py @@ -25,6 +25,18 @@ class BraveSearchToolSchema(BaseModel): class BraveSearchTool(BaseTool): + """ + BraveSearchTool - A tool for performing web searches using the Brave Search API. + + This module provides functionality to search the internet using Brave's Search API, + supporting customizable result counts and country-specific searches. + + Dependencies: + - requests + - pydantic + - python-dotenv (for API key management) + """ + name: str = "Search the internet" description: str = ( "A tool that can be used to search the internet with a search_query." @@ -35,48 +47,64 @@ class BraveSearchTool(BaseTool): n_results: int = 10 save_file: bool = False + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if "BRAVE_API_KEY" not in os.environ: + raise ValueError( + "BRAVE_API_KEY environment variable is required for BraveSearchTool" + ) + def _run( self, **kwargs: Any, ) -> Any: - search_query = kwargs.get("search_query") or kwargs.get("query") - save_file = kwargs.get("save_file", self.save_file) - n_results = kwargs.get("n_results", self.n_results) + try: + search_query = kwargs.get("search_query") or kwargs.get("query") + if not search_query: + raise ValueError("Search query is required") - payload = {"q": search_query, "count": n_results} + save_file = kwargs.get("save_file", self.save_file) + n_results = kwargs.get("n_results", self.n_results) - if self.country != "": - payload["country"] = self.country + payload = {"q": search_query, "count": n_results} - headers = { - "X-Subscription-Token": os.environ["BRAVE_API_KEY"], - "Accept": "application/json", - } + if self.country != "": + payload["country"] = self.country - response = requests.get(self.search_url, headers=headers, params=payload) - results = response.json() + headers = { + "X-Subscription-Token": os.environ["BRAVE_API_KEY"], + "Accept": "application/json", + } - if "web" in results: - results = results["web"]["results"] - string = [] - for result in results: - try: - string.append( - "\n".join( - [ - f"Title: {result['title']}", - f"Link: {result['url']}", - f"Snippet: {result['description']}", - "---", - ] + response = requests.get(self.search_url, headers=headers, params=payload) + response.raise_for_status() # Handle non-200 responses + results = response.json() + + if "web" in results: + results = results["web"]["results"] + string = [] + for result in results: + try: + string.append( + "\n".join( + [ + f"Title: {result['title']}", + f"Link: {result['url']}", + f"Snippet: {result['description']}", + "---", + ] + ) ) - ) - except KeyError: - continue + except KeyError: + continue content = "\n".join(string) - if save_file: - _save_results_to_file(content) + except requests.RequestException as e: + return f"Error performing search: {str(e)}" + except KeyError as e: + return f"Error parsing search results: {str(e)}" + if save_file: + _save_results_to_file(content) return f"\nSearch results: {content}\n" else: - return results + return content diff --git a/tests/tools/brave_search_tool_test.py b/tests/tools/brave_search_tool_test.py index 16c1bcb92..969bd48fe 100644 --- a/tests/tools/brave_search_tool_test.py +++ b/tests/tools/brave_search_tool_test.py @@ -1,6 +1,41 @@ +from unittest.mock import patch + +import pytest + from crewai_tools.tools.brave_search_tool.brave_search_tool import BraveSearchTool +@pytest.fixture +def brave_tool(): + 
return BraveSearchTool(n_results=2) + + +def test_brave_tool_initialization(): + tool = BraveSearchTool() + assert tool.n_results == 10 + assert tool.save_file is False + + +@patch("requests.get") +def test_brave_tool_search(mock_get, brave_tool): + mock_response = { + "web": { + "results": [ + { + "title": "Test Title", + "url": "http://test.com", + "description": "Test Description", + } + ] + } + } + mock_get.return_value.json.return_value = mock_response + + result = brave_tool.run(search_query="test") + assert "Test Title" in result + assert "http://test.com" in result + + def test_brave_tool(): tool = BraveSearchTool( n_results=2, @@ -11,3 +46,5 @@ def test_brave_tool(): if __name__ == "__main__": test_brave_tool() + test_brave_tool_initialization() + # test_brave_tool_search(brave_tool) From 5532ea8ff72993860b85326d7299351a0b23c3b5 Mon Sep 17 00:00:00 2001 From: siddas27 Date: Sat, 30 Nov 2024 21:51:46 -0600 Subject: [PATCH 160/391] add lru caching --- src/crewai_tools/tools/brave_search_tool/brave_search_tool.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py b/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py index 6a8818d75..5ff451484 100644 --- a/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py +++ b/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py @@ -1,5 +1,6 @@ import datetime import os +from functools import lru_cache from typing import Any, Optional, Type import requests @@ -54,6 +55,7 @@ class BraveSearchTool(BaseTool): "BRAVE_API_KEY environment variable is required for BraveSearchTool" ) + @lru_cache(maxsize=100) def _run( self, **kwargs: Any, From e7e059d02a4fa09f2b13873643a4ce38c4c45dc2 Mon Sep 17 00:00:00 2001 From: siddas27 Date: Sat, 30 Nov 2024 22:08:29 -0600 Subject: [PATCH 161/391] add rate limiting --- .../tools/brave_search_tool/brave_search_tool.py | 13 ++++++++++--- tests/tools/brave_search_tool_test.py | 4 ++-- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py b/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py index 5ff451484..8d6a9a182 100644 --- a/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py +++ b/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py @@ -1,7 +1,7 @@ import datetime import os -from functools import lru_cache -from typing import Any, Optional, Type +import time +from typing import Any, ClassVar, Optional, Type import requests from pydantic import BaseModel, Field @@ -47,6 +47,8 @@ class BraveSearchTool(BaseTool): country: Optional[str] = "" n_results: int = 10 save_file: bool = False + _last_request_time: ClassVar[float] = 0 + _min_request_interval: ClassVar[float] = 1.0 # seconds def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @@ -55,11 +57,16 @@ class BraveSearchTool(BaseTool): "BRAVE_API_KEY environment variable is required for BraveSearchTool" ) - @lru_cache(maxsize=100) def _run( self, **kwargs: Any, ) -> Any: + current_time = time.time() + if (current_time - self._last_request_time) < self._min_request_interval: + time.sleep( + self._min_request_interval - (current_time - self._last_request_time) + ) + BraveSearchTool._last_request_time = time.time() try: search_query = kwargs.get("search_query") or kwargs.get("query") if not search_query: diff --git a/tests/tools/brave_search_tool_test.py b/tests/tools/brave_search_tool_test.py index 969bd48fe..36300f723 100644 --- 
a/tests/tools/brave_search_tool_test.py +++ b/tests/tools/brave_search_tool_test.py @@ -40,8 +40,8 @@ def test_brave_tool(): tool = BraveSearchTool( n_results=2, ) - - print(tool.run(search_query="ChatGPT")) + x = tool.run(search_query="ChatGPT") + print(x) if __name__ == "__main__": From 95cc6835a130a35fab52bae5c9e41e7073fc0ef0 Mon Sep 17 00:00:00 2001 From: siddas27 Date: Sat, 30 Nov 2024 22:30:31 -0600 Subject: [PATCH 162/391] update name --- src/crewai_tools/tools/brave_search_tool/brave_search_tool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py b/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py index 8d6a9a182..dceff1d57 100644 --- a/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py +++ b/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py @@ -38,7 +38,7 @@ class BraveSearchTool(BaseTool): - python-dotenv (for API key management) """ - name: str = "Search the internet" + name: str = "Brave Web Search the internet" description: str = ( "A tool that can be used to search the internet with a search_query." ) From e0d3ee5b23b1999e4a8b01904e61fc19c85a41c0 Mon Sep 17 00:00:00 2001 From: theCyberTech <84775494+theCyberTech@users.noreply.github.com> Date: Tue, 3 Dec 2024 20:35:23 +0800 Subject: [PATCH 163/391] docs: add Discourse community link to contact section Add link to Discourse community platform in the contact section to provide users with an additional support channel alongside Discord. --- README.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 491890877..aca20f640 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ In the realm of CrewAI agents, tools are pivotal for enhancing functionality. Th

-[Homepage](https://www.crewai.io/) | [Documentation](https://docs.crewai.com/) | [Chat with Docs](https://chatg.pt/DWjSBZn) | [Examples](https://github.com/crewAIInc/crewAI-examples) | [Discord](https://discord.com/invite/X4JWnZnxPb) +[Homepage](https://www.crewai.io/) | [Documentation](https://docs.crewai.com/) | [Chat with Docs](https://chatg.pt/DWjSBZn) | [Examples](https://github.com/crewAIInc/crewAI-examples) | [Discord](https://discord.com/invite/X4JWnZnxPb) | [Discourse](https://community.crewai.com/)

@@ -140,6 +140,4 @@ Thank you for your interest in enhancing the capabilities of AI agents through a ## Contact -For questions or support, please join our [Discord community](https://discord.com/invite/X4JWnZnxPb) or open an issue in this repository. - - +For questions or support, please join our [Discord community](https://discord.com/invite/X4JWnZnxPb), [Discourse](https://community.crewai.com/) or open an issue in this repository. From a64cccbd724a9c24fc825a16de0ec1de2fc39ed9 Mon Sep 17 00:00:00 2001 From: siddas27 Date: Wed, 4 Dec 2024 22:28:30 -0600 Subject: [PATCH 164/391] add BraveSearchTool to init --- src/crewai_tools/__init__.py | 3 ++- src/crewai_tools/tools/__init__.py | 13 +++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 6bd8dfd71..5f9a81d9c 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -1,4 +1,5 @@ from .tools import ( + BraveSearchTool, BrowserbaseLoadTool, CodeDocsSearchTool, CodeInterpreterTool, @@ -19,6 +20,7 @@ from .tools import ( LlamaIndexTool, MDXSearchTool, MultiOnTool, + MySQLSearchTool, NL2SQLTool, PDFSearchTool, PGSearchTool, @@ -40,6 +42,5 @@ from .tools import ( XMLSearchTool, YoutubeChannelSearchTool, YoutubeVideoSearchTool, - MySQLSearchTool ) from .tools.base_tool import BaseTool, Tool, tool diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 9016c57fd..73a96f4cf 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -1,3 +1,4 @@ +from .brave_search_tool.brave_search_tool import BraveSearchTool from .browserbase_load_tool.browserbase_load_tool import BrowserbaseLoadTool from .code_docs_search_tool.code_docs_search_tool import CodeDocsSearchTool from .code_interpreter_tool.code_interpreter_tool import CodeInterpreterTool @@ -11,10 +12,10 @@ from .exa_tools.exa_search_tool import EXASearchTool from .file_read_tool.file_read_tool import FileReadTool from .file_writer_tool.file_writer_tool import FileWriterTool from .firecrawl_crawl_website_tool.firecrawl_crawl_website_tool import ( - FirecrawlCrawlWebsiteTool + FirecrawlCrawlWebsiteTool, ) from .firecrawl_scrape_website_tool.firecrawl_scrape_website_tool import ( - FirecrawlScrapeWebsiteTool + FirecrawlScrapeWebsiteTool, ) from .firecrawl_search_tool.firecrawl_search_tool import FirecrawlSearchTool from .github_search_tool.github_search_tool import GithubSearchTool @@ -22,16 +23,17 @@ from .json_search_tool.json_search_tool import JSONSearchTool from .llamaindex_tool.llamaindex_tool import LlamaIndexTool from .mdx_seach_tool.mdx_search_tool import MDXSearchTool from .multion_tool.multion_tool import MultiOnTool +from .mysql_search_tool.mysql_search_tool import MySQLSearchTool from .nl2sql.nl2sql_tool import NL2SQLTool from .pdf_search_tool.pdf_search_tool import PDFSearchTool from .pg_seach_tool.pg_search_tool import PGSearchTool from .rag.rag_tool import RagTool from .scrape_element_from_website.scrape_element_from_website import ( - ScrapeElementFromWebsiteTool + ScrapeElementFromWebsiteTool, ) from .scrape_website_tool.scrape_website_tool import ScrapeWebsiteTool from .scrapfly_scrape_website_tool.scrapfly_scrape_website_tool import ( - ScrapflyScrapeWebsiteTool + ScrapflyScrapeWebsiteTool, ) from .selenium_scraping_tool.selenium_scraping_tool import SeleniumScrapingTool from .serper_dev_tool.serper_dev_tool import SerperDevTool @@ -46,7 +48,6 @@ from .vision_tool.vision_tool import VisionTool from 
.website_search.website_search_tool import WebsiteSearchTool from .xml_search_tool.xml_search_tool import XMLSearchTool from .youtube_channel_search_tool.youtube_channel_search_tool import ( - YoutubeChannelSearchTool + YoutubeChannelSearchTool, ) from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool -from .mysql_search_tool.mysql_search_tool import MySQLSearchTool From d5fb31e645ccf0ebd7299442c6b099d6ca116e2c Mon Sep 17 00:00:00 2001 From: Brandon Hancock Date: Thu, 5 Dec 2024 13:16:48 -0500 Subject: [PATCH 165/391] update basetool dependencies to use root crewai repo --- README.md | 4 +- src/crewai_tools/__init__.py | 1 - src/crewai_tools/tools/base_tool.py | 59 ------------------- .../brave_search_tool/brave_search_tool.py | 3 +- .../browserbase_load_tool.py | 3 +- .../code_interpreter_tool.py | 3 +- .../tools/composio_tool/composio_tool.py | 3 +- .../tools/dalle_tool/dalle_tool.py | 3 +- .../directory_read_tool.py | 3 +- .../tools/exa_tools/exa_base_tool.py | 4 +- .../tools/exa_tools/exa_search_tool.py | 40 +++++++------ .../tools/file_read_tool/file_read_tool.py | 3 +- .../file_writer_tool/file_writer_tool.py | 16 ++--- .../firecrawl_crawl_website_tool.py | 12 ++-- .../firecrawl_scrape_website_tool.py | 10 ++-- .../firecrawl_search_tool.py | 3 +- .../jina_scrape_website_tool.py | 26 ++++---- .../tools/llamaindex_tool/llamaindex_tool.py | 3 +- .../tools/multion_tool/multion_tool.py | 2 +- src/crewai_tools/tools/nl2sql/nl2sql_tool.py | 6 +- src/crewai_tools/tools/rag/rag_tool.py | 3 +- .../scrape_element_from_website.py | 3 +- .../scrape_website_tool.py | 7 +-- .../scrapfly_scrape_website_tool.py | 3 +- .../selenium_scraping_tool.py | 4 +- .../tools/serper_dev_tool/serper_dev_tool.py | 3 +- .../serply_news_search_tool.py | 3 +- .../serply_scholar_search_tool.py | 3 +- .../serply_api_tool/serply_web_search_tool.py | 3 +- .../tools/spider_tool/spider_tool.py | 3 +- .../tools/vision_tool/vision_tool.py | 3 +- 31 files changed, 82 insertions(+), 163 deletions(-) delete mode 100644 src/crewai_tools/tools/base_tool.py diff --git a/README.md b/README.md index aca20f640..43cdc9b57 100644 --- a/README.md +++ b/README.md @@ -51,7 +51,7 @@ There are three ways to create tools for crewAI agents: ### Subclassing `BaseTool` ```python -from crewai_tools import BaseTool +from crewai.tools import BaseTool class MyCustomTool(BaseTool): name: str = "Name of my tool" @@ -70,7 +70,7 @@ Define a new class inheriting from `BaseTool`, specifying `name`, `description`, For a simpler approach, create a `Tool` object directly with the required attributes and a functional logic. 
```python -from crewai_tools import tool +from crewai.tools import BaseTool @tool("Name of my tool") def my_tool(question: str) -> str: """Clear description for what this tool is useful for, you agent will need this information to use it.""" diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 5f9a81d9c..3fad09d9f 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -43,4 +43,3 @@ from .tools import ( YoutubeChannelSearchTool, YoutubeVideoSearchTool, ) -from .tools.base_tool import BaseTool, Tool, tool diff --git a/src/crewai_tools/tools/base_tool.py b/src/crewai_tools/tools/base_tool.py deleted file mode 100644 index 674e33030..000000000 --- a/src/crewai_tools/tools/base_tool.py +++ /dev/null @@ -1,59 +0,0 @@ -from typing import Any, Callable - -from pydantic import BaseModel as PydanticBaseModel - -from crewai.tools.base_tool import BaseTool -from crewai.tools.structured_tool import CrewStructuredTool - - -class Tool(BaseTool): - func: Callable - """The function that will be executed when the tool is called.""" - - def _run(self, *args: Any, **kwargs: Any) -> Any: - return self.func(*args, **kwargs) - - -def to_langchain( - tools: list[BaseTool | CrewStructuredTool], -) -> list[CrewStructuredTool]: - return [t.to_structured_tool() if isinstance(t, BaseTool) else t for t in tools] - - -def tool(*args): - """ - Decorator to create a tool from a function. - """ - - def _make_with_name(tool_name: str) -> Callable: - def _make_tool(f: Callable) -> BaseTool: - if f.__doc__ is None: - raise ValueError("Function must have a docstring") - if f.__annotations__ is None: - raise ValueError("Function must have type annotations") - - class_name = "".join(tool_name.split()).title() - args_schema = type( - class_name, - (PydanticBaseModel,), - { - "__annotations__": { - k: v for k, v in f.__annotations__.items() if k != "return" - }, - }, - ) - - return Tool( - name=tool_name, - description=f.__doc__, - func=f, - args_schema=args_schema, - ) - - return _make_tool - - if len(args) == 1 and callable(args[0]): - return _make_with_name(args[0].__name__)(args[0]) - if len(args) == 1 and isinstance(args[0], str): - return _make_with_name(args[0]) - raise ValueError("Invalid arguments") diff --git a/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py b/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py index dceff1d57..11035739d 100644 --- a/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py +++ b/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py @@ -4,10 +4,9 @@ import time from typing import Any, ClassVar, Optional, Type import requests +from crewai.tools import BaseTool from pydantic import BaseModel, Field -from crewai_tools.tools.base_tool import BaseTool - def _save_results_to_file(content: str) -> None: """Saves the search results to a file.""" diff --git a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py index 514664557..54c33db3c 100644 --- a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py +++ b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py @@ -1,9 +1,8 @@ from typing import Any, Optional, Type +from crewai.tools import BaseTool from pydantic import BaseModel, Field -from crewai_tools.tools.base_tool import BaseTool - class BrowserbaseLoadToolSchema(BaseModel): url: str = Field(description="Website URL") diff --git 
a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py index a4488b35f..61c180fe3 100644 --- a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py +++ b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py @@ -3,10 +3,9 @@ import os from typing import List, Optional, Type import docker +from crewai.tools import BaseTool from pydantic import BaseModel, Field -from crewai_tools.tools.base_tool import BaseTool - class CodeInterpreterSchema(BaseModel): """Input for CodeInterpreterTool.""" diff --git a/src/crewai_tools/tools/composio_tool/composio_tool.py b/src/crewai_tools/tools/composio_tool/composio_tool.py index 62068c0bd..4823441bf 100644 --- a/src/crewai_tools/tools/composio_tool/composio_tool.py +++ b/src/crewai_tools/tools/composio_tool/composio_tool.py @@ -5,8 +5,7 @@ Composio tools wrapper. import typing as t import typing_extensions as te - -from crewai_tools.tools.base_tool import BaseTool +from crewai.tools import BaseTool class ComposioTool(BaseTool): diff --git a/src/crewai_tools/tools/dalle_tool/dalle_tool.py b/src/crewai_tools/tools/dalle_tool/dalle_tool.py index da6adb2b1..7040de11a 100644 --- a/src/crewai_tools/tools/dalle_tool/dalle_tool.py +++ b/src/crewai_tools/tools/dalle_tool/dalle_tool.py @@ -1,11 +1,10 @@ import json from typing import Type +from crewai.tools import BaseTool from openai import OpenAI from pydantic import BaseModel -from crewai_tools.tools.base_tool import BaseTool - class ImagePromptSchema(BaseModel): """Input for Dall-E Tool.""" diff --git a/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py b/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py index 3d308ba45..6033202be 100644 --- a/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py +++ b/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py @@ -1,10 +1,9 @@ import os from typing import Any, Optional, Type +from crewai.tools import BaseTool from pydantic import BaseModel, Field -from ..base_tool import BaseTool - class FixedDirectoryReadToolSchema(BaseModel): """Input for DirectoryReadTool.""" diff --git a/src/crewai_tools/tools/exa_tools/exa_base_tool.py b/src/crewai_tools/tools/exa_tools/exa_base_tool.py index d2fe6217c..295b283ad 100644 --- a/src/crewai_tools/tools/exa_tools/exa_base_tool.py +++ b/src/crewai_tools/tools/exa_tools/exa_base_tool.py @@ -1,10 +1,8 @@ -import os from typing import Type +from crewai.tools import BaseTool from pydantic import BaseModel, Field -from crewai_tools.tools.base_tool import BaseTool - class EXABaseToolToolSchema(BaseModel): """Input for EXABaseTool.""" diff --git a/src/crewai_tools/tools/exa_tools/exa_search_tool.py b/src/crewai_tools/tools/exa_tools/exa_search_tool.py index 30f77d1ee..6724c2417 100644 --- a/src/crewai_tools/tools/exa_tools/exa_search_tool.py +++ b/src/crewai_tools/tools/exa_tools/exa_search_tool.py @@ -1,28 +1,30 @@ import os -import requests from typing import Any +import requests + from .exa_base_tool import EXABaseTool + class EXASearchTool(EXABaseTool): - def _run( - self, - **kwargs: Any, - ) -> Any: - search_query = kwargs.get('search_query') - if search_query is None: - search_query = kwargs.get('query') + def _run( + self, + **kwargs: Any, + ) -> Any: + search_query = kwargs.get("search_query") + if search_query is None: + search_query = kwargs.get("query") - payload = { - "query": search_query, - "type": "magic", - } + payload = { + "query": search_query, + 
"type": "magic", + } - headers = self.headers.copy() - headers["x-api-key"] = os.environ['EXA_API_KEY'] + headers = self.headers.copy() + headers["x-api-key"] = os.environ["EXA_API_KEY"] - response = requests.post(self.search_url, json=payload, headers=headers) - results = response.json() - if 'results' in results: - results = super()._parse_results(results['results']) - return results + response = requests.post(self.search_url, json=payload, headers=headers) + results = response.json() + if "results" in results: + results = super()._parse_results(results["results"]) + return results diff --git a/src/crewai_tools/tools/file_read_tool/file_read_tool.py b/src/crewai_tools/tools/file_read_tool/file_read_tool.py index 265dca54a..fe34c9d8b 100644 --- a/src/crewai_tools/tools/file_read_tool/file_read_tool.py +++ b/src/crewai_tools/tools/file_read_tool/file_read_tool.py @@ -1,9 +1,8 @@ from typing import Any, Optional, Type +from crewai.tools import BaseTool from pydantic import BaseModel, Field -from ..base_tool import BaseTool - class FixedFileReadToolSchema(BaseModel): """Input for FileReadTool.""" diff --git a/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py b/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py index a008e4a75..ed454a1bd 100644 --- a/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py +++ b/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py @@ -1,16 +1,18 @@ import os -from typing import Any, Optional, Type -from pydantic import BaseModel -from ..base_tool import BaseTool from distutils.util import strtobool +from typing import Any, Optional, Type + +from crewai.tools import BaseTool +from pydantic import BaseModel class FileWriterToolInput(BaseModel): - filename: str + filename: str directory: Optional[str] = "./" overwrite: str = "False" content: str - + + class FileWriterTool(BaseTool): name: str = "File Writer Tool" description: str = ( @@ -26,7 +28,7 @@ class FileWriterTool(BaseTool): # Construct the full path filepath = os.path.join(kwargs.get("directory") or "", kwargs["filename"]) - + # Convert overwrite to boolean kwargs["overwrite"] = bool(strtobool(kwargs["overwrite"])) @@ -46,4 +48,4 @@ class FileWriterTool(BaseTool): except KeyError as e: return f"An error occurred while accessing key: {str(e)}" except Exception as e: - return f"An error occurred while writing to the file: {str(e)}" \ No newline at end of file + return f"An error occurred while writing to the file: {str(e)}" diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py index 672656fc9..c23ff2100 100644 --- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py @@ -1,8 +1,7 @@ from typing import TYPE_CHECKING, Any, Dict, Optional, Type -from pydantic import BaseModel, Field, ConfigDict - -from crewai_tools.tools.base_tool import BaseTool +from crewai.tools import BaseTool +from pydantic import BaseModel, ConfigDict, Field # Type checking import if TYPE_CHECKING: @@ -21,9 +20,7 @@ class FirecrawlCrawlWebsiteToolSchema(BaseModel): class FirecrawlCrawlWebsiteTool(BaseTool): model_config = ConfigDict( - arbitrary_types_allowed=True, - validate_assignment=True, - frozen=False + arbitrary_types_allowed=True, validate_assignment=True, frozen=False ) name: str = "Firecrawl web crawl tool" description: str = "Crawl webpages using 
Firecrawl and return the contents" @@ -59,10 +56,11 @@ class FirecrawlCrawlWebsiteTool(BaseTool): try: from firecrawl import FirecrawlApp + # Must rebuild model after class is defined FirecrawlCrawlWebsiteTool.model_rebuild() except ImportError: """ When this tool is not used, then exception can be ignored. """ - pass \ No newline at end of file + pass diff --git a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py index 84b61209b..9ab7d293e 100644 --- a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py @@ -1,8 +1,7 @@ from typing import TYPE_CHECKING, Any, Dict, Optional, Type -from pydantic import BaseModel, Field, ConfigDict - -from crewai_tools.tools.base_tool import BaseTool +from crewai.tools import BaseTool +from pydantic import BaseModel, ConfigDict, Field # Type checking import if TYPE_CHECKING: @@ -25,9 +24,7 @@ class FirecrawlScrapeWebsiteToolSchema(BaseModel): class FirecrawlScrapeWebsiteTool(BaseTool): model_config = ConfigDict( - arbitrary_types_allowed=True, - validate_assignment=True, - frozen=False + arbitrary_types_allowed=True, validate_assignment=True, frozen=False ) name: str = "Firecrawl web scrape tool" description: str = "Scrape webpages url using Firecrawl and return the contents" @@ -70,6 +67,7 @@ class FirecrawlScrapeWebsiteTool(BaseTool): try: from firecrawl import FirecrawlApp + # Must rebuild model after class is defined FirecrawlScrapeWebsiteTool.model_rebuild() except ImportError: diff --git a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py index 13c3b82ee..5efd274de 100644 --- a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py +++ b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py @@ -1,9 +1,8 @@ from typing import TYPE_CHECKING, Any, Dict, Optional, Type +from crewai.tools import BaseTool from pydantic import BaseModel, Field -from crewai_tools.tools.base_tool import BaseTool - # Type checking import if TYPE_CHECKING: from firecrawl import FirecrawlApp diff --git a/src/crewai_tools/tools/jina_scrape_website_tool/jina_scrape_website_tool.py b/src/crewai_tools/tools/jina_scrape_website_tool/jina_scrape_website_tool.py index d887c085d..a10a4ffdb 100644 --- a/src/crewai_tools/tools/jina_scrape_website_tool/jina_scrape_website_tool.py +++ b/src/crewai_tools/tools/jina_scrape_website_tool/jina_scrape_website_tool.py @@ -1,17 +1,21 @@ +from typing import Optional, Type + import requests -from typing import Type, Optional -from ..base_tool import BaseTool +from crewai.tools import BaseTool from pydantic import BaseModel, Field class JinaScrapeWebsiteToolInput(BaseModel): """Input schema for JinaScrapeWebsiteTool.""" + website_url: str = Field(..., description="Mandatory website url to read the file") class JinaScrapeWebsiteTool(BaseTool): name: str = "JinaScrapeWebsiteTool" - description: str = "A tool that can be used to read a website content using Jina.ai reader and return markdown content." + description: str = ( + "A tool that can be used to read a website content using Jina.ai reader and return markdown content." 
+ ) args_schema: Type[BaseModel] = JinaScrapeWebsiteToolInput website_url: Optional[str] = None api_key: Optional[str] = None @@ -22,31 +26,29 @@ class JinaScrapeWebsiteTool(BaseTool): website_url: Optional[str] = None, api_key: Optional[str] = None, custom_headers: Optional[dict] = None, - **kwargs + **kwargs, ): super().__init__(**kwargs) if website_url is not None: self.website_url = website_url - self.description = ( - f"A tool that can be used to read {website_url}'s content and return markdown content." - ) + self.description = f"A tool that can be used to read {website_url}'s content and return markdown content." self._generate_description() if custom_headers is not None: self.headers = custom_headers - + if api_key is not None: self.headers["Authorization"] = f"Bearer {api_key}" def _run(self, website_url: Optional[str] = None) -> str: url = website_url or self.website_url if not url: - raise ValueError("Website URL must be provided either during initialization or execution") + raise ValueError( + "Website URL must be provided either during initialization or execution" + ) response = requests.get( - f"https://r.jina.ai/{url}", - headers=self.headers, - timeout=15 + f"https://r.jina.ai/{url}", headers=self.headers, timeout=15 ) response.raise_for_status() return response.text diff --git a/src/crewai_tools/tools/llamaindex_tool/llamaindex_tool.py b/src/crewai_tools/tools/llamaindex_tool/llamaindex_tool.py index af5c93e1f..61a747956 100644 --- a/src/crewai_tools/tools/llamaindex_tool/llamaindex_tool.py +++ b/src/crewai_tools/tools/llamaindex_tool/llamaindex_tool.py @@ -1,9 +1,8 @@ from typing import Any, Optional, Type, cast +from crewai.tools import BaseTool from pydantic import BaseModel, Field -from crewai_tools.tools.base_tool import BaseTool - class LlamaIndexTool(BaseTool): """Tool to wrap LlamaIndex tools/query engines.""" diff --git a/src/crewai_tools/tools/multion_tool/multion_tool.py b/src/crewai_tools/tools/multion_tool/multion_tool.py index 2dc944f23..a991074da 100644 --- a/src/crewai_tools/tools/multion_tool/multion_tool.py +++ b/src/crewai_tools/tools/multion_tool/multion_tool.py @@ -2,7 +2,7 @@ from typing import Any, Optional -from crewai_tools.tools.base_tool import BaseTool +from crewai.tools import BaseTool class MultiOnTool(BaseTool): diff --git a/src/crewai_tools/tools/nl2sql/nl2sql_tool.py b/src/crewai_tools/tools/nl2sql/nl2sql_tool.py index 22c3a299b..786550ee7 100644 --- a/src/crewai_tools/tools/nl2sql/nl2sql_tool.py +++ b/src/crewai_tools/tools/nl2sql/nl2sql_tool.py @@ -1,11 +1,10 @@ -from typing import Any, Union +from typing import Any, Type, Union -from ..base_tool import BaseTool +from crewai.tools import BaseTool from pydantic import BaseModel, Field from sqlalchemy import create_engine, text from sqlalchemy.orm import sessionmaker -from typing import Type, Any class NL2SQLToolInput(BaseModel): sql_query: str = Field( @@ -13,6 +12,7 @@ class NL2SQLToolInput(BaseModel): description="The SQL query to execute.", ) + class NL2SQLTool(BaseTool): name: str = "NL2SQLTool" description: str = "Converts natural language to SQL queries and executes them." 
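Every hunk in this patch makes the same move: tool modules now import `BaseTool` from `crewai.tools` instead of the local `crewai_tools.tools.base_tool`, while keeping a pydantic model as the `args_schema`. For reference, here is a minimal sketch of a custom tool written against the new import path; the class name, field names, and echo logic are illustrative only and not part of the patch:

```python
from typing import Type

from crewai.tools import BaseTool
from pydantic import BaseModel, Field


class EchoToolInput(BaseModel):
    """Schema describing the single argument the tool accepts."""

    text: str = Field(..., description="Text to echo back.")


class EchoTool(BaseTool):
    name: str = "Echo tool"
    description: str = "Returns the text it receives; a stand-in for real tool logic."
    args_schema: Type[BaseModel] = EchoToolInput

    def _run(self, text: str) -> str:
        # A real tool would do its work here (HTTP request, database query, etc.).
        return text
```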
diff --git a/src/crewai_tools/tools/rag/rag_tool.py b/src/crewai_tools/tools/rag/rag_tool.py index 97291cd81..a9bbdab53 100644 --- a/src/crewai_tools/tools/rag/rag_tool.py +++ b/src/crewai_tools/tools/rag/rag_tool.py @@ -1,10 +1,9 @@ from abc import ABC, abstractmethod from typing import Any +from crewai.tools import BaseTool from pydantic import BaseModel, Field, model_validator -from crewai_tools.tools.base_tool import BaseTool - class Adapter(BaseModel, ABC): class Config: diff --git a/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py b/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py index 56bb27195..14757d247 100644 --- a/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py +++ b/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py @@ -3,10 +3,9 @@ from typing import Any, Optional, Type import requests from bs4 import BeautifulSoup +from crewai.tools import BaseTool from pydantic import BaseModel, Field -from ..base_tool import BaseTool - class FixedScrapeElementFromWebsiteToolSchema(BaseModel): """Input for ScrapeElementFromWebsiteTool.""" diff --git a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py index 99df1d2dd..8cfc5d136 100644 --- a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py +++ b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py @@ -4,10 +4,9 @@ from typing import Any, Optional, Type import requests from bs4 import BeautifulSoup +from crewai.tools import BaseTool from pydantic import BaseModel, Field -from ..base_tool import BaseTool - class FixedScrapeWebsiteToolSchema(BaseModel): """Input for ScrapeWebsiteTool.""" @@ -69,6 +68,6 @@ class ScrapeWebsiteTool(BaseTool): parsed = BeautifulSoup(page.text, "html.parser") text = parsed.get_text(" ") - text = re.sub('[ \t]+', ' ', text) - text = re.sub('\\s+\n\\s+', '\n', text) + text = re.sub("[ \t]+", " ", text) + text = re.sub("\\s+\n\\s+", "\n", text) return text diff --git a/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py b/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py index 5800e223c..b47ce8e5b 100644 --- a/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py +++ b/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py @@ -1,10 +1,9 @@ import logging from typing import Any, Dict, Literal, Optional, Type +from crewai.tools import BaseTool from pydantic import BaseModel, Field -from crewai_tools.tools.base_tool import BaseTool - logger = logging.getLogger(__file__) diff --git a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py index 970cde7ca..47910f35b 100644 --- a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py +++ b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py @@ -1,14 +1,12 @@ import time from typing import Any, Optional, Type -from bs4 import BeautifulSoup +from crewai.tools import BaseTool from pydantic import BaseModel, Field from selenium import webdriver from selenium.webdriver.chrome.options import Options from selenium.webdriver.common.by import By -from ..base_tool import BaseTool - class FixedSeleniumScrapingToolSchema(BaseModel): """Input for SeleniumScrapingTool.""" diff --git 
a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py index ca118326e..8f53ce0a4 100644 --- a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py +++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py @@ -4,10 +4,9 @@ import os from typing import Any, Optional, Type import requests +from crewai.tools import BaseTool from pydantic import BaseModel, Field -from crewai_tools.tools.base_tool import BaseTool - def _save_results_to_file(content: str) -> None: """Saves the search results to a file.""" diff --git a/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py index 21e6e9872..c058091a2 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py @@ -3,10 +3,9 @@ from typing import Any, Optional, Type from urllib.parse import urlencode import requests +from crewai.tools import BaseTool from pydantic import BaseModel, Field -from crewai_tools.tools.base_tool import BaseTool - class SerplyNewsSearchToolSchema(BaseModel): """Input for Serply News Search.""" diff --git a/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py index 1ac6337f6..3ed9de4ab 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py @@ -3,10 +3,9 @@ from typing import Any, Optional, Type from urllib.parse import urlencode import requests +from crewai.tools import BaseTool from pydantic import BaseModel, Field -from crewai_tools.tools.base_tool import BaseTool - class SerplyScholarSearchToolSchema(BaseModel): """Input for Serply Scholar Search.""" diff --git a/src/crewai_tools/tools/serply_api_tool/serply_web_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_web_search_tool.py index b65fa21d1..b4d1ae4b5 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_web_search_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_web_search_tool.py @@ -3,10 +3,9 @@ from typing import Any, Optional, Type from urllib.parse import urlencode import requests +from crewai.tools import BaseTool from pydantic import BaseModel, Field -from crewai_tools.tools.base_tool import BaseTool - class SerplyWebSearchToolSchema(BaseModel): """Input for Serply Web Search.""" diff --git a/src/crewai_tools/tools/spider_tool/spider_tool.py b/src/crewai_tools/tools/spider_tool/spider_tool.py index c01b5e2a3..94da9f6fe 100644 --- a/src/crewai_tools/tools/spider_tool/spider_tool.py +++ b/src/crewai_tools/tools/spider_tool/spider_tool.py @@ -1,9 +1,8 @@ from typing import Any, Dict, Literal, Optional, Type +from crewai.tools import BaseTool from pydantic import BaseModel, Field -from crewai_tools.tools.base_tool import BaseTool - class SpiderToolSchema(BaseModel): url: str = Field(description="Website URL") diff --git a/src/crewai_tools/tools/vision_tool/vision_tool.py b/src/crewai_tools/tools/vision_tool/vision_tool.py index 6b7a21dbd..3ac3c3ae5 100644 --- a/src/crewai_tools/tools/vision_tool/vision_tool.py +++ b/src/crewai_tools/tools/vision_tool/vision_tool.py @@ -2,11 +2,10 @@ import base64 from typing import Type import requests +from crewai.tools import BaseTool from openai import OpenAI from pydantic import BaseModel -from crewai_tools.tools.base_tool import BaseTool - class 
ImagePromptSchema(BaseModel):
    """Input for Vision Tool."""

From a0e0c2815273efe75760b4b83519f2d44500f916 Mon Sep 17 00:00:00 2001
From: Lorenze Jay
Date: Sun, 8 Dec 2024 21:44:19 -0800
Subject: [PATCH 166/391] setup weaviate vector search tool

---
 src/crewai_tools/__init__.py                  |  1 +
 src/crewai_tools/tools/__init__.py            |  1 +
 .../tools/weaviate_tool/README.md             | 80 +++++++++++++++++
 .../tools/weaviate_tool/vector_search.py      | 89 +++++++++++++++++++
 4 files changed, 171 insertions(+)
 create mode 100644 src/crewai_tools/tools/weaviate_tool/README.md
 create mode 100644 src/crewai_tools/tools/weaviate_tool/vector_search.py

diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py
index 3fad09d9f..12523a214 100644
--- a/src/crewai_tools/__init__.py
+++ b/src/crewai_tools/__init__.py
@@ -42,4 +42,5 @@ from .tools import (
     XMLSearchTool,
     YoutubeChannelSearchTool,
     YoutubeVideoSearchTool,
+    WeaviateVectorSearchTool,
 )

diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py
index 73a96f4cf..23565dbea 100644
--- a/src/crewai_tools/tools/__init__.py
+++ b/src/crewai_tools/tools/__init__.py
@@ -51,3 +51,4 @@ from .youtube_channel_search_tool.youtube_channel_search_tool import (
     YoutubeChannelSearchTool,
 )
 from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool
+from .weaviate_tool.vector_search import WeaviateVectorSearchTool

diff --git a/src/crewai_tools/tools/weaviate_tool/README.md b/src/crewai_tools/tools/weaviate_tool/README.md
new file mode 100644
index 000000000..42daa40e0
--- /dev/null
+++ b/src/crewai_tools/tools/weaviate_tool/README.md
@@ -0,0 +1,80 @@
+# WeaviateVectorSearchTool
+
+## Description
+This tool is built for semantic search over documents stored in a Weaviate vector database. Use it to find documents that are semantically similar to a given query.
+
+Weaviate is a vector database used to store and query vector embeddings. You can follow their docs here: https://weaviate.io/developers/wcs/connect
+
+## Installation
+Install the crewai_tools package by executing the following command in your terminal:
+
+```shell
+uv pip install 'crewai[tools]'
+```
+
+## Example
+To utilize the WeaviateVectorSearchTool for different use cases, follow these examples:
+
+```python
+from crewai_tools import WeaviateVectorSearchTool
+from weaviate.classes.config import Configure
+
+# Set up the tool to search the configured Weaviate collection
+tool = WeaviateVectorSearchTool(
+    collection_name='example_collections',
+    limit=3,
+    weaviate_cluster_url="https://your-weaviate-cluster-url.com",
+    weaviate_api_key="your-weaviate-api-key",
+)
+
+# or
+
+# Set up custom models for the vectorizer and the generative model
+tool = WeaviateVectorSearchTool(
+    collection_name='example_collections',
+    limit=3,
+    vectorizer=Configure.Vectorizer.text2vec_openai(model="nomic-embed-text"),
+    generative_model=Configure.Generative.openai(model="gpt-4o-mini"),
+    weaviate_cluster_url="https://your-weaviate-cluster-url.com",
+    weaviate_api_key="your-weaviate-api-key",
+)
+
+# Adding the tool to an agent
+rag_agent = Agent(
+    name="rag_agent",
+    role="You are a helpful assistant that can answer questions with the help of the WeaviateVectorSearchTool.",
+    llm="gpt-4o-mini",
+    tools=[tool],
+)
+```
+
+## Arguments
+- `collection_name` : The name of the collection to search within. (Required)
+- `weaviate_cluster_url` : The URL of the Weaviate cluster. (Required)
+- `weaviate_api_key` : The API key for the Weaviate cluster. (Required)
+- `limit` : The number of results to return. (Optional)
+- `vectorizer` : The vectorizer to use. (Optional)
+- `generative_model` : The generative model to use. (Optional)
+
+Preloading the Weaviate database with documents:
+
+```python
+import os
+
+from crewai_tools import WeaviateVectorSearchTool
+
+# Use before hooks to generate the documents and add them to the Weaviate database.
+# `client` is assumed to be an already-connected weaviate client; follow the weaviate docs: https://weaviate.io/developers/wcs/connect
+test_docs = client.collections.get("test_collection_name")
+
+
+docs_to_load = os.listdir("knowledge")
+with test_docs.batch.dynamic() as batch:
+    for d in docs_to_load:
+        with open(os.path.join("knowledge", d), "r") as f:
+            content = f.read()
+        batch.add_object(
+            {
+                "content": content,
+                "year": d.split("_")[0],
+            }
+        )
+tool = WeaviateVectorSearchTool(collection_name='example_collections', limit=3)
+
+```

diff --git a/src/crewai_tools/tools/weaviate_tool/vector_search.py b/src/crewai_tools/tools/weaviate_tool/vector_search.py
new file mode 100644
index 000000000..ab80b6ce1
--- /dev/null
+++ b/src/crewai_tools/tools/weaviate_tool/vector_search.py
@@ -0,0 +1,89 @@
+import os
+import json
+import weaviate
+from pydantic import BaseModel, Field
+from typing import Any, Type, Optional
+from crewai.tools import BaseTool
+
+from weaviate.classes.config import Configure, Vectorizers
+from weaviate.classes.init import Auth
+
+
+class WeaviateToolSchema(BaseModel):
+    """Input for WeaviateTool."""
+
+    query: str = Field(
+        ...,
+        description="The query used to search and retrieve relevant information from the Weaviate database. Pass only the query, not the question.",
+    )
+
+
+class WeaviateVectorSearchTool(BaseTool):
+    """Tool to search the Weaviate database"""
+
+    name: str = "WeaviateVectorSearchTool"
+    description: str = "A tool to search the Weaviate database for relevant information on internal documents."
+    args_schema: Type[BaseModel] = WeaviateToolSchema
+    query: Optional[str] = None
+
+    vectorizer: Optional[Vectorizers] = Field(
+        default=Configure.Vectorizer.text2vec_openai(
+            model="nomic-embed-text",
+        )
+    )
+    generative_model: Optional[Any] = Field(
+        default=Configure.Generative.openai(
+            model="gpt-4o",
+        ),
+    )
+    collection_name: Optional[str] = None
+    limit: Optional[int] = Field(default=3)
+    headers: Optional[dict] = Field(
+        default={"X-OpenAI-Api-Key": os.environ.get("OPENAI_API_KEY", "")}
+    )
+    weaviate_cluster_url: str = Field(
+        ...,
+        description="The URL of the Weaviate cluster",
+    )
+    weaviate_api_key: str = Field(
+        ...,
+        description="The API key for the Weaviate cluster",
+    )
+
+    def _run(self, query: str) -> str:
+        """Search the Weaviate database
+
+        Args:
+            query (str): The query used to search and retrieve relevant information from the Weaviate database. Pass only the query as a string, not the question.
+
+        Returns:
+            str: The result of the search query
+        """
+
+        if not self.weaviate_cluster_url or not self.weaviate_api_key:
+            raise ValueError("weaviate_cluster_url or weaviate_api_key is not set")
+
+        client = weaviate.connect_to_weaviate_cloud(
+            cluster_url=self.weaviate_cluster_url,
+            auth_credentials=Auth.api_key(self.weaviate_api_key),
+            headers=self.headers,
+        )
+        internal_docs = client.collections.get(self.collection_name)
+
+        if not internal_docs:
+            internal_docs = client.collections.create(
+                name=self.collection_name,
+                vectorizer_config=self.vectorizer,
+                generative_config=self.generative_model,
+            )
+
+        response = internal_docs.query.near_text(
+            query=query,
+            limit=self.limit,
+        )
+        json_response = ""
+        for obj in response.objects:
+            json_response += json.dumps(obj.properties, indent=2)
+
+        client.close()
+        return json_response
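For reference, a minimal standalone sketch of the query flow this tool wraps, assuming a reachable Weaviate Cloud cluster, the `weaviate-client` v4 package, and a collection that already holds documents (the collection name, env var names, and query below are illustrative):

```python
import os

import weaviate
from weaviate.classes.init import Auth

# Connect the same way WeaviateVectorSearchTool._run does
client = weaviate.connect_to_weaviate_cloud(
    cluster_url=os.environ["WEAVIATE_CLUSTER_URL"],
    auth_credentials=Auth.api_key(os.environ["WEAVIATE_API_KEY"]),
    headers={"X-OpenAI-Api-Key": os.environ.get("OPENAI_API_KEY", "")},
)

# near_text vectorizes the query with the collection's configured vectorizer and
# returns the closest objects, which the tool then serializes with json.dumps
docs = client.collections.get("example_collections")
response = docs.query.near_text(query="project requirements", limit=3)
for obj in response.objects:
    print(obj.properties)

client.close()
```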
From d5d83cbd7eea3cebac36fbdaa027229ec875e2bb Mon Sep 17 00:00:00 2001
From: Lorenze Jay
Date: Sun, 8 Dec 2024 21:48:15 -0800
Subject: [PATCH 167/391] fix collection name docs

---
 src/crewai_tools/tools/weaviate_tool/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/crewai_tools/tools/weaviate_tool/README.md b/src/crewai_tools/tools/weaviate_tool/README.md
index 42daa40e0..c48f2f70a 100644
--- a/src/crewai_tools/tools/weaviate_tool/README.md
+++ b/src/crewai_tools/tools/weaviate_tool/README.md
@@ -61,7 +61,7 @@ Preloading the Weaviate database with documents:
 from crewai_tools import WeaviateVectorSearchTool
 
 # Use before hooks to generate the documents and add them to the Weaviate database. Follow the weaviate docs: https://weaviate.io/developers/wcs/connect
-test_docs = client.collections.get("test_collection_name")
+test_docs = client.collections.get("example_collections")
 
 
 docs_to_load = os.listdir("knowledge")

From 1eb5d50a5572e82837387b0d3f7cfdbb10c6c421 Mon Sep 17 00:00:00 2001
From: Carlos Souza
Date: Thu, 12 Dec 2024 16:00:24 -0500
Subject: [PATCH 168/391] Fix url and api_key args on crawler tool

---
 .../firecrawl_crawl_website_tool.py | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py
index c23ff2100..d753cdd6f 100644
--- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py
+++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py
@@ -1,7 +1,7 @@
 from typing import TYPE_CHECKING, Any, Dict, Optional, Type
-
 from crewai.tools import BaseTool
 from pydantic import BaseModel, ConfigDict, Field
+import os
 
 # Type checking import
 if TYPE_CHECKING:
@@ -27,6 +27,7 @@ class FirecrawlCrawlWebsiteTool(BaseTool):
     args_schema: Type[BaseModel] = FirecrawlCrawlWebsiteToolSchema
     api_key: Optional[str] = None
     firecrawl: Optional["FirecrawlApp"] = None
+    url: Optional[str] = None
 
     def __init__(self, api_key: Optional[str] = None, **kwargs):
         super().__init__(**kwargs)
@@ -37,7 +38,11 @@ class FirecrawlCrawlWebsiteTool(BaseTool):
             "`firecrawl` package not found, please run `pip install firecrawl-py`"
         )
 
-        self.firecrawl = FirecrawlApp(api_key=api_key)
+        client_api_key = api_key or os.getenv("FIRECRAWL_API_KEY")
+        if not client_api_key:
+            raise ValueError("FIRECRAWL_API_KEY is not set")
+
+        self.firecrawl = FirecrawlApp(api_key=client_api_key)
 
     def _run(
         self,
@@ -45,13 +50,17 @@ class FirecrawlCrawlWebsiteTool(BaseTool):
         crawler_options: Optional[Dict[str, Any]] = None,
         page_options: 
Optional[Dict[str, Any]] = None, ): + # Unless url has been previously set via constructor by the user, + # use the url argument provided by the agent + base_url = self.url or url + if crawler_options is None: crawler_options = {} if page_options is None: page_options = {} options = {"crawlerOptions": crawler_options, "pageOptions": page_options} - return self.firecrawl.crawl_url(url, options) + return self.firecrawl.crawl_url(base_url, options) try: From b0a948797aa973e165853d98fbef3788ab850b35 Mon Sep 17 00:00:00 2001 From: theCyberTech <84775494+theCyberTech@users.noreply.github.com> Date: Fri, 13 Dec 2024 21:42:01 +0800 Subject: [PATCH 169/391] feat(serper-dev): implement enhanced search capabilities and error handling - Add support for multiple search types (general and news) - Implement knowledge graph integration - Add structured result processing for organic results, "People Also Ask", and related searches - Enhance error handling with try-catch blocks and logging - Update documentation with comprehensive feature list and usage examples --- .../tools/serper_dev_tool/README.md | 49 ++-- .../tools/serper_dev_tool/serper_dev_tool.py | 248 ++++++++++++++---- 2 files changed, 229 insertions(+), 68 deletions(-) diff --git a/src/crewai_tools/tools/serper_dev_tool/README.md b/src/crewai_tools/tools/serper_dev_tool/README.md index ae900a3bc..0beb9f2ab 100644 --- a/src/crewai_tools/tools/serper_dev_tool/README.md +++ b/src/crewai_tools/tools/serper_dev_tool/README.md @@ -1,30 +1,49 @@ # SerperDevTool Documentation ## Description -This tool is designed to perform a semantic search for a specified query from a text's content across the internet. It utilizes the `serper.dev` API to fetch and display the most relevant search results based on the query provided by the user. +The SerperDevTool is a powerful search tool that interfaces with the `serper.dev` API to perform internet searches. It supports multiple search types including general search and news search, with features like knowledge graph integration, organic results, "People Also Ask" questions, and related searches. + +## Features +- Multiple search types: 'search' (default) and 'news' +- Knowledge graph integration for enhanced search context +- Organic search results with sitelinks +- "People Also Ask" questions and answers +- Related searches suggestions +- News search with date, source, and image information +- Configurable number of results +- Optional result saving to file ## Installation -To incorporate this tool into your project, follow the installation instructions below: ```shell pip install 'crewai[tools]' ``` -## Example -The following example demonstrates how to initialize the tool and execute a search with a given query: - +## Usage ```python from crewai_tools import SerperDevTool -# Initialize the tool for internet searching capabilities -tool = SerperDevTool() +# Initialize the tool +tool = SerperDevTool( + n_results=10, # Optional: Number of results to return (default: 10) + save_file=False, # Optional: Save results to file (default: False) + search_type="search" # Optional: Type of search - "search" or "news" (default: "search") +) + +# Execute a search +results = tool._run(search_query="your search query") ``` -## Steps to Get Started -To effectively use the `SerperDevTool`, follow these steps: +## Configuration +1. **API Key Setup**: + - Sign up for an account at `serper.dev` + - Obtain your API key + - Set the environment variable: `SERPER_API_KEY` -1. 
**Package Installation**: Confirm that the `crewai[tools]` package is installed in your Python environment. -2. **API Key Acquisition**: Acquire a `serper.dev` API key by registering for a free account at `serper.dev`. -3. **Environment Configuration**: Store your obtained API key in an environment variable named `SERPER_API_KEY` to facilitate its use by the tool. - -## Conclusion -By integrating the `SerperDevTool` into Python projects, users gain the ability to conduct real-time, relevant searches across the internet directly from their applications. By adhering to the setup and usage guidelines provided, incorporating this tool into projects is streamlined and straightforward. +## Response Format +The tool returns structured data including: +- Search parameters +- Knowledge graph data (for general search) +- Organic search results +- "People Also Ask" questions +- Related searches +- News results (for news search type) diff --git a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py index ca118326e..bbea77909 100644 --- a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py +++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py @@ -1,20 +1,30 @@ import datetime import json import os -from typing import Any, Optional, Type +import logging +from typing import Any, Type import requests from pydantic import BaseModel, Field from crewai_tools.tools.base_tool import BaseTool +logging.basicConfig( + level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s" +) +logger = logging.getLogger(__name__) + def _save_results_to_file(content: str) -> None: """Saves the search results to a file.""" - filename = f"search_results_{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt" - with open(filename, "w") as file: - file.write(content) - print(f"Results saved to {filename}") + try: + filename = f"search_results_{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt" + with open(filename, "w") as file: + file.write(content) + logger.info(f"Results saved to {filename}") + except IOError as e: + logger.error(f"Failed to save results to file: {e}") + raise class SerperDevToolSchema(BaseModel): @@ -28,67 +38,199 @@ class SerperDevToolSchema(BaseModel): class SerperDevTool(BaseTool): name: str = "Search the internet" description: str = ( - "A tool that can be used to search the internet with a search_query." + "A tool that can be used to search the internet with a search_query. " + "Supports different search types: 'search' (default), 'news'" ) args_schema: Type[BaseModel] = SerperDevToolSchema - search_url: str = "https://google.serper.dev/search" - country: Optional[str] = "" - location: Optional[str] = "" - locale: Optional[str] = "" + base_url: str = "https://google.serper.dev" n_results: int = 10 save_file: bool = False + search_type: str = "search" - def _run( - self, - **kwargs: Any, - ) -> Any: + def _get_search_url(self, search_type: str) -> str: + """Get the appropriate endpoint URL based on search type.""" + search_type = search_type.lower() + allowed_search_types = ["search", "news"] + if search_type not in allowed_search_types: + raise ValueError( + f"Invalid search type: {search_type}. 
Must be one of: {', '.join(allowed_search_types)}" + ) + return f"{self.base_url}/{search_type}" - search_query = kwargs.get("search_query") or kwargs.get("query") - save_file = kwargs.get("save_file", self.save_file) - n_results = kwargs.get("n_results", self.n_results) + def _process_knowledge_graph(self, kg: dict) -> dict: + """Process knowledge graph data from search results.""" + return { + "title": kg.get("title", ""), + "type": kg.get("type", ""), + "website": kg.get("website", ""), + "imageUrl": kg.get("imageUrl", ""), + "description": kg.get("description", ""), + "descriptionSource": kg.get("descriptionSource", ""), + "descriptionLink": kg.get("descriptionLink", ""), + "attributes": kg.get("attributes", {}), + } - payload = {"q": search_query, "num": n_results} + def _process_organic_results(self, organic_results: list) -> list: + """Process organic search results.""" + processed_results = [] + for result in organic_results[: self.n_results]: + try: + result_data = { + "title": result["title"], + "link": result["link"], + "snippet": result.get("snippet", ""), + "position": result.get("position"), + } - if self.country != "": - payload["gl"] = self.country - if self.location != "": - payload["location"] = self.location - if self.locale != "": - payload["hl"] = self.locale + if "sitelinks" in result: + result_data["sitelinks"] = [ + { + "title": sitelink.get("title", ""), + "link": sitelink.get("link", ""), + } + for sitelink in result["sitelinks"] + ] - payload = json.dumps(payload) + processed_results.append(result_data) + except KeyError: + logger.warning(f"Skipping malformed organic result: {result}") + continue + return processed_results + def _process_people_also_ask(self, paa_results: list) -> list: + """Process 'People Also Ask' results.""" + processed_results = [] + for result in paa_results[: self.n_results]: + try: + result_data = { + "question": result["question"], + "snippet": result.get("snippet", ""), + "title": result.get("title", ""), + "link": result.get("link", ""), + } + processed_results.append(result_data) + except KeyError: + logger.warning(f"Skipping malformed PAA result: {result}") + continue + return processed_results + + def _process_related_searches(self, related_results: list) -> list: + """Process related search results.""" + processed_results = [] + for result in related_results[: self.n_results]: + try: + processed_results.append({"query": result["query"]}) + except KeyError: + logger.warning(f"Skipping malformed related search result: {result}") + continue + return processed_results + + def _process_news_results(self, news_results: list) -> list: + """Process news search results.""" + processed_results = [] + for result in news_results[: self.n_results]: + try: + result_data = { + "title": result["title"], + "link": result["link"], + "snippet": result.get("snippet", ""), + "date": result.get("date", ""), + "source": result.get("source", ""), + "imageUrl": result.get("imageUrl", ""), + } + processed_results.append(result_data) + except KeyError: + logger.warning(f"Skipping malformed news result: {result}") + continue + return processed_results + + def _make_api_request(self, search_query: str, search_type: str) -> dict: + """Make API request to Serper.""" + search_url = self._get_search_url(search_type) + payload = json.dumps({"q": search_query, "num": self.n_results}) headers = { "X-API-KEY": os.environ["SERPER_API_KEY"], "content-type": "application/json", } - response = requests.request( - "POST", self.search_url, headers=headers, data=payload - ) 
-        results = response.json()
-
-        if "organic" in results:
-            results = results["organic"][: self.n_results]
-            string = []
-            for result in results:
-                try:
-                    string.append(
-                        "\n".join(
-                            [
-                                f"Title: {result['title']}",
-                                f"Link: {result['link']}",
-                                f"Snippet: {result['snippet']}",
-                                "---",
-                            ]
-                        )
-                    )
-                except KeyError:
-                    continue
-
-            content = "\n".join(string)
-            if save_file:
-                _save_results_to_file(content)
-            return f"\nSearch results: {content}\n"
-        else:
+        response = None
+        try:
+            response = requests.post(
+                search_url, headers=headers, data=payload, timeout=10
+            )
+            response.raise_for_status()
+            results = response.json()
+            if not results:
+                logger.error("Empty response from Serper API")
+                raise ValueError("Empty response from Serper API")
+            return results
+        except requests.exceptions.RequestException as e:
+            error_msg = f"Error making request to Serper API: {e}"
+            if response is not None and hasattr(response, "content"):
+                error_msg += f"\nResponse content: {response.content}"
+            logger.error(error_msg)
+            raise
+        except json.JSONDecodeError as e:
+            if response is not None and hasattr(response, "content"):
+                logger.error(f"Error decoding JSON response: {e}")
+                logger.error(f"Response content: {response.content}")
+            else:
+                logger.error(
+                    f"Error decoding JSON response: {e} (No response content available)"
+                )
+            raise
+
+    def _process_search_results(self, results: dict, search_type: str) -> dict:
+        """Process search results based on search type."""
+        formatted_results = {}
+
+        if search_type == "search":
+            if "knowledgeGraph" in results:
+                formatted_results["knowledgeGraph"] = self._process_knowledge_graph(
+                    results["knowledgeGraph"]
+                )
+
+            if "organic" in results:
+                formatted_results["organic"] = self._process_organic_results(
+                    results["organic"]
+                )
+
+            if "peopleAlsoAsk" in results:
+                formatted_results["peopleAlsoAsk"] = self._process_people_also_ask(
+                    results["peopleAlsoAsk"]
+                )
+
+            if "relatedSearches" in results:
+                formatted_results["relatedSearches"] = self._process_related_searches(
+                    results["relatedSearches"]
+                )
+
+        elif search_type == "news":
+            if "news" in results:
+                formatted_results["news"] = self._process_news_results(results["news"])
+
+        return formatted_results
+
+    def _run(self, **kwargs: Any) -> Any:
+        """Execute the search operation."""
+        search_query = kwargs.get("search_query") or kwargs.get("query")
+        search_type = kwargs.get("search_type", self.search_type)
+        save_file = kwargs.get("save_file", self.save_file)
+
+        results = self._make_api_request(search_query, search_type)
+
+        formatted_results = {
+            "searchParameters": {
+                "q": search_query,
+                "type": search_type,
+                **results.get("searchParameters", {}),
+            }
+        }
+
+        formatted_results.update(self._process_search_results(results, search_type))
+        formatted_results["credits"] = results.get("credits", 1)
+
+        if save_file:
+            _save_results_to_file(json.dumps(formatted_results, indent=2))
+
+        return formatted_results
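Since the reworked `_run` returns a nested dict rather than a flat string, a short consumer sketch may help; the query is illustrative, and a valid `SERPER_API_KEY` is assumed to be set in the environment:

```python
from crewai_tools import SerperDevTool

tool = SerperDevTool(n_results=5, search_type="search")
results = tool.run(search_query="open source agent frameworks")

# Top-level keys mirror the sections assembled by _process_search_results above
print(results["searchParameters"])  # {"q": ..., "type": "search", ...}
for hit in results.get("organic", []):
    print(hit.get("position"), hit["title"], hit["link"])
for paa in results.get("peopleAlsoAsk", []):
    print(paa["question"])
```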
From 1fd5805bef7341923da184debdb9515073eb9385 Mon Sep 17 00:00:00 2001
From: theCyberTech <84775494+theCyberTech@users.noreply.github.com>
Date: Fri, 13 Dec 2024 21:59:38 +0800
Subject: [PATCH 170/391] Resolved conflict

---
 src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py
index bbea77909..fde30735f 100644
--- a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py
+++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py
@@ -7,7 +7,7 @@ from typing import Any, Type
 import requests
 from pydantic import BaseModel, Field
 
-from crewai_tools.tools.base_tool import BaseTool
+from crewai_tools import BaseTool
 
 logging.basicConfig(
     level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"

From 00418d98f7df82380f519ac76ee870876eb75266 Mon Sep 17 00:00:00 2001
From: theCyberTech <84775494+theCyberTech@users.noreply.github.com>
Date: Fri, 13 Dec 2024 22:01:04 +0800
Subject: [PATCH 171/391] resolved conflict

---
 src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py
index fde30735f..b23884180 100644
--- a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py
+++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py
@@ -7,7 +7,7 @@ from typing import Any, Type
 import requests
 from pydantic import BaseModel, Field
 
-from crewai_tools import BaseTool
+from crewai.tools import BaseTool
 
 logging.basicConfig(
     level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"

From 2cb33b18e5bab2a6948ad0cb8bba81c27f27bed1 Mon Sep 17 00:00:00 2001
From: Carlos Souza
Date: Fri, 13 Dec 2024 11:18:59 -0500
Subject: [PATCH 172/391] Remove outdated params

---
 .../firecrawl_crawl_website_tool.py | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py
index d753cdd6f..f75685a49 100644
--- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py
+++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py
@@ -10,13 +10,6 @@ if TYPE_CHECKING:
 
 class FirecrawlCrawlWebsiteToolSchema(BaseModel):
     url: str = Field(description="Website URL")
-    crawler_options: Optional[Dict[str, Any]] = Field(
-        default=None, description="Options for crawling"
-    )
-    page_options: Optional[Dict[str, Any]] = Field(
-        default=None, description="Options for page"
-    )
-
 
 class FirecrawlCrawlWebsiteTool(BaseTool):
     model_config = ConfigDict(

From 3a095183c56aff4a7f8e7d9a1324f36f4fa52590 Mon Sep 17 00:00:00 2001
From: Carlos Souza
Date: Fri, 13 Dec 2024 11:20:08 -0500
Subject: [PATCH 173/391] Use proper options and accept custom FirecrawlApp

---
 .../firecrawl_crawl_website_tool.py | 42 ++++++++++---------
 1 file changed, 22 insertions(+), 20 deletions(-)

diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py
index f75685a49..07fef7730 100644
--- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py
+++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py
@@ -18,9 +18,12 @@ class FirecrawlCrawlWebsiteTool(BaseTool):
     name: str = "Firecrawl web crawl tool"
     description: str = "Crawl webpages using Firecrawl and return the contents"
     args_schema: Type[BaseModel] = FirecrawlCrawlWebsiteToolSchema
+    firecrawl_app: Optional["FirecrawlApp"] = None
     api_key: Optional[str] = None
-    firecrawl: Optional["FirecrawlApp"] = None
     url: Optional[str] = None
+    params: Optional[Dict[str, Any]] = None
+    poll_interval: Optional[int] = 2
+    idempotency_key: Optional[str] = None
 
     def 
__init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) @@ -31,29 +34,28 @@ class FirecrawlCrawlWebsiteTool(BaseTool): "`firecrawl` package not found, please run `pip install firecrawl-py`" ) - client_api_key = api_key or os.getenv("FIRECRAWL_API_KEY") - if not client_api_key: - raise ValueError("FIRECRAWL_API_KEY is not set") + # Allows passing a previously created FirecrawlApp instance + # or builds a new one with the provided API key + if not self.firecrawl_app: + client_api_key = api_key or os.getenv("FIRECRAWL_API_KEY") + if not client_api_key: + raise ValueError( + "FIRECRAWL_API_KEY is not set. Please provide it either via the constructor " + "with the `api_key` argument or by setting the FIRECRAWL_API_KEY environment variable." + ) + self.firecrawl_app = FirecrawlApp(api_key=client_api_key) - self.firecrawl = FirecrawlApp(api_key=client_api_key) - - def _run( - self, - url: str, - crawler_options: Optional[Dict[str, Any]] = None, - page_options: Optional[Dict[str, Any]] = None, - ): + def _run(self, url: str): # Unless url has been previously set via constructor by the user, - # use the url argument provided by the agent + # use the url argument provided by the agent at runtime. base_url = self.url or url - if crawler_options is None: - crawler_options = {} - if page_options is None: - page_options = {} - - options = {"crawlerOptions": crawler_options, "pageOptions": page_options} - return self.firecrawl.crawl_url(base_url, options) + return self.firecrawl_app.crawl_url( + base_url, + params=self.params, + poll_interval=self.poll_interval, + idempotency_key=self.idempotency_key + ) try: From 164442223e153bffefcc794f22c71ece86eb095a Mon Sep 17 00:00:00 2001 From: Carlos Souza Date: Fri, 13 Dec 2024 11:23:53 -0500 Subject: [PATCH 174/391] Organize imports --- .../firecrawl_crawl_website_tool.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py index 07fef7730..1de7602ec 100644 --- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py @@ -1,7 +1,9 @@ -from typing import TYPE_CHECKING, Any, Dict, Optional, Type -from crewai.tools import BaseTool -from pydantic import BaseModel, ConfigDict, Field import os +from typing import TYPE_CHECKING, Any, Dict, Optional, Type + +from pydantic import BaseModel, ConfigDict, Field + +from crewai.tools import BaseTool # Type checking import if TYPE_CHECKING: From 668e87d5e13ea45f8b388dcf2a9a8187048e381c Mon Sep 17 00:00:00 2001 From: Carlos Souza Date: Fri, 13 Dec 2024 11:26:46 -0500 Subject: [PATCH 175/391] Add constructor comments --- .../firecrawl_crawl_website_tool.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py index 1de7602ec..edada38dd 100644 --- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py @@ -28,6 +28,17 @@ class FirecrawlCrawlWebsiteTool(BaseTool): idempotency_key: Optional[str] = None def __init__(self, api_key: Optional[str] = None, **kwargs): + """Initialize FirecrawlCrawlWebsiteTool. 
+
+        Args:
+            api_key (Optional[str]): Firecrawl API key. If not provided, will check FIRECRAWL_API_KEY env var.
+            url (Optional[str]): Base URL to crawl. Can be overridden by the _run method.
+            firecrawl_app (Optional[FirecrawlApp]): Previously created FirecrawlApp instance.
+            params (Optional[Dict[str, Any]]): Additional parameters to pass to the FirecrawlApp.
+            poll_interval (Optional[int]): Poll interval for the FirecrawlApp.
+            idempotency_key (Optional[str]): Idempotency key for the FirecrawlApp.
+            **kwargs: Additional arguments passed to BaseTool.
+        """
         super().__init__(**kwargs)
         try:
             from firecrawl import FirecrawlApp  # type: ignore
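A hedged usage sketch of the constructor documented above — the target URL and `params` values are placeholders, and `FIRECRAWL_API_KEY` is assumed to be set when `api_key` is omitted:

```python
from crewai_tools import FirecrawlCrawlWebsiteTool

tool = FirecrawlCrawlWebsiteTool(
    url="https://example.com",   # optional: pins the crawl target for every run
    params={"limit": 5},         # forwarded as-is to FirecrawlApp.crawl_url
    poll_interval=2,
)

# Because `url` was pinned at construction time, _run falls back to it and
# ignores the agent-supplied argument (base_url = self.url or url)
result = tool.run(url="https://ignored.example.org")
```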
From c76e0f3445ea84268be6318c4931ec729b70f2dc Mon Sep 17 00:00:00 2001
From: Rebecca Qian
Date: Fri, 13 Dec 2024 18:55:42 -0500
Subject: [PATCH 176/391] Add patronus evaluation tools

remove fields

rename eval tool

remove eval tool init files

---
 src/crewai_tools/__init__.py                  |  1 +
 src/crewai_tools/tools/__init__.py            |  1 +
 .../tools/patronus_eval_tool/example.py       | 34 ++++++++++++++
 .../patronus_eval_tool/patronus_eval_tool.py  | 45 +++++++++++++++++++
 4 files changed, 81 insertions(+)
 create mode 100644 src/crewai_tools/tools/patronus_eval_tool/example.py
 create mode 100644 src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py

diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py
index 12523a214..7e27286e7 100644
--- a/src/crewai_tools/__init__.py
+++ b/src/crewai_tools/__init__.py
@@ -22,6 +22,7 @@ from .tools import (
     MultiOnTool,
     MySQLSearchTool,
     NL2SQLTool,
+    PatronusEvalTool,
     PDFSearchTool,
     PGSearchTool,
     RagTool,

diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py
index 23565dbea..9831a2346 100644
--- a/src/crewai_tools/tools/__init__.py
+++ b/src/crewai_tools/tools/__init__.py
@@ -25,6 +25,7 @@ from .mdx_seach_tool.mdx_search_tool import MDXSearchTool
 from .multion_tool.multion_tool import MultiOnTool
 from .mysql_search_tool.mysql_search_tool import MySQLSearchTool
 from .nl2sql.nl2sql_tool import NL2SQLTool
+from .patronus_eval_tool.patronus_eval_tool import PatronusEvalTool
 from .pdf_search_tool.pdf_search_tool import PDFSearchTool
 from .pg_seach_tool.pg_search_tool import PGSearchTool
 from .rag.rag_tool import RagTool

diff --git a/src/crewai_tools/tools/patronus_eval_tool/example.py b/src/crewai_tools/tools/patronus_eval_tool/example.py
new file mode 100644
index 000000000..99088d17f
--- /dev/null
+++ b/src/crewai_tools/tools/patronus_eval_tool/example.py
@@ -0,0 +1,34 @@
+import os
+
+from crewai import Agent, Crew, Task
+from patronus_eval_tool import PatronusEvalTool
+
+
+patronus_eval_tool = PatronusEvalTool(
+    evaluators=[{
+        "evaluator": "judge",
+        "criteria": "patronus:is-code"
+    }],
+    tags={}
+)
+
+# Create a new agent
+coding_agent = Agent(
+    role="Coding Agent",
+    goal="Generate high quality code. Use the evaluation tool to score the agent outputs",
+    backstory="Coding agent to generate high quality code. Use the evaluation tool to score the agent outputs",
+    tools=[patronus_eval_tool],
+    verbose=True,
+)
+
+# Define tasks
+generate_code = Task(
+    description="Create a simple program to generate the first N numbers in the Fibonacci sequence.",
+    expected_output="Program that generates the first N numbers in the Fibonacci sequence.",
+    agent=coding_agent,
+)
+
+
+crew = Crew(agents=[coding_agent], tasks=[generate_code])
+
+crew.kickoff()
\ No newline at end of file

diff --git a/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py b/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py
new file mode 100644
index 000000000..c0e2b95e0
--- /dev/null
+++ b/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py
@@ -0,0 +1,45 @@
+from typing import Any, Optional, Type, cast, ClassVar
+
+from crewai.tools import BaseTool
+import json
+import os
+import requests
+
+
+class PatronusEvalTool(BaseTool):
+    """
+    PatronusEvalTool is a tool to automatically evaluate and score agent interactions.
+
+    Results are logged to the Patronus platform at app.patronus.ai
+    """
+
+    name: str = "Call Patronus API tool"
+    description: str = (
+        "This tool calls the Patronus Evaluation API. This function returns the response from the API."
+    )
+    evaluate_url: str = "https://api.patronus.ai/v1/evaluate"
+
+
+    def _run(
+        self,
+        evaluated_model_input: str,
+        evaluated_model_output: str,
+        evaluators: list,
+        tags: dict
+    ) -> Any:
+
+        api_key = os.getenv("PATRONUS_API_KEY")
+        headers = {
+            "X-API-KEY": api_key,
+            "accept": "application/json",
+            "content-type": "application/json"
+        }
+        data = {
+            "evaluated_model_input": evaluated_model_input,
+            "evaluated_model_output": evaluated_model_output,
+            "evaluators": evaluators,
+            "tags": tags
+        }
+
+        # Make the POST request
+        response = requests.post(self.evaluate_url, headers=headers, data=json.dumps(data))
\ No newline at end of file

From d94f7e03dce4866ee180d7f4e97e7a2aa51b8373 Mon Sep 17 00:00:00 2001
From: DarshanDeshpande
Date: Sat, 14 Dec 2024 15:46:10 -0500
Subject: [PATCH 177/391] Update Patronus AI evaluator tool and example

---
 .../tools/patronus_eval_tool/example.py       | 17 +++-----
 .../patronus_eval_tool/patronus_eval_tool.py  | 39 ++++++++++++-------
 2 files changed, 29 insertions(+), 27 deletions(-)

diff --git a/src/crewai_tools/tools/patronus_eval_tool/example.py b/src/crewai_tools/tools/patronus_eval_tool/example.py
index 99088d17f..4015a5f4a 100644
--- a/src/crewai_tools/tools/patronus_eval_tool/example.py
+++ b/src/crewai_tools/tools/patronus_eval_tool/example.py
@@ -1,34 +1,27 @@
-import os
-
 from crewai import Agent, Crew, Task
 from patronus_eval_tool import PatronusEvalTool
 
 
 patronus_eval_tool = PatronusEvalTool(
-    evaluators=[{
-        "evaluator": "judge",
-        "criteria": "patronus:is-code"
-    }],
-    tags={}
+    evaluators=[{"evaluator": "judge", "criteria": "patronus:is-code"}], tags={}
 )
 
 # Create a new agent
 coding_agent = Agent(
     role="Coding Agent",
-    goal="Generate high quality code. Use the evaluation tool to score the agent outputs",
-    backstory="Coding agent to generate high quality code. Use the evaluation tool to score the agent outputs",
+    goal="Generate high quality code and verify that the code is correct by using Patronus AI's evaluation tool to check validity of your output code.",
+    backstory="You are an experienced coder who can generate high quality python code. You can follow complex instructions accurately and effectively.",
     tools=[patronus_eval_tool],
     verbose=True,
 )
 
 # Define tasks
 generate_code = Task(
-    description="Create a simple program to generate the first N numbers in the Fibonacci sequence.",
+    description="Create a simple program to generate the first N numbers in the Fibonacci sequence. Use the evaluator as `judge` from Patronus AI with the criteria `patronus:is-code` and feed your task input as input and your code as output to verify your code validity.",
     expected_output="Program that generates the first N numbers in the Fibonacci sequence.",
     agent=coding_agent,
 )
 
 crew = Crew(agents=[coding_agent], tasks=[generate_code])
 
-crew.kickoff()
\ No newline at end of file
+crew.kickoff()

diff --git a/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py b/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py
index c0e2b95e0..88ad28253 100644
--- a/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py
+++ b/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py
@@ -1,45 +1,54 @@
-from typing import Any, Optional, Type, cast, ClassVar
-
-from crewai.tools import BaseTool
-import json
 import os
+import json
 import requests
+from typing import Any, List, Dict
+
+from crewai.tools import BaseTool
 
 
 class PatronusEvalTool(BaseTool):
     """
     PatronusEvalTool is a tool to automatically evaluate and score agent interactions.
 
     Results are logged to the Patronus platform at app.patronus.ai
     """
 
-    name: str = "Call Patronus API tool"
+    name: str = "Call Patronus API tool for evaluation of model inputs and outputs"
     description: str = (
-        "This tool calls the Patronus Evaluation API. This function returns the response from the API."
+        """This tool calls the Patronus Evaluation API that takes the following arguments:
+1. evaluated_model_input: str: The agent's task description
+2. evaluated_model_output: str: The agent's output code
+3. evaluators: list[dict[str,str]]: list of dictionaries, each with an evaluator (such as `judge`) and a criterion (like `patronus:[criteria-name-here]`)."""
     )
     evaluate_url: str = "https://api.patronus.ai/v1/evaluate"
 
-
     def _run(
         self,
         evaluated_model_input: str,
         evaluated_model_output: str,
-        evaluators: list,
-        tags: dict
+        evaluators: List[Dict[str, str]],
+        tags: dict,
     ) -> Any:
-
         api_key = os.getenv("PATRONUS_API_KEY")
         headers = {
             "X-API-KEY": api_key,
             "accept": "application/json",
             "content-type": "application/json",
         }
         data = {
             "evaluated_model_input": evaluated_model_input,
             "evaluated_model_output": evaluated_model_output,
             "evaluators": evaluators,
             "tags": tags,
         }
 
-        # Make the POST request
-        response = requests.post(self.evaluate_url, headers=headers, data=json.dumps(data))
\ No newline at end of file
+        response = requests.post(
+            self.evaluate_url, headers=headers, data=json.dumps(data)
+        )
+        if response.status_code != 200:
+            raise Exception(
+                f"Failed to evaluate model input and output. Reason: {response.text}"
+            )
+
+        return response.json()

From b0a948797aa973e165853d98fbef3788ab850b35 Mon Sep 17 00:00:00 2001
From: Hammam Abdelwahab
Date: Sun, 15 Dec 2024 10:34:07 +0100
Subject: [PATCH 178/391] Enabled manual setting of docker base url for code
 interpreter tool. 
Goal is to avoid the error: CodeInterpreterTool Error while fetching server API version:

---
 .../tools/code_interpreter_tool/README.md          | 13 +++++++++++++
 .../code_interpreter_tool/code_interpreter_tool.py |  3 ++-
 2 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/src/crewai_tools/tools/code_interpreter_tool/README.md b/src/crewai_tools/tools/code_interpreter_tool/README.md
index bc73df7a4..ab0cbf44b 100644
--- a/src/crewai_tools/tools/code_interpreter_tool/README.md
+++ b/src/crewai_tools/tools/code_interpreter_tool/README.md
@@ -38,3 +38,16 @@ Agent(
     tools=[CodeInterpreterTool(user_dockerfile_path="")],
 )
 ```
+
+If it is difficult to connect to the Docker daemon automatically (especially for macOS users), you can set the Docker host manually:
+
+```python
+from crewai_tools import CodeInterpreterTool
+
+Agent(
+    ...
+    tools=[CodeInterpreterTool(user_docker_base_url="",
+                               user_dockerfile_path="")],
+)
+
+```

diff --git a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py
index 61c180fe3..2f385c809 100644
--- a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py
+++ b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py
@@ -28,6 +28,7 @@ class CodeInterpreterTool(BaseTool):
     default_image_tag: str = "code-interpreter:latest"
     code: Optional[str] = None
     user_dockerfile_path: Optional[str] = None
+    user_docker_base_url: Optional[str] = None
     unsafe_mode: bool = False
 
     @staticmethod
@@ -39,7 +40,7 @@ class CodeInterpreterTool(BaseTool):
         """
         Verify if the Docker image is available. Optionally use a user-provided Dockerfile. 
""" - client = docker.from_env() if self.user_docker_base_url != None else docker.DockerClient(base_url=self.user_docker_base_url) + client = docker.from_env() if self.user_docker_base_url == None else docker.DockerClient(base_url=self.user_docker_base_url) try: client.images.get(self.default_image_tag) From 56a9060840f4f33b63a0dde5ebce3c985de61e60 Mon Sep 17 00:00:00 2001 From: Terry Tan Yongsheng Date: Tue, 17 Dec 2024 10:35:33 +0800 Subject: [PATCH 180/391] Add SerpApi tools - google search, google shopping --- src/crewai_tools/__init__.py | 2 + src/crewai_tools/tools/__init__.py | 2 + .../tools/serpapi_tool/serpapi_base_tool.py | 37 ++++++++++++++++ .../serpapi_google_search_tool.py | 40 ++++++++++++++++++ .../serpapi_google_shopping_tool.py | 42 +++++++++++++++++++ 5 files changed, 123 insertions(+) create mode 100644 src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py create mode 100644 src/crewai_tools/tools/serpapi_tool/serpapi_google_search_tool.py create mode 100644 src/crewai_tools/tools/serpapi_tool/serpapi_google_shopping_tool.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 12523a214..87aca8531 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -43,4 +43,6 @@ from .tools import ( YoutubeChannelSearchTool, YoutubeVideoSearchTool, WeaviateVectorSearchTool, + SerpApiGoogleSearchTool, + SerpApiGoogleShoppingTool, ) diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 23565dbea..f6c31f45f 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -52,3 +52,5 @@ from .youtube_channel_search_tool.youtube_channel_search_tool import ( ) from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool from .weaviate_tool.vector_search import WeaviateVectorSearchTool +from .serpapi_tool.serpapi_google_search_tool import SerpApiGoogleSearchTool +from .serpapi_tool.serpapi_google_shopping_tool import SerpApiGoogleShoppingTool \ No newline at end of file diff --git a/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py b/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py new file mode 100644 index 000000000..57e33e71e --- /dev/null +++ b/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py @@ -0,0 +1,37 @@ +import os +import re +from typing import Optional, Any + +from crewai.tools import BaseTool + +class SerpApiBaseTool(BaseTool): + client: Optional[Any] = None + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + try: + from serpapi import Client + except ImportError: + raise ImportError( + "`serpapi` package not found" + ) + api_key = os.getenv("SERPAPI_API_KEY") + if not api_key: + raise ValueError( + "Missing API key, you can get the key from https://serpapi.com/manage-api-key" + ) + self.client = Client(api_key=api_key) + + def _omit_fields(self, data, omit_patterns): + if isinstance(data, dict): + for field in list(data.keys()): + if any(re.compile(p).match(field) for p in omit_patterns): + data.pop(field, None) + else: + if isinstance(data[field], (dict, list)): + self._omit_fields(data[field], omit_patterns) + elif isinstance(data, list): + for item in data: + self._omit_fields(item, omit_patterns) + diff --git a/src/crewai_tools/tools/serpapi_tool/serpapi_google_search_tool.py b/src/crewai_tools/tools/serpapi_tool/serpapi_google_search_tool.py new file mode 100644 index 000000000..199b7f5a2 --- /dev/null +++ b/src/crewai_tools/tools/serpapi_tool/serpapi_google_search_tool.py @@ -0,0 +1,40 @@ 
+from typing import Any, Type, Optional
+
+import re
+from pydantic import BaseModel, Field
+from .serpapi_base_tool import SerpApiBaseTool
+from serpapi import HTTPError
+
+class SerpApiGoogleSearchToolSchema(BaseModel):
+    """Input for Google Search."""
+    search_query: str = Field(..., description="Mandatory search query you want to use to Google search.")
+    location: Optional[str] = Field(None, description="Location you want the search to be performed in.")
+
+class SerpApiGoogleSearchTool(SerpApiBaseTool):
+    name: str = "Google Search"
+    description: str = (
+        "A tool to perform a Google search with a search_query."
+    )
+    args_schema: Type[BaseModel] = SerpApiGoogleSearchToolSchema
+
+    def _run(
+        self,
+        **kwargs: Any,
+    ) -> Any:
+        try:
+            results = self.client.search({
+                "q": kwargs.get("search_query"),
+                "location": kwargs.get("location"),
+            }).as_dict()
+
+            self._omit_fields(
+                results,
+                [r"search_metadata", r"search_parameters", r"serpapi_.+", r".+_token", r"displayed_link", r"pagination"]
+            )
+
+            return results
+        except HTTPError as e:
+            return f"An error occurred: {str(e)}. Some parameters may be invalid."
\ No newline at end of file

diff --git a/src/crewai_tools/tools/serpapi_tool/serpapi_google_shopping_tool.py b/src/crewai_tools/tools/serpapi_tool/serpapi_google_shopping_tool.py
new file mode 100644
index 000000000..b44b3a809
--- /dev/null
+++ b/src/crewai_tools/tools/serpapi_tool/serpapi_google_shopping_tool.py
@@ -0,0 +1,42 @@
+from typing import Any, Type, Optional
+
+import re
+from pydantic import BaseModel, Field
+from .serpapi_base_tool import SerpApiBaseTool
+from serpapi import HTTPError
+
+class SerpApiGoogleShoppingToolSchema(BaseModel):
+    """Input for Google Shopping."""
+    search_query: str = Field(..., description="Mandatory search query you want to use to Google shopping.")
+    location: Optional[str] = Field(None, description="Location you want the search to be performed in.")
+
+
+class SerpApiGoogleShoppingTool(SerpApiBaseTool):
+    name: str = "Google Shopping"
+    description: str = (
+        "A tool to perform a search on Google Shopping with a search_query."
+    )
+    args_schema: Type[BaseModel] = SerpApiGoogleShoppingToolSchema
+
+    def _run(
+        self,
+        **kwargs: Any,
+    ) -> Any:
+        try:
+            results = self.client.search({
+                "engine": "google_shopping",
+                "q": kwargs.get("search_query"),
+                "location": kwargs.get("location")
+            }).as_dict()
+
+            self._omit_fields(
+                results,
+                [r"search_metadata", r"search_parameters", r"serpapi_.+", r"filters", r"pagination"]
+            )
+
+            return results
+        except HTTPError as e:
+            return f"An error occurred: {str(e)}. Some parameters may be invalid."
\ No newline at end of file

From 2effe9a7d2ebacb063f8814e5974041161d839ef Mon Sep 17 00:00:00 2001
From: Terry Tan Yongsheng
Date: Tue, 17 Dec 2024 11:09:38 +0800
Subject: [PATCH 181/391] Add README

---
 src/crewai_tools/tools/serpapi_tool/README.md | 32 +++++++++++++++++
 1 file changed, 32 insertions(+)
 create mode 100644 src/crewai_tools/tools/serpapi_tool/README.md

diff --git a/src/crewai_tools/tools/serpapi_tool/README.md b/src/crewai_tools/tools/serpapi_tool/README.md
new file mode 100644
index 000000000..d81b851f8
--- /dev/null
+++ b/src/crewai_tools/tools/serpapi_tool/README.md
@@ -0,0 +1,32 @@
+# SerpApi Tools
+
+## Description
+[SerpApi](https://serpapi.com/) tools are built for searching information on the internet. It currently supports:
+- Google Search
+- Google Shopping
+
+To successfully make use of SerpApi tools, you must have `SERPAPI_API_KEY` set in the environment. To get the API key, register a free account at [SerpApi](https://serpapi.com/).
+
+## Installation
+To start using the SerpApi Tools, you must first install the `crewai_tools` package. This can be easily done with the following command:
+
+```shell
+pip install 'crewai[tools]'
+```
+
+## Examples
+The following examples demonstrate how to initialize the tools.
+
+### Google Search
+```python
+from crewai_tools import SerpApiGoogleSearchTool
+
+tool = SerpApiGoogleSearchTool()
+```
+
+### Google Shopping
+```python
+from crewai_tools import SerpApiGoogleShoppingTool
+
+tool = SerpApiGoogleShoppingTool()
+```
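To round out the README, a sketch of wiring one of these tools into an agent — `Agent` comes from the `crewai` package, and the role/goal strings are illustrative:

```python
from crewai import Agent
from crewai_tools import SerpApiGoogleSearchTool

# Requires SERPAPI_API_KEY in the environment, as described above
search_tool = SerpApiGoogleSearchTool()

research_agent = Agent(
    role="Web Researcher",
    goal="Find up-to-date information on a given topic",
    backstory="An analyst who works from Google search results.",
    tools=[search_tool],
)
```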
From 81981e43b668dc1d2073fa1d7defbcfa97e452ac Mon Sep 17 00:00:00 2001
From: Terry Tan Yongsheng
Date: Tue, 17 Dec 2024 13:45:50 +0800
Subject: [PATCH 182/391] Add type hints

---
 src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py b/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py
index 57e33e71e..98491190c 100644
--- a/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py
+++ b/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py
@@ -1,10 +1,12 @@
 import os
 import re
-from typing import Optional, Any
+from typing import Optional, Any, Union
 
 from crewai.tools import BaseTool
 
 class SerpApiBaseTool(BaseTool):
+    """Base class for SerpApi functionality with shared capabilities."""
+
     client: Optional[Any] = None
 
     def __init__(self, **kwargs):
@@ -14,7 +16,7 @@ class SerpApiBaseTool(BaseTool):
             from serpapi import Client
         except ImportError:
             raise ImportError(
-                "`serpapi` package not found"
+                "`serpapi` package not found, please install with `pip install serpapi`"
             )
         api_key = os.getenv("SERPAPI_API_KEY")
         if not api_key:
@@ -23,7 +25,7 @@ class SerpApiBaseTool(BaseTool):
             )
         self.client = Client(api_key=api_key)
 
-    def _omit_fields(self, data, omit_patterns):
+    def _omit_fields(self, data: Union[dict, list], omit_patterns: list[str]) -> None:
         if isinstance(data, dict):
             for field in list(data.keys()):
                 if any(re.compile(p).match(field) for p in omit_patterns):
@@ -34,4 +36,3 @@ class SerpApiBaseTool(BaseTool):
         elif isinstance(data, list):
             for item in data:
                 self._omit_fields(item, omit_patterns)
-

From cd37ede869b3032ba191644a9eda613d2624ae30 Mon Sep 17 00:00:00 2001
From: Gilbert Bagaoisan
Date: Mon, 16 Dec 2024 22:05:28 -0800
Subject: [PATCH 183/391] lint fixes

---
 .../tools/spider_tool/spider_tool.py | 135 ++++++++++++++----
 1 file changed, 106 insertions(+), 29 deletions(-)

diff --git a/src/crewai_tools/tools/spider_tool/spider_tool.py b/src/crewai_tools/tools/spider_tool/spider_tool.py
index 94da9f6fe..74fee809d 100644
--- a/src/crewai_tools/tools/spider_tool/spider_tool.py
+++ b/src/crewai_tools/tools/spider_tool/spider_tool.py
@@ -1,60 +1,137 @@
+import logging
 from typing import Any, Dict, Literal, Optional, Type
+from urllib.parse import urlparse
 
 from crewai.tools import BaseTool
 from pydantic import BaseModel, Field
 
+logger = logging.getLogger(__file__)
+
 
 class SpiderToolSchema(BaseModel):
-    url: str = Field(description="Website URL")
-    params: Optional[Dict[str, Any]] = Field(
-        description="Set additional params. Options include:\n"
-        "- `limit`: Optional[int] - The maximum number of pages allowed to crawl per website. 
Remove the value or set it to `0` to crawl all pages.\n" - "- `depth`: Optional[int] - The crawl limit for maximum depth. If `0`, no limit will be applied.\n" - "- `metadata`: Optional[bool] - Boolean to include metadata or not. Defaults to `False` unless set to `True`. If the user wants metadata, include params.metadata = True.\n" - "- `query_selector`: Optional[str] - The CSS query selector to use when extracting content from the markup.\n" + """Input schema for SpiderTool.""" + + website_url: str = Field( + ..., description="Mandatory website URL to scrape or crawl" ) mode: Literal["scrape", "crawl"] = Field( default="scrape", - description="Mode, the only two allowed modes are `scrape` or `crawl`. Use `scrape` to scrape a single page and `crawl` to crawl the entire website following subpages. These modes are the only allowed values even when ANY params is set.", + description="The mode of the SpiderTool. The only two allowed modes are `scrape` or `crawl`. Crawl mode will follow up to 5 links and return their content in markdown format.", ) class SpiderTool(BaseTool): - name: str = "Spider scrape & crawl tool" - description: str = "Scrape & Crawl any url and return LLM-ready data." - args_schema: Type[BaseModel] = SpiderToolSchema - api_key: Optional[str] = None - spider: Optional[Any] = None + """Tool for scraping and crawling websites.""" + + DEFAULT_CRAWL_LIMIT: int = 5 + DEFAULT_RETURN_FORMAT: str = "markdown" + + name: str = "SpiderTool" + description: str = ( + "A tool to scrape or crawl a website and return LLM-ready content." + ) + args_schema: Type[BaseModel] = SpiderToolSchema + custom_params: Optional[Dict[str, Any]] = None + website_url: Optional[str] = None + api_key: Optional[str] = None + spider: Any = None + log_failures: bool = True + + def __init__( + self, + api_key: Optional[str] = None, + website_url: Optional[str] = None, + custom_params: Optional[Dict[str, Any]] = None, + log_failures: bool = True, + **kwargs, + ): + """Initialize SpiderTool for web scraping and crawling. + + Args: + api_key (Optional[str]): Spider API key for authentication. Required for production use. + website_url (Optional[str]): Default website URL to scrape/crawl. Can be overridden during execution. + custom_params (Optional[Dict[str, Any]]): Additional parameters to pass to Spider API. + These override any parameters set by the LLM. + log_failures (bool): If True, logs errors. Defaults to True. + **kwargs: Additional arguments passed to BaseTool. + + Raises: + ImportError: If spider-client package is not installed. + RuntimeError: If Spider client initialization fails. + """ - def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) + if website_url is not None: + self.website_url = website_url + + self.log_failures = log_failures + self.custom_params = custom_params + try: from spider import Spider # type: ignore + + self.spider = Spider(api_key=api_key) except ImportError: raise ImportError( "`spider-client` package not found, please run `pip install spider-client`" ) + except Exception as e: + raise RuntimeError(f"Failed to initialize Spider client: {str(e)}") - self.spider = Spider(api_key=api_key) + def _validate_url(self, url: str) -> bool: + """Validate URL format. + + Args: + url (str): URL to validate. + Returns: + bool: True if valid URL. 
+ """ + try: + result = urlparse(url) + return all([result.scheme, result.netloc]) + except Exception: + return False def _run( self, - url: str, - params: Optional[Dict[str, Any]] = None, - mode: Optional[Literal["scrape", "crawl"]] = "scrape", - ): - if mode not in ["scrape", "crawl"]: + website_url: str, + mode: Literal["scrape", "crawl"] = "scrape", + ) -> str: + params = {} + url = website_url or self.website_url + + if not self._validate_url(url): + raise ValueError("Invalid URL format") + + if not url: raise ValueError( - "Unknown mode in `mode` parameter, `scrape` or `crawl` are the allowed modes" + "Website URL must be provided either during initialization or execution" ) - # Ensure 'return_format': 'markdown' is always included - if params: - params["return_format"] = "markdown" - else: - params = {"return_format": "markdown"} + if mode not in ["scrape", "crawl"]: + raise ValueError("Mode must be either 'scrape' or 'crawl'") - action = self.spider.scrape_url if mode == "scrape" else self.spider.crawl_url - spider_docs = action(url=url, params=params) + params["request"] = "smart" + params["filter_output_svg"] = True + params["return_format"] = self.DEFAULT_RETURN_FORMAT - return spider_docs + if mode == "crawl": + params["limit"] = self.DEFAULT_CRAWL_LIMIT + + # Update params with custom params if provided. + # This will override any params passed by LLM. + if self.custom_params: + params.update(self.custom_params) + + try: + action = ( + self.spider.scrape_url if mode == "scrape" else self.spider.crawl_url + ) + return action(url=url, params=params) + + except Exception as e: + if self.log_failures: + logger.error(f"Error fetching data from {url}, exception: {e}") + return None + else: + raise e From 4551b8c6251754e6c67832af63d705ef9eb43cb1 Mon Sep 17 00:00:00 2001 From: Gilbert Bagaoisan Date: Mon, 16 Dec 2024 22:05:46 -0800 Subject: [PATCH 184/391] Updated readme --- src/crewai_tools/tools/spider_tool/README.md | 72 +++++++------------- 1 file changed, 24 insertions(+), 48 deletions(-) diff --git a/src/crewai_tools/tools/spider_tool/README.md b/src/crewai_tools/tools/spider_tool/README.md index 563c07a04..c2dc8826a 100644 --- a/src/crewai_tools/tools/spider_tool/README.md +++ b/src/crewai_tools/tools/spider_tool/README.md @@ -1,81 +1,57 @@ # SpiderTool ## Description - -[Spider](https://spider.cloud/?ref=crewai) is the [fastest](https://github.com/spider-rs/spider/blob/main/benches/BENCHMARKS.md#benchmark-results) open source scraper and crawler that returns LLM-ready data. It converts any website into pure HTML, markdown, metadata or text while enabling you to crawl with custom actions using AI. +[Spider](https://spider.cloud/?ref=crewai) is a high-performance web scraping and crawling tool that delivers optimized markdown for LLMs and AI agents. It intelligently switches between HTTP requests and JavaScript rendering based on page requirements. Perfect for both single-page scraping and website crawling—making it ideal for content extraction and data collection. ## Installation - -To use the Spider API you need to download the [Spider SDK](https://pypi.org/project/spider-client/) and the crewai[tools] SDK too: +To use the Spider API you need to download the [Spider SDK](https://pypi.org/project/spider-client/) and the crewai[tools] SDK, too: ```python pip install spider-client 'crewai[tools]' ``` ## Example - -This example shows you how you can use the Spider tool to enable your agent to scrape and crawl websites. 
The data returned from the Spider API is already LLM-ready, so no need to do any cleaning there. +This example shows you how you can use the Spider tool to enable your agent to scrape and crawl websites. The data returned from the Spider API is LLM-ready. ```python from crewai_tools import SpiderTool -def main(): - spider_tool = SpiderTool() - - searcher = Agent( - role="Web Research Expert", - goal="Find related information from specific URL's", - backstory="An expert web researcher that uses the web extremely well", - tools=[spider_tool], - verbose=True, - ) +# To enable scraping any website it finds during its execution +spider_tool = SpiderTool(api_key='YOUR_API_KEY') - return_metadata = Task( - description="Scrape https://spider.cloud with a limit of 1 and enable metadata", - expected_output="Metadata and 10 word summary of spider.cloud", - agent=searcher - ) +# Initialize the tool with the website URL, so the agent can only scrape the content of the specified website +spider_tool = SpiderTool(website_url='https://www.example.com') - crew = Crew( - agents=[searcher], - tasks=[ - return_metadata, - ], - verbose=2 - ) - - crew.kickoff() - -if __name__ == "__main__": - main() +# Pass in custom parameters, see below for more details +spider_tool = SpiderTool( + website_url='https://www.example.com', + custom_params={"depth": 2, "anti_bot": True, "proxy_enabled": True} +) ``` ## Arguments - `api_key` (string, optional): Specifies Spider API key. If not specified, it looks for `SPIDER_API_KEY` in environment variables. -- `params` (object, optional): Optional parameters for the request. Defaults to `{"return_format": "markdown"}` to return the website's content in a format that fits LLMs better. +- `website_url` (string): The website URL. Will be used as a fallback if passed when the tool is initialized. +- `log_failures` (bool): Log scrape failures or fail silently. Defaults to `true`. +- `custom_params` (object, optional): Optional parameters for the request. + - `return_format` (string): The return format of the website's content. Defaults to `markdown`. - `request` (string): The request type to perform. Possible values are `http`, `chrome`, and `smart`. Use `smart` to perform an HTTP request by default until JavaScript rendering is needed for the HTML. - `limit` (int): The maximum number of pages allowed to crawl per website. Remove the value or set it to `0` to crawl all pages. - `depth` (int): The crawl limit for maximum depth. If `0`, no limit will be applied. - - `cache` (bool): Use HTTP caching for the crawl to speed up repeated runs. Default is `true`. - - `budget` (object): Object that has paths with a counter for limiting the amount of pages example `{"*":1}` for only crawling the root page. - `locale` (string): The locale to use for request, example `en-US`. - `cookies` (string): Add HTTP cookies to use for request. - `stealth` (bool): Use stealth mode for headless chrome request to help prevent being blocked. The default is `true` on chrome. - `headers` (object): Forward HTTP headers to use for all request. The object is expected to be a map of key value pairs. - - `metadata` (bool): Boolean to store metadata about the pages and content found. This could help improve AI interopt. Defaults to `false` unless you have the website already stored with the configuration enabled. - - `viewport` (object): Configure the viewport for chrome. Defaults to `800x600`. - - `encoding` (string): The type of encoding to use like `UTF-8`, `SHIFT_JIS`, or etc. 
+   - `metadata` (bool): Boolean to store metadata about the pages and content found. Defaults to `false`. 
   - `subdomains` (bool): Allow subdomains to be included. Default is `false`. 
   - `user_agent` (string): Add a custom HTTP user agent to the request. By default this is set to a random agent. 
-   - `store_data` (bool): Boolean to determine if storage should be used. If set this takes precedence over `storageless`. Defaults to `false`. 
-   - `gpt_config` (object): Use AI to generate actions to perform during the crawl. You can pass an array for the `"prompt"` to chain steps. 
-   - `fingerprint` (bool): Use advanced fingerprint for chrome. 
-   - `storageless` (bool): Boolean to prevent storing any type of data for the request including storage and AI vectors embedding. Defaults to `false` unless you have the website already stored. 
-   - `readability` (bool): Use [readability](https://github.com/mozilla/readability) to pre-process the content for reading. This may drastically improve the content for LLM usage. 
   - `return_format` (string): The format to return the data in. Possible values are `markdown`, `raw`, `text`, and `html2text`. Use `raw` to return the default format of the page, such as HTML. 
   - `proxy_enabled` (bool): Enable high performance premium proxies for the request to prevent being blocked at the network level. 
-   - `query_selector` (string): The CSS query selector to use when extracting content from the markup. 
-   - `full_resources` (bool): Crawl and download all the resources for a website. 
+   - `css_extraction_map` (object): Use CSS or XPath selectors to scrape contents from the web page. Set the paths and the extraction object map to perform extractions per path or page. 
   - `request_timeout` (int): The timeout to use for requests. Timeouts can be from `5-60`. The default is `30` seconds. 
-   - `run_in_background` (bool): Run the request in the background. Useful if storing data and wanting to trigger crawls to the dashboard. This has no effect if storageless is set. 
+   - `return_headers` (bool): Return the HTTP response headers with the results. Defaults to `false`. 
+   - `filter_output_main_only` (bool): Filter the nav, aside, and footer from the output. 
+   - `headers` (object): Forward HTTP headers to use for all requests. The object is expected to be a map of key value pairs. 
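+
+A minimal sketch combining a few of the parameters above (assumes `SPIDER_API_KEY` is set in the environment; exact parameter support may vary by plan):
+
+```python
+from crewai_tools import SpiderTool
+
+spider_tool = SpiderTool(
+    website_url="https://spider.cloud",
+    custom_params={"limit": 10, "metadata": True, "return_format": "markdown"},
+)
+```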
+ +Learn other parameters that can be used: [https://spider.cloud/docs/api](https://spider.cloud/docs/api) + From 3795d7dd8eca55d8311bc776ff00dcea916500fb Mon Sep 17 00:00:00 2001 From: Gilbert Bagaoisan Date: Mon, 16 Dec 2024 22:19:46 -0800 Subject: [PATCH 185/391] Reversed order of url validation --- src/crewai_tools/tools/spider_tool/spider_tool.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/crewai_tools/tools/spider_tool/spider_tool.py b/src/crewai_tools/tools/spider_tool/spider_tool.py index 74fee809d..970ac8d64 100644 --- a/src/crewai_tools/tools/spider_tool/spider_tool.py +++ b/src/crewai_tools/tools/spider_tool/spider_tool.py @@ -100,14 +100,14 @@ class SpiderTool(BaseTool): params = {} url = website_url or self.website_url - if not self._validate_url(url): - raise ValueError("Invalid URL format") - if not url: raise ValueError( "Website URL must be provided either during initialization or execution" ) + if not self._validate_url(url): + raise ValueError("Invalid URL format") + if mode not in ["scrape", "crawl"]: raise ValueError("Mode must be either 'scrape' or 'crawl'") From 059d635f02916bfec4c4bb62d7b3f4000569055b Mon Sep 17 00:00:00 2001 From: Ho Trong Hien <115549171+hienhayho@users.noreply.github.com> Date: Tue, 17 Dec 2024 22:28:41 +0700 Subject: [PATCH 186/391] fix: fix pydantic validation error - When passing result_as_answer=True, it will return ToolOutput so it won't pass pydantic validation as a string - Get content of ToolOutput before return --- src/crewai_tools/tools/llamaindex_tool/llamaindex_tool.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/crewai_tools/tools/llamaindex_tool/llamaindex_tool.py b/src/crewai_tools/tools/llamaindex_tool/llamaindex_tool.py index 61a747956..ba2605816 100644 --- a/src/crewai_tools/tools/llamaindex_tool/llamaindex_tool.py +++ b/src/crewai_tools/tools/llamaindex_tool/llamaindex_tool.py @@ -18,6 +18,10 @@ class LlamaIndexTool(BaseTool): from llama_index.core.tools import BaseTool as LlamaBaseTool tool = cast(LlamaBaseTool, self.llama_index_tool) + + if self.result_as_answer: + return tool(*args, **kwargs).content + return tool(*args, **kwargs) @classmethod From 73b803ddc3604efc5975de6863c737d80a8723aa Mon Sep 17 00:00:00 2001 From: Gilbert Bagaoisan Date: Tue, 17 Dec 2024 20:53:17 -0800 Subject: [PATCH 187/391] various improvements for PR based on recommendations --- .../tools/spider_tool/spider_tool.py | 133 +++++++++++++----- 1 file changed, 99 insertions(+), 34 deletions(-) diff --git a/src/crewai_tools/tools/spider_tool/spider_tool.py b/src/crewai_tools/tools/spider_tool/spider_tool.py index 970ac8d64..40959612f 100644 --- a/src/crewai_tools/tools/spider_tool/spider_tool.py +++ b/src/crewai_tools/tools/spider_tool/spider_tool.py @@ -1,6 +1,6 @@ import logging from typing import Any, Dict, Literal, Optional, Type -from urllib.parse import urlparse +from urllib.parse import unquote, urlparse from crewai.tools import BaseTool from pydantic import BaseModel, Field @@ -20,12 +20,28 @@ class SpiderToolSchema(BaseModel): ) -class SpiderTool(BaseTool): - """Tool for scraping and crawling websites.""" +class SpiderToolConfig(BaseModel): + """Configuration settings for SpiderTool. + Contains all default values and constants used by SpiderTool. + Centralizes configuration management for easier maintenance. 
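+
+    These values are defaults only; anything supplied via `custom_params`
+    overrides them when the request parameters are assembled in `_run`.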
+ """ + + # Crawling settings DEFAULT_CRAWL_LIMIT: int = 5 DEFAULT_RETURN_FORMAT: str = "markdown" + # Request parameters + DEFAULT_REQUEST_MODE: str = "smart" + FILTER_SVG: bool = True + + +class SpiderTool(BaseTool): + """Tool for scraping and crawling websites. + This tool provides functionality to either scrape a single webpage or crawl multiple + pages, returning content in a format suitable for LLM processing. + """ + name: str = "SpiderTool" description: str = ( "A tool to scrape or crawl a website and return LLM-ready content." @@ -36,6 +52,7 @@ class SpiderTool(BaseTool): api_key: Optional[str] = None spider: Any = None log_failures: bool = True + config: SpiderToolConfig = SpiderToolConfig() def __init__( self, @@ -79,16 +96,26 @@ class SpiderTool(BaseTool): raise RuntimeError(f"Failed to initialize Spider client: {str(e)}") def _validate_url(self, url: str) -> bool: - """Validate URL format. + """Validate URL format and security constraints. Args: - url (str): URL to validate. + url (str): URL to validate. Must be a properly formatted HTTP(S) URL + Returns: - bool: True if valid URL. + bool: True if URL is valid and meets security requirements, False otherwise. """ try: - result = urlparse(url) - return all([result.scheme, result.netloc]) + url = url.strip() + decoded_url = unquote(url) + + result = urlparse(decoded_url) + if not all([result.scheme, result.netloc]): + return False + + if result.scheme not in ["http", "https"]: + return False + + return True except Exception: return False @@ -96,42 +123,80 @@ class SpiderTool(BaseTool): self, website_url: str, mode: Literal["scrape", "crawl"] = "scrape", - ) -> str: - params = {} - url = website_url or self.website_url + ) -> Optional[str]: + """Execute the spider tool to scrape or crawl the specified website. - if not url: - raise ValueError( - "Website URL must be provided either during initialization or execution" - ) + Args: + website_url (str): The URL to process. Must be a valid HTTP(S) URL. + mode (Literal["scrape", "crawl"]): Operation mode. + - "scrape": Extract content from single page + - "crawl": Follow links and extract content from multiple pages - if not self._validate_url(url): - raise ValueError("Invalid URL format") + Returns: + Optional[str]: Extracted content in markdown format, or None if extraction fails + and log_failures is True. - if mode not in ["scrape", "crawl"]: - raise ValueError("Mode must be either 'scrape' or 'crawl'") - - params["request"] = "smart" - params["filter_output_svg"] = True - params["return_format"] = self.DEFAULT_RETURN_FORMAT - - if mode == "crawl": - params["limit"] = self.DEFAULT_CRAWL_LIMIT - - # Update params with custom params if provided. - # This will override any params passed by LLM. - if self.custom_params: - params.update(self.custom_params) + Raises: + ValueError: If URL is invalid or missing, or if mode is invalid. + ImportError: If spider-client package is not properly installed. + ConnectionError: If network connection fails while accessing the URL. + Exception: For other runtime errors. + """ try: + params = {} + url = website_url or self.website_url + + if not url: + raise ValueError( + "Website URL must be provided either during initialization or execution" + ) + + if not self._validate_url(url): + raise ValueError(f"Invalid URL format: {url}") + + if mode not in ["scrape", "crawl"]: + raise ValueError( + f"Invalid mode: {mode}. 
Must be either 'scrape' or 'crawl'" + ) + + params = { + "request": self.config.DEFAULT_REQUEST_MODE, + "filter_output_svg": self.config.FILTER_SVG, + "return_format": self.config.DEFAULT_RETURN_FORMAT, + } + + if mode == "crawl": + params["limit"] = self.config.DEFAULT_CRAWL_LIMIT + + if self.custom_params: + params.update(self.custom_params) + action = ( self.spider.scrape_url if mode == "scrape" else self.spider.crawl_url ) return action(url=url, params=params) + except ValueError as ve: + if self.log_failures: + logger.error(f"Validation error for URL {url}: {str(ve)}") + return None + raise ve + + except ImportError as ie: + logger.error(f"Spider client import error: {str(ie)}") + raise ie + + except ConnectionError as ce: + if self.log_failures: + logger.error(f"Connection error while accessing {url}: {str(ce)}") + return None + raise ce + except Exception as e: if self.log_failures: - logger.error(f"Error fetching data from {url}, exception: {e}") + logger.error( + f"Unexpected error during {mode} operation on {url}: {str(e)}" + ) return None - else: - raise e + raise e From 1bbac87e70cfe2fb71a3d5a5a5ec2af13bebbdaf Mon Sep 17 00:00:00 2001 From: Gilbert Bagaoisan Date: Tue, 17 Dec 2024 20:54:07 -0800 Subject: [PATCH 188/391] =?UTF-8?q?Improved=20readme=20based=20on=20recomm?= =?UTF-8?q?endations=E2=80=94added=20more=20advanced=20usage=20examples?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/crewai_tools/tools/spider_tool/README.md | 34 ++++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/spider_tool/README.md b/src/crewai_tools/tools/spider_tool/README.md index c2dc8826a..482c7c830 100644 --- a/src/crewai_tools/tools/spider_tool/README.md +++ b/src/crewai_tools/tools/spider_tool/README.md @@ -20,13 +20,43 @@ from crewai_tools import SpiderTool spider_tool = SpiderTool(api_key='YOUR_API_KEY') # Initialize the tool with the website URL, so the agent can only scrape the content of the specified website -spider_tool = SpiderTool(website_url='https://www.example.com') +spider_tool = SpiderTool(website_url='https://spider.cloud') # Pass in custom parameters, see below for more details spider_tool = SpiderTool( - website_url='https://www.example.com', + website_url='https://spider.cloud', custom_params={"depth": 2, "anti_bot": True, "proxy_enabled": True} ) + +# Advanced usage using css query selector to extract content +css_extraction_map = { + "/": [ # pass in path (main index in this case) + { + "name": "headers", # give it a name for this element + "selectors": [ + "h1" + ] + } + ] +} + +spider_tool = SpiderTool( + website_url='https://spider.cloud', + custom_params={"anti_bot": True, "proxy_enabled": True, "metadata": True, "css_extraction_map": css_extraction_map} +) + +### Response (extracted text will be in the metadata) +"css_extracted": { + "headers": [ + "The Web Crawler for AI Agents and LLMs!" + ] +} +``` +## Agent setup +```yaml +researcher: + role: > + You're a researcher that is tasked with researching a website and it's content (use crawl mode). The website is to crawl is: {website_url}. 
``` ## Arguments From c070ba002c0d1f96087a53ed89a6963ba8d4b7ac Mon Sep 17 00:00:00 2001 From: Marco Vinciguerra Date: Wed, 18 Dec 2024 14:34:40 +0100 Subject: [PATCH 189/391] feat: integration of scrapegraph APIs --- .../tools/scrapegraph_scrape_tool/README.md | 43 ++++++++++ .../scrapegraph_scrape_tool.py | 82 +++++++++++++++++++ 2 files changed, 125 insertions(+) create mode 100644 src/crewai_tools/tools/scrapegraph_scrape_tool/README.md create mode 100644 src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py diff --git a/src/crewai_tools/tools/scrapegraph_scrape_tool/README.md b/src/crewai_tools/tools/scrapegraph_scrape_tool/README.md new file mode 100644 index 000000000..76f385831 --- /dev/null +++ b/src/crewai_tools/tools/scrapegraph_scrape_tool/README.md @@ -0,0 +1,43 @@ +# ScrapegraphScrapeTool + +## Description +A tool that leverages Scrapegraph AI's SmartScraper API to intelligently extract content from websites. This tool provides advanced web scraping capabilities with AI-powered content extraction, making it ideal for targeted data collection and content analysis tasks. + +## Installation +Install the required packages: +```shell +pip install 'crewai[tools]' +``` + +## Example +```python +from crewai_tools import ScrapegraphScrapeTool + +# Basic usage with API key +tool = ScrapegraphScrapeTool(api_key="your_api_key") +result = tool.run( + website_url="https://www.example.com", + user_prompt="Extract the main heading and summary" +) + +# Initialize with a fixed website URL +tool = ScrapegraphScrapeTool( + website_url="https://www.example.com", + api_key="your_api_key" +) +result = tool.run() + +# With custom prompt +tool = ScrapegraphScrapeTool( + api_key="your_api_key", + user_prompt="Extract all product prices and descriptions" +) +``` + +## Arguments +- `website_url`: The URL of the website to scrape (required if not set during initialization) +- `user_prompt`: Custom instructions for content extraction (optional) +- `api_key`: Your Scrapegraph API key (required, can be set via SCRAPEGRAPH_API_KEY environment variable) + +## Environment Variables +- `SCRAPEGRAPH_API_KEY`: Your Scrapegraph API key diff --git a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py new file mode 100644 index 000000000..058af4150 --- /dev/null +++ b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py @@ -0,0 +1,82 @@ +import os +from typing import Any, Optional, Type + +from crewai.tools import BaseTool +from pydantic import BaseModel, Field +from scrapegraph_py import Client +from scrapegraph_py.logger import sgai_logger + + +class FixedScrapegraphScrapeToolSchema(BaseModel): + """Input for ScrapegraphScrapeTool when website_url is fixed.""" + + pass + + +class ScrapegraphScrapeToolSchema(FixedScrapegraphScrapeToolSchema): + """Input for ScrapegraphScrapeTool.""" + + website_url: str = Field(..., description="Mandatory website url to scrape") + user_prompt: str = Field( + default="Extract the main content of the webpage", + description="Prompt to guide the extraction of content", + ) + + +class ScrapegraphScrapeTool(BaseTool): + name: str = "Scrapegraph website scraper" + description: str = "A tool that uses Scrapegraph AI to intelligently scrape website content." 
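+    # Note: __init__ below swaps in a fixed-URL description and args_schema
+    # when a website_url is supplied at construction time.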
+ args_schema: Type[BaseModel] = ScrapegraphScrapeToolSchema + website_url: Optional[str] = None + user_prompt: Optional[str] = None + api_key: Optional[str] = None + + def __init__( + self, + website_url: Optional[str] = None, + user_prompt: Optional[str] = None, + api_key: Optional[str] = None, + **kwargs, + ): + super().__init__(**kwargs) + self.api_key = api_key or os.getenv("SCRAPEGRAPH_API_KEY") + + if not self.api_key: + raise ValueError("Scrapegraph API key is required") + + if website_url is not None: + self.website_url = website_url + self.description = f"A tool that uses Scrapegraph AI to intelligently scrape {website_url}'s content." + self.args_schema = FixedScrapegraphScrapeToolSchema + + if user_prompt is not None: + self.user_prompt = user_prompt + + # Configure logging + sgai_logger.set_logging(level="INFO") + + def _run( + self, + **kwargs: Any, + ) -> Any: + website_url = kwargs.get("website_url", self.website_url) + user_prompt = kwargs.get("user_prompt", self.user_prompt) or "Extract the main content of the webpage" + + if not website_url: + raise ValueError("website_url is required") + + # Initialize the client + sgai_client = Client(api_key=self.api_key) + + try: + # Make the SmartScraper request + response = sgai_client.smartscraper( + website_url=website_url, + user_prompt=user_prompt, + ) + + # Return the result + return response["result"] + finally: + # Always close the client + sgai_client.close() From 7608944e7f0e60f597e39fc2f40fc93fe31c4e28 Mon Sep 17 00:00:00 2001 From: Marco Vinciguerra Date: Wed, 18 Dec 2024 14:38:34 +0100 Subject: [PATCH 190/391] Update README.md --- src/crewai_tools/tools/scrapegraph_scrape_tool/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/scrapegraph_scrape_tool/README.md b/src/crewai_tools/tools/scrapegraph_scrape_tool/README.md index 76f385831..03467faee 100644 --- a/src/crewai_tools/tools/scrapegraph_scrape_tool/README.md +++ b/src/crewai_tools/tools/scrapegraph_scrape_tool/README.md @@ -40,4 +40,4 @@ tool = ScrapegraphScrapeTool( - `api_key`: Your Scrapegraph API key (required, can be set via SCRAPEGRAPH_API_KEY environment variable) ## Environment Variables -- `SCRAPEGRAPH_API_KEY`: Your Scrapegraph API key +- `SCRAPEGRAPH_API_KEY`: Your Scrapegraph API key, you can buy it [here](https://scrapegraphai.com) From b58d80dcf9373099ecc1bbc2715b6d042e8396ca Mon Sep 17 00:00:00 2001 From: Marco Vinciguerra Date: Wed, 18 Dec 2024 14:42:37 +0100 Subject: [PATCH 191/391] update documents according to suggestions --- .../tools/scrapegraph_scrape_tool/README.md | 45 +++++++++++- .../scrapegraph_scrape_tool.py | 73 ++++++++++++++++++- 2 files changed, 112 insertions(+), 6 deletions(-) diff --git a/src/crewai_tools/tools/scrapegraph_scrape_tool/README.md b/src/crewai_tools/tools/scrapegraph_scrape_tool/README.md index 03467faee..e006c0ff9 100644 --- a/src/crewai_tools/tools/scrapegraph_scrape_tool/README.md +++ b/src/crewai_tools/tools/scrapegraph_scrape_tool/README.md @@ -9,7 +9,9 @@ Install the required packages: pip install 'crewai[tools]' ``` -## Example +## Example Usage + +### Basic Usage ```python from crewai_tools import ScrapegraphScrapeTool @@ -19,19 +21,40 @@ result = tool.run( website_url="https://www.example.com", user_prompt="Extract the main heading and summary" ) +``` +### Fixed Website URL +```python # Initialize with a fixed website URL tool = ScrapegraphScrapeTool( website_url="https://www.example.com", api_key="your_api_key" ) result = tool.run() +``` +### Custom Prompt 
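+The `user_prompt` can be fixed at construction time, as below, or passed per call to steer what gets extracted: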
+```python # With custom prompt tool = ScrapegraphScrapeTool( api_key="your_api_key", user_prompt="Extract all product prices and descriptions" ) +result = tool.run(website_url="https://www.example.com") +``` + +### Error Handling +```python +try: + tool = ScrapegraphScrapeTool(api_key="your_api_key") + result = tool.run( + website_url="https://www.example.com", + user_prompt="Extract the main heading" + ) +except ValueError as e: + print(f"Configuration error: {e}") # Handles invalid URLs or missing API keys +except RuntimeError as e: + print(f"Scraping error: {e}") # Handles API or network errors ``` ## Arguments @@ -40,4 +63,22 @@ tool = ScrapegraphScrapeTool( - `api_key`: Your Scrapegraph API key (required, can be set via SCRAPEGRAPH_API_KEY environment variable) ## Environment Variables -- `SCRAPEGRAPH_API_KEY`: Your Scrapegraph API key, you can buy it [here](https://scrapegraphai.com) +- `SCRAPEGRAPH_API_KEY`: Your Scrapegraph API key, you can obtain one [here](https://scrapegraphai.com) + +## Rate Limiting +The Scrapegraph API has rate limits that vary based on your subscription plan. Consider the following best practices: +- Implement appropriate delays between requests when processing multiple URLs +- Handle rate limit errors gracefully in your application +- Check your API plan limits on the Scrapegraph dashboard + +## Error Handling +The tool may raise the following exceptions: +- `ValueError`: When API key is missing or URL format is invalid +- `RuntimeError`: When scraping operation fails (network issues, API errors) +- `RateLimitError`: When API rate limits are exceeded + +## Best Practices +1. Always validate URLs before making requests +2. Implement proper error handling as shown in examples +3. Consider caching results for frequently accessed pages +4. Monitor your API usage through the Scrapegraph dashboard diff --git a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py index 058af4150..906bf6376 100644 --- a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py +++ b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py @@ -1,15 +1,25 @@ import os from typing import Any, Optional, Type +from urllib.parse import urlparse from crewai.tools import BaseTool -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, validator from scrapegraph_py import Client from scrapegraph_py.logger import sgai_logger +class ScrapegraphError(Exception): + """Base exception for Scrapegraph-related errors""" + pass + + +class RateLimitError(ScrapegraphError): + """Raised when API rate limits are exceeded""" + pass + + class FixedScrapegraphScrapeToolSchema(BaseModel): """Input for ScrapegraphScrapeTool when website_url is fixed.""" - pass @@ -22,8 +32,28 @@ class ScrapegraphScrapeToolSchema(FixedScrapegraphScrapeToolSchema): description="Prompt to guide the extraction of content", ) + @validator('website_url') + def validate_url(cls, v): + """Validate URL format""" + try: + result = urlparse(v) + if not all([result.scheme, result.netloc]): + raise ValueError + return v + except Exception: + raise ValueError("Invalid URL format. URL must include scheme (http/https) and domain") + class ScrapegraphScrapeTool(BaseTool): + """ + A tool that uses Scrapegraph AI to intelligently scrape website content. 
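+
+    Requires a Scrapegraph API key, passed directly or read from the
+    SCRAPEGRAPH_API_KEY environment variable.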
+ + Raises: + ValueError: If API key is missing or URL format is invalid + RateLimitError: If API rate limits are exceeded + RuntimeError: If scraping operation fails + """ + name: str = "Scrapegraph website scraper" description: str = "A tool that uses Scrapegraph AI to intelligently scrape website content." args_schema: Type[BaseModel] = ScrapegraphScrapeToolSchema @@ -45,6 +75,7 @@ class ScrapegraphScrapeTool(BaseTool): raise ValueError("Scrapegraph API key is required") if website_url is not None: + self._validate_url(website_url) self.website_url = website_url self.description = f"A tool that uses Scrapegraph AI to intelligently scrape {website_url}'s content." self.args_schema = FixedScrapegraphScrapeToolSchema @@ -55,6 +86,32 @@ class ScrapegraphScrapeTool(BaseTool): # Configure logging sgai_logger.set_logging(level="INFO") + @staticmethod + def _validate_url(url: str) -> None: + """Validate URL format""" + try: + result = urlparse(url) + if not all([result.scheme, result.netloc]): + raise ValueError + except Exception: + raise ValueError("Invalid URL format. URL must include scheme (http/https) and domain") + + def _handle_api_response(self, response: dict) -> str: + """Handle and validate API response""" + if not response: + raise RuntimeError("Empty response from Scrapegraph API") + + if "error" in response: + error_msg = response.get("error", {}).get("message", "Unknown error") + if "rate limit" in error_msg.lower(): + raise RateLimitError(f"Rate limit exceeded: {error_msg}") + raise RuntimeError(f"API error: {error_msg}") + + if "result" not in response: + raise RuntimeError("Invalid response format from Scrapegraph API") + + return response["result"] + def _run( self, **kwargs: Any, @@ -65,6 +122,9 @@ class ScrapegraphScrapeTool(BaseTool): if not website_url: raise ValueError("website_url is required") + # Validate URL format + self._validate_url(website_url) + # Initialize the client sgai_client = Client(api_key=self.api_key) @@ -75,8 +135,13 @@ class ScrapegraphScrapeTool(BaseTool): user_prompt=user_prompt, ) - # Return the result - return response["result"] + # Handle and validate the response + return self._handle_api_response(response) + + except RateLimitError: + raise # Re-raise rate limit errors + except Exception as e: + raise RuntimeError(f"Scraping failed: {str(e)}") finally: # Always close the client sgai_client.close() From 8d8c3677ff372bca8b9d92fcd25d477f7956843f Mon Sep 17 00:00:00 2001 From: Pedro Pereira Date: Wed, 18 Dec 2024 18:23:18 +0100 Subject: [PATCH 192/391] feat: add optional return_html flag to SeleniumScrapingTool --- .../selenium_scraping_tool.py | 45 +++++++++++++++---- 1 file changed, 36 insertions(+), 9 deletions(-) diff --git a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py index 47910f35b..5f7d9391b 100644 --- a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py +++ b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py @@ -11,8 +11,6 @@ from selenium.webdriver.common.by import By class FixedSeleniumScrapingToolSchema(BaseModel): """Input for SeleniumScrapingTool.""" - pass - class SeleniumScrapingToolSchema(FixedSeleniumScrapingToolSchema): """Input for SeleniumScrapingTool.""" @@ -33,6 +31,7 @@ class SeleniumScrapingTool(BaseTool): cookie: Optional[dict] = None wait_time: Optional[int] = 3 css_element: Optional[str] = None + return_html: Optional[bool] = False def __init__( self, @@ -63,18 +62,46 @@ class 
SeleniumScrapingTool(BaseTool): ) -> Any: website_url = kwargs.get("website_url", self.website_url) css_element = kwargs.get("css_element", self.css_element) + return_html = kwargs.get("return_html", self.return_html) driver = self._create_driver(website_url, self.cookie, self.wait_time) - content = [] - if css_element is None or css_element.strip() == "": - body_text = driver.find_element(By.TAG_NAME, "body").text - content.append(body_text) - else: - for element in driver.find_elements(By.CSS_SELECTOR, css_element): - content.append(element.text) + content = self._get_content(driver, css_element, return_html) driver.close() + return "\n".join(content) + def _get_content(self, driver, css_element, return_html): + content = [] + + if self._is_css_element_empty(css_element): + content.append(self._get_body_content(driver, return_html)) + else: + content.extend(self._get_elements_content(driver, css_element, return_html)) + + return content + + def _is_css_element_empty(self, css_element): + return css_element is None or css_element.strip() == "" + + def _get_body_content(self, driver, return_html): + body_element = driver.find_element(By.TAG_NAME, "body") + + return ( + body_element.get_attribute("outerHTML") + if return_html + else body_element.text + ) + + def _get_elements_content(self, driver, css_element, return_html): + elements_content = [] + + for element in driver.find_elements(By.CSS_SELECTOR, css_element): + elements_content.append( + element.get_attribute("outerHTML") if return_html else element.text + ) + + return elements_content + def _create_driver(self, url, cookie, wait_time): options = Options() options.add_argument("--headless") From 4c5f1962ace1f5ad4fe628f7cb2a33cf19753783 Mon Sep 17 00:00:00 2001 From: juliette_sivan Date: Thu, 19 Dec 2024 14:07:36 +0100 Subject: [PATCH 193/391] add linkup tool --- src/crewai_tools/tools/linkup/README.md | 98 ++++++++++++++++++ src/crewai_tools/tools/linkup/assets/icon.png | Bin 0 -> 32966 bytes .../tools/linkup/linkup_search_tool.py | 36 +++++++ 3 files changed, 134 insertions(+) create mode 100644 src/crewai_tools/tools/linkup/README.md create mode 100644 src/crewai_tools/tools/linkup/assets/icon.png create mode 100644 src/crewai_tools/tools/linkup/linkup_search_tool.py diff --git a/src/crewai_tools/tools/linkup/README.md b/src/crewai_tools/tools/linkup/README.md new file mode 100644 index 000000000..c51946a11 --- /dev/null +++ b/src/crewai_tools/tools/linkup/README.md @@ -0,0 +1,98 @@ +# Linkup Search Tool + +## Description + +The `LinkupSearchTool` is a tool designed for integration with the CrewAI framework. It provides the ability to query the Linkup API for contextual information and retrieve structured results. This tool is ideal for enriching workflows with up-to-date and reliable information from Linkup. + +--- + +## Features + +- Perform API queries to the Linkup platform using customizable parameters (`query`, `depth`, `output_type`). +- Gracefully handles API errors and provides structured feedback. +- Returns well-structured results for seamless integration into CrewAI processes. + +--- + +## Installation + +### Prerequisites + +- Linkup API Key + +### Steps + +1. ```shell + pip install 'crewai[tools]' + ``` + +2. Create a `.env` file in your project root and add your Linkup API Key: + ```plaintext + LINKUP_API_KEY=your_linkup_api_key + ``` + +--- + +## Usage + +### Basic Example + +Here is how to use the `LinkupSearchTool` in a CrewAI project: + +1. 
**Import and Initialize**: + ```python + from tools.linkup_tools import LinkupSearchTool + import os + from dotenv import load_dotenv + + load_dotenv() + + linkup_tool = LinkupSearchTool(api_key=os.getenv("LINKUP_API_KEY")) + ``` + +2. **Set Up an Agent and Task**: + ```python + from crewai import Agent, Task, Crew + + # Define the agent + research_agent = Agent( + role="Information Researcher", + goal="Fetch relevant results from Linkup.", + backstory="An expert in online information retrieval...", + tools=[linkup_tool], + verbose=True + ) + + # Define the task + search_task = Task( + expected_output="A detailed list of Nobel Prize-winning women in physics with their achievements.", + description="Search for women who have won the Nobel Prize in Physics.", + agent=research_agent + ) + + # Create and run the crew + crew = Crew( + agents=[research_agent], + tasks=[search_task] + ) + + result = crew.kickoff() + print(result) + ``` + +### Advanced Configuration + +You can customize the parameters for the `LinkupSearchTool`: + +- `query`: The search term or phrase. +- `depth`: The search depth (`"standard"` by default). +- `output_type`: The type of output (`"searchResults"` by default). + +Example: +```python +response = linkup_tool._run( + query="Women Nobel Prize Physics", + depth="standard", + output_type="searchResults" +) +``` \ No newline at end of file diff --git a/src/crewai_tools/tools/linkup/assets/icon.png b/src/crewai_tools/tools/linkup/assets/icon.png new file mode 100644 index 0000000000000000000000000000000000000000..4848d4c6b19b1da998326dbd6a3efdf3671a48a3 GIT binary patch literal 32966 zcmX_HbwHEt*WL!BJ4Q-3qa-C2DFs15fg!PtR9aGUq%=x7Bn1VO8Xy9KFj}Pql#m*r zfKmgbVf*&Jzt``N#Xt9R-zV;Ku5+F9B;7VOq^IGg0RRB>H;nWw0045*UmzJ31?i`% zJ}4Og5CGiJyLKm}V6!mz9D4T=aj|VNXeZvdRq2<6(1&C&Ik*qi-2EKO(dgIO$hnLop7P7v-E^>WlE-rS5_VXL}3+D@GNT2zOcE`F$Ticu2 zii&O{S4RDdEUx`|eiIMNfK<>70eC>hWk1KTWvCBji}%Q+OE1JxGG)XFqDORF8Bsi% z7RBwWU;7aH2(ZPTj(L%#TyS1>tHdJ)2hBX#RNu=`oz(mjDSqj)g*Q zjDkv5r-m<;uK-3`#mNjgtDg3m%qYtAK&<$NloDSZgJzs0nJIV#;7wjjwi@dt_T&}n zzmufWSpe7`)6RIWc2Z*$RobGc@Xl~)laI9qvqKhwU#M*8L`UsC;DFK9@E+KlTu&m>&%AQjIalNkas`r^({)`?PSps2h`Czk?nE|cR_ z1a=4Ih?oQ(>CMVw)X*>fTTi9%a7d(spX{8C_RU(%uBOV$P9x#P7rVn(5NIHr-d7AS zw!BG}H6{ek{JY~*S8g?yW6|80QMZ1MMDW_nt6J_|9hNRwyFV8Q94{^n>pB);^ZlDW z+y6{OC3sWM{nW9KoVW;LWTO4F4=wt|2qxMVKXZ@(K}tQsIceFBZB3Xh(zIRZ1^#Cl zCZxzT<1yHmV>dPGIc||LS|{aKq{YeYw-z`-oD%f#8kZB#zkcw~S1TIO0h$|J=89t6 zvwr(!QM#B=I%=M>J(z@r7%~5S;wydE+2PknAVHgaldYzz!t>w^V!;UFd_(Iw8Mg%!1|-; zPkrt0!VPgd(OC-M9=nu>L5*|tylIl;<)!|{Ga`Sr2M=+MLLEepX3CNW3m=af4789P z3*#BlGupk1#~&c+7u?cat&?O7zz$kFN=+l)A&jLy%8eF((Mb+X8Cl~Z`+oYxt_;89Wa^=$0{Q8%a&mX`u(P%-D>2PO|;pNt$_Ea6w9|KwK@GOH1T` zSBy#6R3h9IDA#R5Xk#bzMEGE1mbBsxUg)&aaa)60e6`6u8ND$Lo| z8v9?r2~DPbVH|CSjKu2?pE`oW6Q+ARbMTH3QB)Rm;uIT(;X1!!LM?C1t13{1*I}$q zI@I9v*8gepX>pEI0E$B_B{u|2cQkygk0)OK51vn0%3PssJt&>z<5&}lJpFI z8QLzlTr|L=Y)7Z7noi7p$mp-*=mzLpd|Y7NKML;ps?!Lioi;gqzJ4}ugTv0ptcZ`Q zdeQ&}230RzL@MD0hYEBGW{O`mu?TowVVqV~>&OQD73RGd8!`hR`>HLEsGi%ui6b)^OKKG^uE$~3uG|huO&*Q9jNDw zS5gl&XM~AiBIm3aK-66*k{}}19wc;3Qi(vC)aogo9T|UfUFzvKZrn8-JKfQTzk$LW zw2+jy*~YXJ^I&o&O7$d4P-TBBKz&6=iUuHeMq!iOsc|Et;>lfV{8Q~*`~NOyFO@l1 z`}1<;pCva%QIs?d1VscNzVLg;pWuj+dma&O3FBoHsA7RL9NTr$%KY_SwcV0H`2CYr zcexNTw<-^G*wstNmsK_#U!9~9)mRsNFkuIQkOAPwGV)`!wEJxFfA3>?JH+Oe%8SOl z8RB4>Q`7z~he8$_N3cRhYuB-qq)P(3>B#?@gZztxNC#U8YV-B4%x+?hO=#x>Ur$Lu zgJoZ~m?2s7Q+sNr_YT$I1$JJuBiMh^^cVQ%hJZqVzWibVuJn2w-u^Neoix|2NIufS zm7A#R(QQeV;CF)zOz#=y9>Wmp+O+>EAFzDBYY33O?=`t-0=!LWN*3f?kCCKBjrhJA 
zD#GR{jqEjt@$tnXWPIt~ul+rue6%k=P6$Gq2de>$kn>%C?<)zux^{dq!>U$1k_`w( ze!i3#tMLEHj4U(eC8G#soV*0Ar~X>^_rcliNdm+3Q{<=U z+=o8?b#J7eUvX41c!BEp%CGLI%k1_N=Vwt-ezXAmVF+GA1B%jLoqD)YKPl%`%RN5%cj``Vz(3wB!T(AU|C*}Y=lFA6GVF^p8u%X{8zq)pv!GCoj+)s|DeNK7c(KN@P zDXDI1;eIEZNFZ6(dfdHhOe*NQ`@G*3%-sNJv((>fp&%5VZGh-3B(_LXLQrL8P9nqo zIjcUtCi=zvr5F1qXFHVBiqEf(8vi|7!Ws)@%wXeJ`QlwyBcObUD=O>b>gT8DMB(01 zNfYKgN-dxNIZf|J%b@6r80ts&$f7{+)^VJUpoN$dbjn@>7|#PQR+;^Fzq)w?C5*wP zd(SN0j0sYzA|7OWWp0R-x>!TouI2QHfQuX+s`zd0e=jimZ~IW)8Fm0G+BX2?OLsCD zCWQy#ih57+YAhWTgCgYxui?LdQDmOnHO7PExKU}gp*nJgff&tDC`w86s_5>oGx6qq zG-!iNgDeb~(VfjY7(E|NALI11UI4~%Da_^0b{onVbr%a8Qx17#_qs$~>VL-Pq+LJ_ zm#!*bID$6|A8O_uKYJ#HXW4w@_KAk+J$ZqiCqd_$Cp}Jqzgsl?&=PwY&WLNma-fQl z*MzfviQWT?dFIA66o%bwF{7_)76%f^HJPWGzFGdKCr*rpuT0^|MJKv1CM*1WaFhkN zSid$66}$SpS zUA!?k8gUXx-0in_6vKond|SGF!>%qx@5L2cqC&o zo=fd(B#pU4_+*r%%9Vyk&jzZTJ|8$bU8F8Vk@RM-z#7q!8Zz0O14fO09nmiq2X5#O ze4!(*V|09le<>1Rt{acN40LuZda^GmL*I3M!dfO$Moz42ajI$(;wdMf$dhp2@TNLl zI!f=FM(I@*tBNuD+N_^~Casl6j#PpP*_ZRE>w3bEGK9BnY;)>jhP_Qe*1%#A-?hSV z30%yE1&#%GecJQ7xbA?!@9PdrZ+N88d(|=2k#Yc2@`XzmIvF2?L*@D@qb84-2U^#I z9+gD(ap#Rvprqp+Q=nbVlIOLKqp#IBvay&eL2e5_!t$cT2 zzx{`o1Ft1SR1*85{p=w%M)vJv2=#qQ@y1L@V<~HR=a&4dPm$9afjWn^A#-F3d?x00 zw~g{nX%yPyt^u?X9V>PMrpsG4Na!?Cu+)Txl#QC5KsR%*Rr&Z!t)AHdCvVGMGk+`g zNAz%19j}J_iB&|+NB@j~WbbR!=FpoJGNM(`H`|f;-wtnJ1YOgGFTpe0FdP30ft;` z09FzTl`dO^#TU@v6>+J(>I#}IB@s%HGlUOZc#pPCpLry8j@~Y=Pr!z{AReq^%j)It z^QSepA08_hRkCZV+1SOlx*#9_YG;-@WinF2_2P+2&-kYc{N5&AIJFJ`F<*Vmi<=65 zRR86&>sLve-UImvcYX<0#v>s4O-T<|6n||viT(EJ5@It`Y;l8;$@VTA*#QdDz_;(E zfGY~b(+lRPU$}JjBoVC))-DuW1u=ALZ}9;EnXjI znfzrt&hGq?V{&}PYarckev#XNG{k6XT;bo^~A(Wpsz3%XC;u3ao z>{8O%Q|He7;MPJldU)RD8`PxI7$hd&<-vZUUd-()#vRT;_M!v>3K3Qb!O#5hepa1a zW*acVBi}c6*cF$}Q#7exF(f{sN_)JwonT(L*7GL?)t`_KnxZbc6r2#?ZBda(xRycEC=Zyyil zptBUp`9O&CR=fSD{lZ)ZX#9a{4p`C6RIb=MnV6_ zL$*(LsgxD~lB-Ct-$M|-JQ{vZl>WXL-`qg28&6-WO6fpvXS^S+*L)LIT?<;;GdHQ^ z>knm}JedLpkA=uJFf6+&xL_ZX&=luRy!(inG$0pv2H!>0!u>QFC{p%!AyC-_a!!TM zM!0EV& z4Io?jHCl&^r)-AUppbSlMTk3wv9mu42l7LD z^vpgb=9n|G`1^-@FLYOnxg46Q>d}7Y05vnPx0ttF0|l48Ksw`|2Ao#pOX8R=%|GSI zE4a=wgMMmtF_oV)vDIA?*}oV+{)kipIYU#h zu3abM@}ekO!85qx*?w33_iwM8unRJ^ew5~7~=zMqj>^ALH%FNB5}N_!>bA9%fssVSCC6T^kXHe0oAd=(MTr~&Q8uS z*?ETd5sOW|H{;YH^8`!mM5(_SLbvm5Q~ctn0#msd_zshwE>MNVX>+QcUT+ zng@zBQhCwa3$%?PTvMO{2CB_rw@lI-L zM>`pPAhLv9s;!ZWs{yqLaXK0**B5?@De?KPJ9*MTZO$z@Y1EP{FB^azx~)H*5HJfl z343@uN>)FyNEl3F%o3ybY@l12jN{+wC+#F2Nj^{G61hj_NWNX-SF&krg=qWsP#BWv z?S@>8Y}Pa?|7|=8CTdZ)3rjfWUjBx1WT74DNtFh^L686F>xC$76^*HHMzm3enYn=F z=TaL$*u`%@ZFn;@X*z>aXbpCLjlQB9^M45-A(;BaNRec0^ON(~V$*|q{}DbmfdE9@g?IYa#aM6ks~ohVj=e7YAz-D}9Hc8+x5$*3Puy4gvWP+kgs zcOmf)A?KW|=soj)Hb(wfe+I(+MBo1PmZ$m_*&4_V-krKGgU|}0BvE@)X#U8YzL*x; zUogt~qWm+4p|3>h=~SXj?`mS?(94{wkZ_{9YrQXj+CllZ`Zrih3@2re_PUwFS|HZU zo$bl^w*K*=?3fTOz})o>dXFL5+T!&nGYLfZejJwuUsV&BSgx1d1xOS@cg|8xx zSAcQbjC>L@gNn4p164^ov24K7)aq{JV>Xx$DHf8X)J=R^#t#9ElD!k=t8C)7I~f`* zj-GgmACp@kx1-U_F;F}fI`@d8+N5k}%+E6$ufg&2xp6lt9Rq#kQe-&B=cwll39rqs z7$Oek?tsq0$9%hiO<_gKIC=Q>KZK;cJ$cC62KV3SWmq5yEt1T6Lrw-DrhwQx>6M)S zPR%cixnN>7YTOi7ck_X|ombTJ?&;SvG>V0Q@VYqLWkAf0WM20+jV}J^to!`uvd;HB zSKCbMkC0DVVpeG1TE&UXKn6bw=*{5Yx3RQHop!v#u2`cMIYKo5mCa(z7>w1#gP`4^ zR)WU2vzsXF8<-eHN%{T$U|NY71E?jhEr}4o`ku^+%~C5E%{93I)ih2!USd~9k*}}) zOQ;xRI!FP;P@O##Zm?UQv=V1e{+Jr^1_yy78NwNCI#DYpjUCL#A{aS5Eye!1-LBr9 z1V`3^%kE8#Xcl-B&d6Z!_sf_(W1@I{+jYfmw;;|kCu%_hO2%VY-@;C*+lCIKrVU=< z->q(5mm?(`_$I#$Ebgi*eH0bXR~dP3$(hf(V4HLor(%Z>N$Zs45n!CQ&~Tj&WJlg!94@KD(;?;;rSLkX9?ex_@7Qzi0_eDQW2U@@s<6ruZ2 zyt`AU>XAuhI6Gb)`f8CaP4|a@>$rW3Jr;SRCC^t{YP#(uSYETuO 
zS%!q(3{`A!B;zql;XR&V7HPJHbQg(#u0|XuEc6AS(&;s^=thv>f?Jwd&v~6VSDO2# z3+}|`YC8wwXwvQDKKaKg1ldpJv7Pf{hUYlf5C?`jgsMp1-n`d&(ivPM(K|3hl z#gro4qp9FlK~zV6Nx(|#sIxVS9<@^cHB1@D?1{BAhQf-5dBy1JKph6Wg4$eavR+(l zMFWv?+>K0c-*3YxYgHa#rm(N#`4!pehg^*76>xgZmi~+L6LL%00qi&c?#d6I+co5o+!04JhGSel z*l-yC^)mT2v2B=J>m2)vAXI)*wZ7!u_L>fdm-VFE&nfljZ#8Z}p5~c>98tI|bg?@w z)*PYi%Pcm1=5IWigg_a{$Z z=syOd8UyY)*S7|0Snd}iV&6(S6N)*CXBGMBAzw<`0x^uf@_y+j&^f9CJ*OeS)52#?V%wA~`6rLN3u)iND&g91 zDUphijNpy6hrAUgA%&;hxipNnjK>#6!w48d(69TyPE#ngjNh-lwE%gJKYmDU&c?F- zZ)mZ-oee698P?=a7%o^Cyo*E`!QIyZmr#3r07t0!`#*7=DEC0*yrm~a3 z_ivltpVvQ$%P)Y#T+U zk&XGa6(_YR1`MQ*WYH}r`Qk5gIvHT>lzekq%JWUt%@`eo7UpGS?kGwHbM&N*oQHD;`7l-*~%vfb~&`{(5Ux zFk-Tki*Noy4$ZQp)&L&h39`Ggr_}&{{wGv31AN`?dt2#2P8dpOY~g4BRE7l|`M(Di z?0;U$2FL&r5<4hAn9`SAB~gHS7sGdsb)qzbW3W(JOAt+_|_?o%5b zD?83`8rnkij$d4R2ESQr-j%}zD_W%Z08RYgK})09NMRGy=A-~>kSUA>qw`@bf%>%nDJiqEvQ5UZl9rfuC{Fi2d98>`dBf# zPEt=~kepOm{)48}o|sfP+^_G*a;cQiMLz;z3N|?$2TG^w5a;AytcI9q3y3t~Jo^`Q z`owr9Swn?ElUJOpINQzcpvdH2C0^Tf^W*1*%iD*e3u?>)MbO01_N*~P8Be>6rXlV3eZsTkr&raH zu`=YyeCge%3lDVsB#86X<15=T*q$+MMpjC; z;P#9?vz_9bH=1ckT}|Pn*jFw~47dQ>H9>xHSNZ6NQMexLivYJgg(T``qA9QQ7^UG# zWM@xkC=kNt*F$%9qWzdm8lw)4V-hN~BEwig+}SLmn08jfgL7qFY(-?*cRn&+m6frsxau2RZ)s-t8LH^reCn_)IaRXWi*&r}pQ! zNuy7mn{uRozjU-A2<`8dR@`J|a4n90WzK6Q4fsZ;AQksK=wof1X(li9tL0=)bY4t) zRzcG`czAnp727er`Pm8M@tv8+w}v7Yz~dBQJ>6VGv7JP4q_>Nw$I7NnI$o*?*6-2t zC0y1vHka*wC+q>O5?T)wSatO>_^$DkS6poI+1n$xxy9GsiudDb#;jKUTw7d2{0`GS zBtJ~f{ZOBMzyruf)^5x*Q8?E*C8OFDM7!={^=$<%`^y$N_I&aR9cn9^d*?oPY7Gydj6m$K9-#4N!aFVJP~Ya7Gsl zFm7E)G=IUd8t8Jz)C}IOyPzVTC)s~i=&KA{dGGIMs3hlZ z$q&Z#C=8;)qG{_pcG-sJrSHUu49e(T@?cy5x6?GX_KGVs`l~PcF^#~%@4wmd7tgGzSpDD#COQGsS2#ZncH0Xp;^-u&X(ND zE7x$MY|j$ZB~hI$gOg3FlRG9aHz(x63qC}=-OM;h2}b4)I1W^U!|!o6M6Sb>DDHf0 zI0t!{yvg>@edS0Brw7_pG|zr$(Z=$|6YJ8nbuy?iOpDgkAu}hJPd0Y_DJ;g3mGFA8 z2fdLu?+1=dEnYz*lVyDydyg_Zy(?staU`ZIQdVVv;?{!pI}fv2nfy$-m7ASvfR>`| zq8DwkTNd!vbH1(QVc)r{bE@$Xxy4aMeGxF`gT@)fDDsvZ8>X8SP}y03S? 
z4?6DRy5_J)zk4)&`CaJ?AnO5Nj!tgaXngm|kD$c(ce%`RT9(?U9BAktN`J8B>t%be z2;{TJUwqV8O*=wxHPcF;O<^7@jd+5>X=%r&O!KW6J1UgPNeGglC80k}mE?I|aDKEQ z0l&}qd7RPJD8pN-SAMx(O?SW7``P7XuP_Jj*bxq49ld~ap^@T_zQ zUxn8F4Z;cA+}81#7#w-%S-Rwc&B2<S5T4IFyEt9;kOZVMyY0SL z-<)x^eg9%{YF`3Jj$=g2D>4@--uh^k`}*1ET)f?jrX?ka@6i-mZdEaVQ*kr!nsY_^ z!Q&`^1Ak5aOI4>gS8yS?;&q>ko9PPnRQaj7Nf6`aYug5eH}QF%M-6|J3(L>8IoNFn zKE8Pe3+?hoE1yu?2+SZ}#Yk2bQ=U`%i_auzzoReIE*XGF(!=omYt5+@SL$_!44nv; zvgdAjkBn3xw4}Kn|L8?ZVm!Cu(~LHG#ad0dc?)$~6pSJqA|x#xc9%&|Z-DNmQ6b17 zu~oPo-49E#>PU@!i!rj)-b|(!GV2o3JGq_a#Vte(uhYzZ>TE z`BCVt=N86+ZEBe@5qzx=IE$`7ug+{DO4`0SJ+j%Xb>j#bx-1`kP+^Vm>J__CyDv_I zl2SPEs>UCV$=9qek{$AWLnkRiX`Z3i_HbI}JwQS$GtB#XG#gwgur*KD4w z6pbYTEPIC5_ap$n(Z%o?O3Dv7fuU1Gr)GMXc^_Ai0CyH}59)w#Z+B)He%cy~yLV2I66ku!ZwWj`527MTPit4<``yV;dTuigYu zQ(Jk*hu=|x-^E1os!!77KL5>#wd#8BoNeYlub@fQ&b-eP+s6fjYi_&AwNj=$p-bzk zdT!oTnz23K$a0WCfCcc=)Z#BkCL2=@U#Vwc9z*=rBajMIj%+a|X?7> zlg_}MWJ`5Y2mx(mG4y8x>O`!pj_v}PiZ-t}n?oqrX6(5F7{`XIp2vP7WNZk-lM+J_%YH)8twW}aZmpnztTOXHQ-41 zmKHRH$px8BPB8UbB1(9c2b6&_?1PQ~j0{w(9mfU`Y2aro*RkiY2*CmN?z<(mfzEl;*Z@yz_~swaa|rgjuXGcOu+-?l30s=y_uX}KuifQTYAz1(#d6~* zs?8#4{p`wgP2}~LzSb*DtHZ@{9@q)@a0gP=4Jq}n>1CrMUe{LD6^uJj@W(#I4oEm0 z*Rj&>G2ILOxn=#lmE!O;!mJ;R?nCC80Ej3su6&TehKBMl{xwMAt z_*Q0@r;~i&N&QufJ}^lp(xgW%E)Q+3i9 z6`|lBp*V>Ghqp4a=Vr)`XJ*~5W*YQr`T8yInR&wJuQ~+@?3~Rh`ES5s{qR{U>v6|e z@ah5A%Z7JHD|*MglgRGgqa=3NQqVlh&HwO zr>=p+pd}zq6L%9jeKd?79P#|)_}rt(EK{Z{8<1rVYo~Q_(q40fJLxcHYP{La%iOU#4*Obr_fN6<>S4Iv zwSkAQeF%;cJF1|+FN_|4ls>ZI3+7oPLb8atPJY-!$|4?_Pmzt*5m{}IV_)%npg0bG zC?&Wr1!!0`4czMOoA&IkROU36_s^MXo|Ol45Ulmjh%x=^-DT*Q2R4;XG$_sJn8_ZB zSB_vzg$9O$<(Nmn!;P9a2o8ltjL6-d{oI|C*!cf=VivOPUABV4~PL-o!^5Re4v zdcwXwwv04NqhRN%nEQbzz4w$TB>XwW^JQ91IqC>OMx<7!#eZWchMl?LT&8hRbK|-X z?7oNf|Kh8|#u0C|X^sfnOg`VLzwvG2J{uvTeRl9DP-b%KQU>ge7FbP2ElO^s?~c52 zs~Pg)`+L_ys-Ks?-DvMN__OP5320e8xH&Y1{OY^ZX}RB!g7Kv3EHl5(R{oqR_C-S8 zxE-l5mCEJw=jU!paVbkyW_U7e@4`UpY;!7S?hsULe*CN+JIn%|bK~;SFam9Tmvv@P zcZA0GtxlT!oJIQlybn)p5Vw^AO>XSbz#sGRI{qk2mdw}IE4W$hzO>k=^ntm`?S*j_ zQ>kJ)MNy&C+3je%p1AW-L_OI8#hk&WMQ*|RZ;qx)x;4OKcx@oZ6Rm?Fhi;Eqkbjh| zoGrhrwvlKGs3iRr!yt<$w4kelTychneV@o>GWf?py)!pOVgHFV>%f<4B93}Ex>l@M zl4#Z2t(@&|1?R$W%&=R#9llf3O$64n=XMA1;BF~VoDYr%eXHlnC`J-829V{1B?Wcn zKl{rALDD^7oSq5S8O+nQay{}}-F+WVA*^Lr(n40XF%KDOZZd1Sd|mxW!1Z9)1=(k@ zz9JpxK1#D`f1!k@hX#i7bxkjBDPtvV$}^gs{9MH>-bweUo!_Z0@aaK5IG_m*3l&ni zVJ{r?)8l3vC*TD!Di?r~)w`=8C{>rC56kQF{(WzKu6PAGWVq$Z%`V467>u6!#tosw zU2FJua>b-&A@-q?+OSRX@c6a;Q^@HThp~MYYQfW6;N!qQS~H9;=9AV6(yx!4vx*lcOH{C zEt=f-BQ8azu6;gWZ9sT_Ulz>eZAW0$_Nddvws?IDrT#tg5PrL!<>hmJ{NmW7=ccZ% zB&hLBZP~kRt1ITvj;ES`l|u2>@#x^FLTCMkdupiW(2>hmDoTvClk3Yz(}L5Tp2;X0 zm5pYF`V*uEVSIvgeu``Ex_K%oGqq;w)cbztH~zGlc-c#o+{L&LQ^Ip;Iyx$~o6}Ef zwL42zk`BKrsD9O|4?V7SkB~dR=t2T7CFkwunzt|^(Z2g38HJh3BdPun*QEVh$YKH_ zv1h4^j1&)0t#dW^TMgYpnD)}@ZA9~(XI^}ZirhaEF3y_`B&94ila*v9LPkGGDB;?a z$oVr`ekCi+`>SvF+#8COT%%O8Ry8t z*+N9qTC)ZdnwJ*RYx*hD1}3J75I;VY69)VE!`?1inkIwgI&T7OkzM)tNPTHWih%mY zBiS7i;EwN{FL<7H22!=E-0tN^#a|u5_4}#(nrXR}pl#!@>x_d!aqIjG){mGRIS4((i4F}$WF%py=k&7sTRw@EB$^_hNEMOj=qI2d|}KU!Njt1Yhc_TMNt^WhoZ#LkeFbc0L}-HxhX)-;IUbN*pmd z)Y!rwZF%+HSG}&f@4Yy1t$oy^K4`-kx06Wb#WB_q_t{6Dqbas*=XKWZFkb@yN6P}Q zdF1SmNOvyzEr0NfZ`rL;_gvT>|5M-;iLb&d`DYSP-B{E7`gLb+p9Q9LgwcB4v!>@n!h78Xt zSPu=~Pd&BQBmPc4JBub&fL#>Bh&P=Go=zHtA(43EqXsiOdf_aa&s!XDn&1j5Q4~KB zj6Hp619YwjObf11&pVz%JP!lV;Y+R2v4LTUli z%$Dd|w`(ajeqDK21D6HpChj^z2ET=0E>k|?X%Vr4u*ubKuEP=UIBv(f}EJe%@7s zQU0ecF+JM@P|q9kFn!CcXFQx*us>s8zR^SVOwL%tN`xKSea0anCX8xR=QH4~cL?Z* zV58t2ReVs7nC1NlBEPS|1;8T8-n_!{eGuv-eURn^k$P7P~Azx1bE@O z$!E&cHo-pgjKDPL=||~@Gf10>Ti202pTgu32mLoy9}FQP@L?G3mP!>LzKzjh<8G~1 
znBmV2{=!}IT0jf9CEKRVHYlAmr!W^?AycEqY`lV}s+q7%+{-$gv!38Ym(3Fbhd=@w zVG|xwHoj7c8;Hvqh7IY(Nx!Q(`NrpLKbM5?h@JBh&|4)%E)8NG`>u+FzJwf)4?voP|m z%(U8>z-UtRpvh}5GWGX|R_+kPKM}Rx61a9l51^hFMB-G_OOQapg=2c4`xX@?{0PpS ziom_bTDMIT2hgKYA0_D0^@N{gZo%F|{3$m-NM65GnI#9n4Nq5pEE$xETW4HYTTG`{ zmqP2##nz^&S6JE&<*MUDlDXcdk!ce&IrC2z(+m*pm8YsH!>A9J8hv4D7AiF66y>OU z3i_L#=@$kk^EFP_ZbTV&cm@pna{Sb)S5rQ~XDC7BCy19W7h!pR=hr{Bp;)S5C-rA( zJijgxbSNC6OjKq$E(p7((z+=bV5V7<0q+!C+(u>E6sd7MVL#Lsl%is-mWC7Jc z*r5F|RNIHI3LL0U4iG6}vlQ)#A!3h`u*2i13^ssnD$fy!pCM9} zoQB)ayMMVTK*M2|>Og~)4x=aKjo1@|?j3|G9qP~15E1;rw7w#EGUTi4GvbwToPp;T zXNn};bmZsT_bFzOB}yddlk_s09jA}Zx23Oq*$Pm*N0#rV83LZLvE#mwUAbNW2m%pk z6)6WNx>{~5w%VwJhtD(kK3$6@b}-O>d|F}ZW4tygs=qh4u2@z4CtgeLDO0J*9h4&G zPw|Sx+sS&Y<$Nlv`|Qv?o4SNOYx?35EhPL(KpynrDnBk1I=v{VB(*-Mrp=DD!>97l zqVYuW{>GjA3C-~t@Z>@8UaR5DrK4a;NCjQVf|Tu+3c92q=d7L$6*+WSxq;OtV1ZR3 zYrXwiXgTKQ=}nRI44%LtniBg621+>%viySBX|WjggxZN8`41kp{jjyBWZNG9>neWx9k(QfejGVH$`6ZibId|3`h zq{R$}K$8i>$gOGOQ!|K9pNNV=3*ERqi4o3)KQ5r`ptl5>SjsNGp@v6PPs}_X5&Gi* zXh%=?T}$P7mQ`oElhpTxQ{*u)qqOxTDs!S94khJ_CD-aGTKky(BpP3*w*g_i{4HMT z7Qpg)Nj$x$mG&JacG2@#tyq2_G4Urm%`8bJ2Ok%<4$Q!V*u+x1!H(Gt< z={9t+C&9gO?H7pSeY6Z%tCwi4KCTe7Ntyj`L(#d z<9+h5)Pry_y^|L5V?qh7`NMXm?Q(;^vjAJ~nbdSn7rdYVx2Tfbw*Y*WcOtbsjE6*8 zTBhIjP%sk1m>x%9H-4EMce17&Mv1P@u1$&2qISDBh_~Ka1CFu%lw)m*oQ_}ARmhrd z^>0br_zvc1Nt0YYbuQ)=)?e|L^F3vTh=S&sU{ZghkXF-b=DwAoD<(8h9ul|nsVho< zy%*ky^q8>R!{ zX_*7ex~Z}{FL)yk1$Ga&-+?ktDnS{_)zxH0War{(yDyW2R3CAMXuBfhpD9V2U^~z%omHIKn=c!-eJC@V2SBLnE|BTO%$?nRGrM7C&CGRAxnW3>GUcP z42OKR*%Cm#T2TW*gNpnnEH|-$oxDY$-Mw6x0dApd_Q;Vbaa^BgEp0A{UQk>Eda69? zE1|vo<4*JnMq%0n(_z&3f9-u$R9r#VB~9ZV+zAq#;Oa?%>sB4v=j>Ck4-*Xc-}M_Lv*DV7XTj4i#Sl~lS7v6Kp!sR5`#E({6im6qq80Orl8tU2jTfziSYY*Ch$<`rv)#2cK znwnrU@nab*;g6sxM6dcTY(( zHA2*B=${VbTZGx#z7jbto+U4f=Hb3JHFGVrydBdqsKq&m{G+mc?e&(Fv7n70xV|_`!Y`z@i@Nb4kJMn zLG;`(*z{e~iZC0T-~$`iEH`zt%Z1hV69SJcrGM9fQ6h>hrLL;*9Am1OJ_}*6n|t38Z}Wp;bUS7-77PhcGg=%mw!hkg>8e*xNArD0kp+R2!c^0YqcQ!^e=s1^(Etgt0K0@H{KIG+id#8DQjO~(mA%Y zUzstQnJHZUN}}hXQeqlMM1;~RgkdlSp1fY<{SGUI!_3ezQiP8&ya?9lyIS-~$7rvC z4Wi~@&l$dMe+;%az?=kU3ym?hepSVyT-b-R>CQO@0Lu7Bv5Po#Sn|jhzcS?QJCS7- z^IE>g&8p3$lI^Pq;NUxOZsVOsAwmd9oXzGRUP!qPP;T6iu-U+qMat&GBV>;74rb{U^zyqt1 zevSmSW7dP&-O@kmq-G=ca7hN?PRcMii3XQbg(?t zLWF5T`s#+#5sXOOr^}GRTJYK$>H@TufDT@fKQg(_T^1HrWB}p8b_n~o+t~ij{f-)4v$qEz zyup6LlbhxtkQ*{cu)}3D&-=OupI|uf`}h2TmcglklE5jv+<2SfFW-_VEKAe>?9stDgAgpTR9UtVLAxHCOASsw{CZ0NcCL$IL5Cp=ust3=mO zv%8<5o>0u8OG1O+Bq3jV;?tJRr@~V?V*gxmE8P$6a-9PxBg8*$EhT5_z)A18y-@Rs zQ&|Z+88mLc^@}?*?#!-sg)_y*dSaO}qlGxYn?DhM@Niq2JurWKRmv1spaS%va`%h zRI!w%L_Es}&8HRoH>?O5md!5ywnVN)f*2rj;zpSF%##=f$eumtyl;*fPx*mUa;Mo4 zLyVeW6$gw2<%s>9r08#scPL%}CwdoDnxPxZJBeg}H`Qw}|Hvdil3V)kIsLBxDoSlr zl#@QI#FEIi@mhBxf7>L>4Az7-k`t4}FAIJ91D@p@5TE?Gi6Dj|L~i5!#*&b1@xg#T zmJNY***w6pi4B36%c6`~W(RiF5Cd}w$qmxrXVu$^5tM+eiB6<)WIo97D{hzZ|shV&OuGrwMaia4vTR)&mNq z4V{*3FqlmSLdBOj+Yja0W_lpJZi2DzrvauCGx444$5_ZU?EvW{i9* zM|4V_{904$uQsib+D!{30@Cdvvax8E zCNYoEm!Y(S!-YvLh1cT~3^z5aroxb(?_iz@cpZ~r74U6N>;xhWQb>FROnN7ELvUdB z;v&rcFJvTQ>!2m1ZjkOl67} z%;@O}wk-;}qL=nZL+h=0s9+mi5t{c*y0A!tM_C#sWs z7VuFm1+l$sBEAF<2X|8!gu??55qSBUz{`PQM?HnJ#Sub8MWi(%qVsATC$u2Uc!Ilk z?BHhHn891S38tqpD`qzVAC+tF=FNq3orZfCVow0GvTdhW3&JP?5Mkn|m!fK>iU0b8 zO2>o|%X`44TIpC{1ngG}!4Q)@Uv;%r^WQ4|lCroGzO8}fE;T1Jvb`#=_Zp#9pgR>${TO=F>D%o`! 
[GIT binary patch data omitted]
literal 0
HcmV?d00001

diff --git a/src/crewai_tools/tools/linkup/linkup_search_tool.py b/src/crewai_tools/tools/linkup/linkup_search_tool.py
new file mode 100644
index 000000000..8ddb81527
--- /dev/null
+++ b/src/crewai_tools/tools/linkup/linkup_search_tool.py
@@ -0,0 +1,36 @@
+from linkup import LinkupClient
+from pydantic import PrivateAttr
+
+class LinkupSearchTool:
+    name: str = "Linkup Search Tool"
+    description: str = "Performs an API call to Linkup to retrieve contextual information."
+    _client: LinkupClient = PrivateAttr()
+
+    def __init__(self, api_key: str):
+        """
+        Initialize the tool with an API key.
+        """
+        self._client = LinkupClient(api_key=api_key)
+
+    def _run(self, query: str, depth: str = "standard", output_type: str = "searchResults") -> dict:
+        """
+        Executes a search using the Linkup API.
+
+        :param query: The query to search for.
+        :param depth: Search depth (default is "standard").
+        :param output_type: Desired result type (default is "searchResults").
+        :return: A dictionary containing the results or an error message.
+        """
+        try:
+            response = self._client.search(
+                query=query,
+                depth=depth,
+                output_type=output_type
+            )
+            results = [
+                {"name": result.name, "url": result.url, "content": result.content}
+                for result in response.results
+            ]
+            return {"success": True, "results": results}
+        except Exception as e:
+            return {"success": False, "error": str(e)}

From 5e00b74cd493f2fd43e4cdaed49f4797a162734f Mon Sep 17 00:00:00 2001
From: Pedro Pereira
Date: Thu, 19 Dec 2024 17:38:30 +0000
Subject: [PATCH 194/391] chore: update readme

---
 src/crewai_tools/tools/selenium_scraping_tool/README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/crewai_tools/tools/selenium_scraping_tool/README.md b/src/crewai_tools/tools/selenium_scraping_tool/README.md
index 631fcfe0e..e2ddefba1 100644
--- a/src/crewai_tools/tools/selenium_scraping_tool/README.md
+++ b/src/crewai_tools/tools/selenium_scraping_tool/README.md
@@ -31,3 +31,4 @@ tool = SeleniumScrapingTool(website_url='https://example.com', css_element='.mai
 - `css_element`: Mandatory. The CSS selector for a specific element to scrape from the website.
 - `cookie`: Optional. A dictionary containing cookie information. This parameter allows the tool to simulate a session with cookie information, providing access to content that may be restricted to logged-in users.
 - `wait_time`: Optional. The number of seconds the tool waits after loading the website and after setting a cookie, before scraping the content. This allows for dynamic content to load properly.
+- `return_html`: Optional. If True, the tool returns HTML content. If False, the tool returns text content.

From f11756387d25807434d82327fce448f8a5b84d58 Mon Sep 17 00:00:00 2001
From: Pedro Pereira
Date: Thu, 19 Dec 2024 21:06:51 +0000
Subject: [PATCH 195/391] chore: add tests for SeleniumScrapingTool

---
 tests/tools/selenium_scraping_tool_test.py | 93 ++++++++++++++++++++++
 1 file changed, 93 insertions(+)
 create mode 100644 tests/tools/selenium_scraping_tool_test.py

diff --git a/tests/tools/selenium_scraping_tool_test.py b/tests/tools/selenium_scraping_tool_test.py
new file mode 100644
index 000000000..271047449
--- /dev/null
+++ b/tests/tools/selenium_scraping_tool_test.py
@@ -0,0 +1,93 @@
+from unittest.mock import MagicMock, patch
+
+from bs4 import BeautifulSoup
+
+from crewai_tools.tools.selenium_scraping_tool.selenium_scraping_tool import (
+    SeleniumScrapingTool,
+)
+
+
+def mock_driver_with_html(html_content):
+    driver = MagicMock()
+    mock_element = MagicMock()
+    mock_element.get_attribute.return_value = html_content
+    bs = BeautifulSoup(html_content, "html.parser")
+    mock_element.text = bs.get_text()
+
+    driver.find_elements.return_value = [mock_element]
+    driver.find_element.return_value = mock_element
+
+    return driver
+
+
+def initialize_tool_with(mock_driver):
+    tool = SeleniumScrapingTool()
+    tool.driver = MagicMock(return_value=mock_driver)
+
+    return tool
+
+
+def test_tool_initialization():
+    tool = SeleniumScrapingTool()
+
+    assert tool.website_url is None
+    assert tool.css_element is None
+    assert tool.cookie is None
+    assert tool.wait_time == 3
+    assert tool.return_html is False
+
+
+@patch("selenium.webdriver.Chrome")
+def test_scrape_without_css_selector(_mocked_chrome_driver):
+    html_content = "<div>test content</div>"
+    mock_driver = mock_driver_with_html(html_content)
+    tool = initialize_tool_with(mock_driver)
+
+    result = tool._run(website_url="https://example.com")
+
+    assert "test content" in result
+    mock_driver.get.assert_called_once_with("https://example.com")
+    mock_driver.find_element.assert_called_with("tag name", "body")
+    mock_driver.close.assert_called_once()
+
+
+@patch("selenium.webdriver.Chrome")
+def test_scrape_with_css_selector(_mocked_chrome_driver):
+    html_content = "<div>test content</div><div class='test'>test content in a specific div</div>"
+    mock_driver = mock_driver_with_html(html_content)
+    tool = initialize_tool_with(mock_driver)
+
+    result = tool._run(website_url="https://example.com", css_element="div.test")
+
+    assert "test content in a specific div" in result
+    mock_driver.get.assert_called_once_with("https://example.com")
+    mock_driver.find_elements.assert_called_with("css selector", "div.test")
+    mock_driver.close.assert_called_once()
+
+
+@patch("selenium.webdriver.Chrome")
+def test_scrape_with_return_html_true(_mocked_chrome_driver):
+    html_content = "<div>HTML content</div>"
+    mock_driver = mock_driver_with_html(html_content)
+    tool = initialize_tool_with(mock_driver)
+
+    result = tool._run(website_url="https://example.com", return_html=True)
+
+    assert html_content in result
+    mock_driver.get.assert_called_once_with("https://example.com")
+    mock_driver.find_element.assert_called_with("tag name", "body")
+    mock_driver.close.assert_called_once()
+
+
+@patch("selenium.webdriver.Chrome")
+def test_scrape_with_return_html_false(_mocked_chrome_driver):
+    html_content = "<div>HTML content</div>"
+    mock_driver = mock_driver_with_html(html_content)
+    tool = initialize_tool_with(mock_driver)
+
+    result = tool._run(website_url="https://example.com", return_html=False)
+
+    assert "HTML content" in result
+    mock_driver.get.assert_called_once_with("https://example.com")
+    mock_driver.find_element.assert_called_with("tag name", "body")
+    mock_driver.close.assert_called_once()

From bb19f1c74c141fbef2d74dc32c61391fe84da412 Mon Sep 17 00:00:00 2001
From: Tom Mahler
Date: Tue, 24 Dec 2024 12:12:18 +0200
Subject: [PATCH 196/391] using command list instead of string to avoid parsing issues

---
 .../tools/code_interpreter_tool/code_interpreter_tool.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py
index 61c180fe3..9588ace1e 100644
--- a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py
+++ b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py
@@ -79,7 +79,7 @@ class CodeInterpreterTool(BaseTool):
         Install missing libraries in the Docker container
         """
         for library in libraries:
-            container.exec_run(f"pip install {library}")
+            container.exec_run(["pip", "install", library])
 
     def _init_docker_container(self) -> docker.models.containers.Container:
         container_name = "code-interpreter"
@@ -108,8 +108,7 @@ class CodeInterpreterTool(BaseTool):
         container = self._init_docker_container()
         self._install_libraries(container, libraries_used)
 
-        cmd_to_run = f'python3 -c "{code}"'
-        exec_result = container.exec_run(cmd_to_run)
+        exec_result = container.exec_run(["python3", "-c", code])
 
         container.stop()
         container.remove()

From 331840e6cc96567d442eb9ac4a88594eb28a7c76 Mon Sep 17 00:00:00 2001
From: Tom Mahler
Date: Tue, 24 Dec 2024 12:17:57 +0200
Subject: [PATCH 197/391] improved imports from docker for better type hinting

---
 .../code_interpreter_tool.py | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py
index 9588ace1e..1809dcdda 100644
--- a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py
+++ b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py
@@ -2,7 +2,9 @@ import importlib.util
 import os
 from typing import List, Optional, Type
 
-import docker
+from docker import from_env as docker_from_env
+from docker.models.containers import Container
+from docker.errors import ImageNotFound, NotFound
 from crewai.tools import BaseTool
 from pydantic import BaseModel, Field
 
@@ -39,12 +41,12 @@ class CodeInterpreterTool(BaseTool):
         """
         Verify if the Docker image is available. Optionally use a user-provided Dockerfile.
""" - client = docker.from_env() + client = docker_from_env() try: client.images.get(self.default_image_tag) - except docker.errors.ImageNotFound: + except ImageNotFound: if self.user_dockerfile_path and os.path.exists(self.user_dockerfile_path): dockerfile_path = self.user_dockerfile_path else: @@ -73,7 +75,7 @@ class CodeInterpreterTool(BaseTool): return self.run_code_in_docker(code, libraries_used) def _install_libraries( - self, container: docker.models.containers.Container, libraries: List[str] + self, container: Container, libraries: List[str] ) -> None: """ Install missing libraries in the Docker container @@ -81,9 +83,9 @@ class CodeInterpreterTool(BaseTool): for library in libraries: container.exec_run(["pip", "install", library]) - def _init_docker_container(self) -> docker.models.containers.Container: + def _init_docker_container(self) -> Container: container_name = "code-interpreter" - client = docker.from_env() + client = docker_from_env() current_path = os.getcwd() # Check if the container is already running @@ -91,7 +93,7 @@ class CodeInterpreterTool(BaseTool): existing_container = client.containers.get(container_name) existing_container.stop() existing_container.remove() - except docker.errors.NotFound: + except NotFound: pass # Container does not exist, no need to remove return client.containers.run( From 64b98667a35f643f2a8bbedce1acb0bde4541b6f Mon Sep 17 00:00:00 2001 From: Tom Mahler Date: Tue, 24 Dec 2024 14:13:51 +0200 Subject: [PATCH 198/391] fixed code interpreter tests --- tests/tools/test_code_interpreter_tool.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/tools/test_code_interpreter_tool.py b/tests/tools/test_code_interpreter_tool.py index a9ffb9dbc..c45014e91 100644 --- a/tests/tools/test_code_interpreter_tool.py +++ b/tests/tools/test_code_interpreter_tool.py @@ -7,30 +7,30 @@ from crewai_tools.tools.code_interpreter_tool.code_interpreter_tool import ( class TestCodeInterpreterTool(unittest.TestCase): - @patch("crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker") + @patch("crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker_from_env") def test_run_code_in_docker(self, docker_mock): tool = CodeInterpreterTool() code = "print('Hello, World!')" - libraries_used = "numpy,pandas" + libraries_used = ["numpy", "pandas"] expected_output = "Hello, World!\n" - docker_mock.from_env().containers.run().exec_run().exit_code = 0 - docker_mock.from_env().containers.run().exec_run().output = ( + docker_mock().containers.run().exec_run().exit_code = 0 + docker_mock().containers.run().exec_run().output = ( expected_output.encode() ) result = tool.run_code_in_docker(code, libraries_used) self.assertEqual(result, expected_output) - @patch("crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker") + @patch("crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker_from_env") def test_run_code_in_docker_with_error(self, docker_mock): tool = CodeInterpreterTool() code = "print(1/0)" - libraries_used = "numpy,pandas" + libraries_used = ["numpy", "pandas"] expected_output = "Something went wrong while running the code: \nZeroDivisionError: division by zero\n" - docker_mock.from_env().containers.run().exec_run().exit_code = 1 - docker_mock.from_env().containers.run().exec_run().output = ( + docker_mock().containers.run().exec_run().exit_code = 1 + docker_mock().containers.run().exec_run().output = ( b"ZeroDivisionError: division by zero\n" ) result = tool.run_code_in_docker(code, 
libraries_used) From ba8f95964f5f967e543e44db8caaf49cb364f07b Mon Sep 17 00:00:00 2001 From: Tom Mahler Date: Tue, 24 Dec 2024 14:14:09 +0200 Subject: [PATCH 199/391] added unit testing for multi-line output --- tests/tools/test_code_interpreter_tool.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tests/tools/test_code_interpreter_tool.py b/tests/tools/test_code_interpreter_tool.py index c45014e91..6470c9dc1 100644 --- a/tests/tools/test_code_interpreter_tool.py +++ b/tests/tools/test_code_interpreter_tool.py @@ -36,3 +36,18 @@ class TestCodeInterpreterTool(unittest.TestCase): result = tool.run_code_in_docker(code, libraries_used) self.assertEqual(result, expected_output) + + @patch("crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker_from_env") + def test_run_code_in_docker_with_script(self, docker_mock): + tool = CodeInterpreterTool() + code = """print("This is line 1") +print("This is line 2")""" + libraries_used = [] # No additional libraries needed for this test + expected_output = "This is line 1\nThis is line 2\n" + + # Mock Docker responses + docker_mock().containers.run().exec_run().exit_code = 0 + docker_mock().containers.run().exec_run().output = expected_output.encode() + + result = tool.run_code_in_docker(code, libraries_used) + self.assertEqual(result, expected_output) From 0ac6f915fb83c64cdb0b3aa1c5662077caaf66fa Mon Sep 17 00:00:00 2001 From: DarshanDeshpande Date: Thu, 26 Dec 2024 17:37:22 -0500 Subject: [PATCH 200/391] Add all Patronus eval tools and update example --- .../tools/patronus_eval_tool/example.py | 36 ++- .../patronus_eval_tool/patronus_eval_tool.py | 302 +++++++++++++++++- 2 files changed, 314 insertions(+), 24 deletions(-) diff --git a/src/crewai_tools/tools/patronus_eval_tool/example.py b/src/crewai_tools/tools/patronus_eval_tool/example.py index 4015a5f4a..1b0ba028d 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/example.py +++ b/src/crewai_tools/tools/patronus_eval_tool/example.py @@ -1,15 +1,37 @@ from crewai import Agent, Crew, Task -from patronus_eval_tool import PatronusEvalTool - - -patronus_eval_tool = PatronusEvalTool( - evaluators=[{"evaluator": "judge", "criteria": "patronus:is-code"}], tags={} +from patronus_eval_tool import ( + PatronusEvalTool, + PatronusPredifinedCriteriaEvalTool, + PatronusLocalEvaluatorTool, ) +from patronus import Client, EvaluationResult + +# Test the PatronusEvalTool where agent can pick the best evaluator and criteria +patronus_eval_tool = PatronusEvalTool() + +# Test the PatronusPredifinedCriteriaEvalTool where agent uses the defined evaluator and criteria +patronus_eval_tool = PatronusPredifinedCriteriaEvalTool( + evaluators=[{"evaluator": "judge", "criteria": "contains-code"}] +) + +# Test the PatronusLocalEvaluatorTool where agent uses the local evaluator +client = Client() + + +@client.register_local_evaluator("local_evaluator_name") +def my_evaluator(**kwargs): + return EvaluationResult(pass_="PASS", score=0.5, explanation="Explanation test") + + +patronus_eval_tool = PatronusLocalEvaluatorTool( + evaluator="local_evaluator_name", evaluated_model_gold_answer="test" +) + # Create a new agent coding_agent = Agent( role="Coding Agent", - goal="Generate high quality code and verify that the code is correct by using Patronus AI's evaluation tool to check validity of your output code.", + goal="Generate high quality code and verify that the output is code by using Patronus AI's evaluation tool.", backstory="You are an experienced coder who can generate high quality python 
code. You can follow complex instructions accurately and effectively.", tools=[patronus_eval_tool], verbose=True, @@ -17,7 +39,7 @@ coding_agent = Agent( # Define tasks generate_code = Task( - description="Create a simple program to generate the first N numbers in the Fibonacci sequence. Use the evaluator as `judge` from Patronus AI with the criteria `patronus:is-code` and feed your task input as input and your code as output to verify your code validity.", + description="Create a simple program to generate the first N numbers in the Fibonacci sequence. Select the most appropriate evaluator and criteria for evaluating your output.", expected_output="Program that generates the first N numbers in the Fibonacci sequence.", agent=coding_agent, ) diff --git a/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py b/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py index 88ad28253..d765c1701 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py +++ b/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py @@ -1,12 +1,247 @@ import os import json import requests - -from typing import Any, List, Dict +import warnings +from typing import Any, List, Dict, Optional, Type from crewai.tools import BaseTool +from pydantic import BaseModel, Field +from patronus import Client + + +class FixedBaseToolSchema(BaseModel): + evaluated_model_input: Dict = Field( + ..., description="The agent's task description in simple text" + ) + evaluated_model_output: Dict = Field( + ..., description="The agent's output of the task" + ) + evaluated_model_retrieved_context: Dict = Field( + ..., description="The agent's context" + ) + evaluated_model_gold_answer: Dict = Field( + ..., description="The agent's gold answer only if available" + ) + evaluators: List[Dict[str, str]] = Field( + ..., + description="List of dictionaries containing the evaluator and criteria to evaluate the model input and output. An example input for this field: [{'evaluator': '[evaluator-from-user]', 'criteria': '[criteria-from-user]'}]", + ) + + +class FixedLocalEvaluatorToolSchema(BaseModel): + evaluated_model_input: Dict = Field( + ..., description="The agent's task description in simple text" + ) + evaluated_model_output: Dict = Field( + ..., description="The agent's output of the task" + ) + evaluated_model_retrieved_context: Dict = Field( + ..., description="The agent's context" + ) + evaluated_model_gold_answer: Dict = Field( + ..., description="The agent's gold answer only if available" + ) + evaluator: str = Field(..., description="The registered local evaluator") class PatronusEvalTool(BaseTool): + name: str = "Patronus Evaluation Tool" + evaluate_url: str = "https://api.patronus.ai/v1/evaluate" + evaluators: List[Dict[str, str]] = [] + criteria: List[Dict[str, str]] = [] + description: str = "" + + def __init__(self, **kwargs: Any): + super().__init__(**kwargs) + temp_evaluators, temp_criteria = self._init_run() + self.evaluators = temp_evaluators + self.criteria = temp_criteria + self.description = self._generate_description() + warnings.warn("You are allowing the agent to select the best evaluator and criteria when you use the `PatronusEvalTool`. 
If this is not intended then please use `PatronusPredifinedCriteriaEvalTool` instead.") + + def _init_run(self): + content = json.loads( + requests.get( + "https://api.patronus.ai/v1/evaluators", + headers={ + "accept": "application/json", + "X-API-KEY": os.environ["PATRONUS_API_KEY"], + }, + ).text + )["evaluators"] + ids, evaluators = set(), [] + for i in content: + if not i["deprecated"] and i["id"] not in ids: + evaluators.append( + { + "id": i["id"], + "name": i["name"], + "description": i["description"], + "aliases": i["aliases"], + } + ) + ids.add(i["id"]) + + content = json.loads( + requests.get( + "https://api.patronus.ai/v1/evaluator-criteria", + headers={ + "accept": "application/json", + "X-API-KEY": os.environ["PATRONUS_API_KEY"], + }, + ).text + )["evaluator_criteria"] + criteria = [] + for i in content: + if i["config"].get("pass_criteria", None): + if i["config"].get("rubric", None): + criteria.append( + { + "evaluator": i["evaluator_family"], + "name": i["name"], + "pass_criteria": i["config"]["pass_criteria"], + "rubric": i["config"]["rubric"], + } + ) + else: + criteria.append( + { + "evaluator": i["evaluator_family"], + "name": i["name"], + "pass_criteria": i["config"]["pass_criteria"], + } + ) + elif i["description"]: + criteria.append( + { + "evaluator": i["evaluator_family"], + "name": i["name"], + "description": i["description"], + } + ) + + return evaluators, criteria + + def _generate_description(self) -> str: + criteria = "\n".join([json.dumps(i) for i in self.criteria]) + return f"""This tool calls the Patronus Evaluation API that takes the following arguments: +1. evaluated_model_input: str: The agent's task description in simple text +2. evaluated_model_output: str: The agent's output of the task +3. evaluated_model_retrieved_context: str: The agent's context +4. evaluators: This is a list of dictionaries containing one of the following evaluators and the corresponding criteria. An example input for this field: [{{"evaluator": "Judge", "criteria": "patronus:is-code"}}] + +Evaluators: +{criteria} + +You must ONLY choose the most appropriate evaluator and criteria based on the "pass_criteria" or "description" fields for your evaluation task and nothing from outside of the options present.""" + + def _run( + self, + evaluated_model_input: Optional[str], + evaluated_model_output: Optional[str], + evaluated_model_retrieved_context: Optional[str], + evaluators: List[Dict[str, str]], + ) -> Any: + + # Assert correct format of evaluators + evals = [] + for e in evaluators: + evals.append( + { + "evaluator": e["evaluator"].lower(), + "criteria": e["name"] if "name" in e else e["criteria"], + } + ) + + data = { + "evaluated_model_input": evaluated_model_input, + "evaluated_model_output": evaluated_model_output, + "evaluated_model_retrieved_context": evaluated_model_retrieved_context, + "evaluators": evals, + } + + headers = { + "X-API-KEY": os.getenv("PATRONUS_API_KEY"), + "accept": "application/json", + "content-type": "application/json", + } + + response = requests.post( + self.evaluate_url, headers=headers, data=json.dumps(data) + ) + if response.status_code != 200: + raise Exception( + f"Failed to evaluate model input and output. Response status code: {response.status_code}. 
Reason: {response.text}" + ) + + return response.json() + + +class PatronusLocalEvaluatorTool(BaseTool): + name: str = "Patronus Local Evaluator Tool" + evaluator: str = "The registered local evaluator" + evaluated_model_gold_answer: str = "The agent's gold answer" + description: str = ( + "This tool is used to evaluate the model input and output using custom function evaluators." + ) + client: Any = None + args_schema: Type[BaseModel] = FixedLocalEvaluatorToolSchema + + class Config: + arbitrary_types_allowed = True + + def __init__(self, evaluator: str, evaluated_model_gold_answer: str, **kwargs: Any): + super().__init__(**kwargs) + self.client = Client() + if evaluator: + self.evaluator = evaluator + self.evaluated_model_gold_answer = evaluated_model_gold_answer + self.description = f"This tool calls the Patronus Evaluation API that takes an additional argument in addition to the following new argument:\n evaluators={evaluator}, evaluated_model_gold_answer={evaluated_model_gold_answer}" + self._generate_description() + print( + f"Updating judge criteria, project name, experiment name, and output file, gold_answer to: {self.evaluator}, {self.evaluated_model_gold_answer}" + ) + + def _run( + self, + **kwargs: Any, + ) -> Any: + evaluated_model_input = kwargs.get("evaluated_model_input") + evaluated_model_output = kwargs.get("evaluated_model_output") + evaluated_model_retrieved_context = kwargs.get( + "evaluated_model_retrieved_context" + ) + evaluated_model_gold_answer = self.evaluated_model_gold_answer + evaluator = self.evaluator + + result = self.client.evaluate( + evaluator=evaluator, + evaluated_model_input=( + evaluated_model_input + if isinstance(evaluated_model_input, str) + else evaluated_model_input.get("description") + ), + evaluated_model_output=( + evaluated_model_output + if isinstance(evaluated_model_output, str) + else evaluated_model_output.get("description") + ), + evaluated_model_retrieved_context=( + evaluated_model_retrieved_context + if isinstance(evaluated_model_retrieved_context, str) + else evaluated_model_retrieved_context.get("description") + ), + evaluated_model_gold_answer=( + evaluated_model_gold_answer + if isinstance(evaluated_model_gold_answer, str) + else evaluated_model_gold_answer.get("description") + ), + tags={}, + ) + output = f"Evaluation result: {result.pass_}, Explanation: {result.explanation}" + return output + + +class PatronusPredifinedCriteriaEvalTool(BaseTool): """ PatronusEvalTool is a tool to automatically evaluate and score agent interactions. @@ -15,32 +250,65 @@ class PatronusEvalTool(BaseTool): name: str = "Call Patronus API tool for evaluation of model inputs and outputs" description: str = ( - """This tool calls the Patronus Evaluation API that takes the following arguments: -1. evaluated_model_input: str: The agent's task description -2. evaluated_model_output: str: The agent's output code -3. 
evaluators: list[dict[str,str]]: list of dictionaries, each with a an evaluator (such as `judge`) and a criteria (like `patronus:[criteria-name-here]`).""" + """This tool calls the Patronus Evaluation API that takes the following arguments:""" ) evaluate_url: str = "https://api.patronus.ai/v1/evaluate" + args_schema: Type[BaseModel] = FixedBaseToolSchema + evaluators: List[Dict[str, str]] = [] + + def __init__(self, evaluators: List[Dict[str, str]], **kwargs: Any): + super().__init__(**kwargs) + if evaluators: + self.evaluators = evaluators + self.description = f"This tool calls the Patronus Evaluation API that takes an additional argument in addition to the following new argument:\n evaluators={evaluators}" + self._generate_description() + print(f"Updating judge criteria to: {self.evaluators}") def _run( self, - evaluated_model_input: str, - evaluated_model_output: str, - evaluators: List[Dict[str, str]], - tags: dict, + **kwargs: Any, ) -> Any: - api_key = os.getenv("PATRONUS_API_KEY") + evaluated_model_input = kwargs.get("evaluated_model_input") + evaluated_model_output = kwargs.get("evaluated_model_output") + evaluated_model_retrieved_context = kwargs.get( + "evaluated_model_retrieved_context" + ) + evaluated_model_gold_answer = kwargs.get("evaluated_model_gold_answer") + evaluators = self.evaluators + headers = { - "X-API-KEY": api_key, + "X-API-KEY": os.getenv("PATRONUS_API_KEY"), "accept": "application/json", "content-type": "application/json", } + data = { - "evaluated_model_input": evaluated_model_input, - "evaluated_model_output": evaluated_model_output, - "evaluators": evaluators, - "tags": tags, + "evaluated_model_input": ( + evaluated_model_input + if isinstance(evaluated_model_input, str) + else evaluated_model_input.get("description") + ), + "evaluated_model_output": ( + evaluated_model_output + if isinstance(evaluated_model_output, str) + else evaluated_model_output.get("description") + ), + "evaluated_model_retrieved_context": ( + evaluated_model_retrieved_context + if isinstance(evaluated_model_retrieved_context, str) + else evaluated_model_retrieved_context.get("description") + ), + "evaluated_model_gold_answer": ( + evaluated_model_gold_answer + if isinstance(evaluated_model_gold_answer, str) + else evaluated_model_gold_answer.get("description") + ), + "evaluators": ( + evaluators + if isinstance(evaluators, list) + else evaluators.get("description") + ), } response = requests.post( @@ -48,7 +316,7 @@ class PatronusEvalTool(BaseTool): ) if response.status_code != 200: raise Exception( - f"Failed to evaluate model input and output. Reason: {response.text}" + f"Failed to evaluate model input and output. Status code: {response.status_code}. 
Reason: {response.text}" ) return response.json() From 7da783ef0ebde2895f7507d1abeba8b42aac97fa Mon Sep 17 00:00:00 2001 From: DarshanDeshpande Date: Thu, 26 Dec 2024 17:44:04 -0500 Subject: [PATCH 201/391] Minor formatting changes --- src/crewai_tools/tools/patronus_eval_tool/example.py | 2 -- src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/src/crewai_tools/tools/patronus_eval_tool/example.py b/src/crewai_tools/tools/patronus_eval_tool/example.py index 1b0ba028d..56b8f90d6 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/example.py +++ b/src/crewai_tools/tools/patronus_eval_tool/example.py @@ -17,12 +17,10 @@ patronus_eval_tool = PatronusPredifinedCriteriaEvalTool( # Test the PatronusLocalEvaluatorTool where agent uses the local evaluator client = Client() - @client.register_local_evaluator("local_evaluator_name") def my_evaluator(**kwargs): return EvaluationResult(pass_="PASS", score=0.5, explanation="Explanation test") - patronus_eval_tool = PatronusLocalEvaluatorTool( evaluator="local_evaluator_name", evaluated_model_gold_answer="test" ) diff --git a/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py b/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py index d765c1701..1dfee31ba 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py +++ b/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py @@ -198,7 +198,7 @@ class PatronusLocalEvaluatorTool(BaseTool): self.description = f"This tool calls the Patronus Evaluation API that takes an additional argument in addition to the following new argument:\n evaluators={evaluator}, evaluated_model_gold_answer={evaluated_model_gold_answer}" self._generate_description() print( - f"Updating judge criteria, project name, experiment name, and output file, gold_answer to: {self.evaluator}, {self.evaluated_model_gold_answer}" + f"Updating judge evaluator, gold_answer to: {self.evaluator}, {self.evaluated_model_gold_answer}" ) def _run( From c7c8cd0a3cdb52234ec593f89f760e574fc36c41 Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Sat, 28 Dec 2024 00:54:49 +0000 Subject: [PATCH 202/391] feat: add URL validation and return_html examples - Add comprehensive URL validation in schema and _create_driver - Add URL format, length, and character validation - Add meaningful error messages for validation failures - Add return_html usage examples in README.md Co-Authored-By: Joe Moura --- .../tools/selenium_scraping_tool/README.md | 10 ++++++ .../selenium_scraping_tool.py | 36 +++++++++++++++++-- 2 files changed, 44 insertions(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/selenium_scraping_tool/README.md b/src/crewai_tools/tools/selenium_scraping_tool/README.md index e2ddefba1..2d54eb970 100644 --- a/src/crewai_tools/tools/selenium_scraping_tool/README.md +++ b/src/crewai_tools/tools/selenium_scraping_tool/README.md @@ -24,6 +24,16 @@ tool = SeleniumScrapingTool(website_url='https://example.com', css_element='.mai # Example 4: Scrape using optional parameters for customized scraping tool = SeleniumScrapingTool(website_url='https://example.com', css_element='.main-content', cookie={'name': 'user', 'value': 'John Doe'}) + +# Example 5: Scrape content in HTML format +tool = SeleniumScrapingTool(website_url='https://example.com', return_html=True) +result = tool._run() +# Returns HTML content like: ['
<div>Hello World</div>
', ''] + +# Example 6: Scrape content in text format (default) +tool = SeleniumScrapingTool(website_url='https://example.com', return_html=False) +result = tool._run() +# Returns text content like: ['Hello World', 'Copyright 2024'] ``` ## Arguments diff --git a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py index 5f7d9391b..d7a55428d 100644 --- a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py +++ b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py @@ -1,8 +1,10 @@ +import re import time from typing import Any, Optional, Type +from urllib.parse import urlparse from crewai.tools import BaseTool -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, validator from selenium import webdriver from selenium.webdriver.chrome.options import Options from selenium.webdriver.common.by import By @@ -15,12 +17,35 @@ class FixedSeleniumScrapingToolSchema(BaseModel): class SeleniumScrapingToolSchema(FixedSeleniumScrapingToolSchema): """Input for SeleniumScrapingTool.""" - website_url: str = Field(..., description="Mandatory website url to read the file") + website_url: str = Field(..., description="Mandatory website url to read the file. Must start with http:// or https://") css_element: str = Field( ..., description="Mandatory css reference for element to scrape from the website", ) + @validator('website_url') + def validate_website_url(cls, v): + if not v: + raise ValueError("Website URL cannot be empty") + + if len(v) > 2048: # Common maximum URL length + raise ValueError("URL is too long (max 2048 characters)") + + if not re.match(r'^https?://', v): + raise ValueError("URL must start with http:// or https://") + + try: + result = urlparse(v) + if not all([result.scheme, result.netloc]): + raise ValueError("Invalid URL format") + except Exception as e: + raise ValueError(f"Invalid URL: {str(e)}") + + if re.search(r'\s', v): + raise ValueError("URL cannot contain whitespace") + + return v + class SeleniumScrapingTool(BaseTool): name: str = "Read a website content" @@ -103,6 +128,13 @@ class SeleniumScrapingTool(BaseTool): return elements_content def _create_driver(self, url, cookie, wait_time): + if not url: + raise ValueError("URL cannot be empty") + + # Validate URL format + if not re.match(r'^https?://', url): + raise ValueError("URL must start with http:// or https://") + options = Options() options.add_argument("--headless") driver = self.driver(options=options) From c3ebbba8aefdc5d5c6cf0be1ab855720cc2e29d5 Mon Sep 17 00:00:00 2001 From: Marco Vinciguerra Date: Sat, 28 Dec 2024 09:11:32 +0100 Subject: [PATCH 203/391] Update __init__.py --- src/crewai_tools/tools/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 23565dbea..00f992833 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -31,6 +31,7 @@ from .rag.rag_tool import RagTool from .scrape_element_from_website.scrape_element_from_website import ( ScrapeElementFromWebsiteTool, ) +from .scrapegraph_scrape_tool.scrapegraph_scrape_tool import ScrapeGraphScrapeTool, ScrapegraphScrapeToolSchema from .scrape_website_tool.scrape_website_tool import ScrapeWebsiteTool from .scrapfly_scrape_website_tool.scrapfly_scrape_website_tool import ( ScrapflyScrapeWebsiteTool, From e5c47e46a8fa9b078f13f12f5973955536c4033f Mon Sep 17 00:00:00 2001 From: juliette_sivan 
Date: Sat, 28 Dec 2024 10:59:06 -0500 Subject: [PATCH 204/391] add import tools --- src/crewai_tools/__init__.py | 1 + src/crewai_tools/tools/__init__.py | 1 + 2 files changed, 2 insertions(+) diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 12523a214..68e778006 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -17,6 +17,7 @@ from .tools import ( FirecrawlSearchTool, GithubSearchTool, JSONSearchTool, + LinkupSearchTool, LlamaIndexTool, MDXSearchTool, MultiOnTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 23565dbea..67c9c79e7 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -20,6 +20,7 @@ from .firecrawl_scrape_website_tool.firecrawl_scrape_website_tool import ( from .firecrawl_search_tool.firecrawl_search_tool import FirecrawlSearchTool from .github_search_tool.github_search_tool import GithubSearchTool from .json_search_tool.json_search_tool import JSONSearchTool +from .linkup_search_tool.linkup_search_tool import LinkupSearchTool from .llamaindex_tool.llamaindex_tool import LlamaIndexTool from .mdx_seach_tool.mdx_search_tool import MDXSearchTool from .multion_tool.multion_tool import MultiOnTool From 63e23c06c56e3ba2217b3ada35bb5af596dcfedf Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Sat, 28 Dec 2024 21:55:35 +0000 Subject: [PATCH 205/391] Fix FileReadTool infinite loop by maintaining original schema Co-Authored-By: Joe Moura --- src/crewai_tools/tools/file_read_tool/file_read_tool.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/crewai_tools/tools/file_read_tool/file_read_tool.py b/src/crewai_tools/tools/file_read_tool/file_read_tool.py index fe34c9d8b..8a6c2e2d8 100644 --- a/src/crewai_tools/tools/file_read_tool/file_read_tool.py +++ b/src/crewai_tools/tools/file_read_tool/file_read_tool.py @@ -27,7 +27,6 @@ class FileReadTool(BaseTool): if file_path is not None: self.file_path = file_path self.description = f"A tool that can be used to read {file_path}'s content." - self.args_schema = FixedFileReadToolSchema self._generate_description() def _run( From 5e2c38c34933aba3cfd91106a58b26d13d98545c Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Sat, 28 Dec 2024 22:26:37 +0000 Subject: [PATCH 206/391] Improve FileReadTool error handling and validation Co-Authored-By: Joe Moura --- .../tools/file_read_tool/file_read_tool.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/crewai_tools/tools/file_read_tool/file_read_tool.py b/src/crewai_tools/tools/file_read_tool/file_read_tool.py index 8a6c2e2d8..32db13f21 100644 --- a/src/crewai_tools/tools/file_read_tool/file_read_tool.py +++ b/src/crewai_tools/tools/file_read_tool/file_read_tool.py @@ -4,13 +4,7 @@ from crewai.tools import BaseTool from pydantic import BaseModel, Field -class FixedFileReadToolSchema(BaseModel): - """Input for FileReadTool.""" - - pass - - -class FileReadToolSchema(FixedFileReadToolSchema): +class FileReadToolSchema(BaseModel): """Input for FileReadTool.""" file_path: str = Field(..., description="Mandatory file full path to read the file") @@ -33,9 +27,16 @@ class FileReadTool(BaseTool): self, **kwargs: Any, ) -> Any: + file_path = kwargs.get("file_path", self.file_path) + if file_path is None: + return "Error: No file path provided. 
Please provide a file path either in the constructor or as an argument." + try: - file_path = kwargs.get("file_path", self.file_path) with open(file_path, "r") as file: return file.read() + except FileNotFoundError: + return f"Error: File not found at path: {file_path}" + except PermissionError: + return f"Error: Permission denied when trying to read file: {file_path}" except Exception as e: - return f"Fail to read the file {file_path}. Error: {e}" + return f"Error: Failed to read file {file_path}. {str(e)}" From aaf2641cc82e03324ce19c288c256269c5c18042 Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Sat, 28 Dec 2024 22:29:46 +0000 Subject: [PATCH 207/391] Add comprehensive tests for FileReadTool Co-Authored-By: Joe Moura --- tests/file_read_tool_test.py | 84 ++++++++++++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) create mode 100644 tests/file_read_tool_test.py diff --git a/tests/file_read_tool_test.py b/tests/file_read_tool_test.py new file mode 100644 index 000000000..4646df24c --- /dev/null +++ b/tests/file_read_tool_test.py @@ -0,0 +1,84 @@ +import os +import pytest +from crewai_tools import FileReadTool + +def test_file_read_tool_constructor(): + """Test FileReadTool initialization with file_path.""" + # Create a temporary test file + test_file = "/tmp/test_file.txt" + test_content = "Hello, World!" + with open(test_file, "w") as f: + f.write(test_content) + + # Test initialization with file_path + tool = FileReadTool(file_path=test_file) + assert tool.file_path == test_file + assert "test_file.txt" in tool.description + + # Clean up + os.remove(test_file) + +def test_file_read_tool_run(): + """Test FileReadTool _run method with file_path at runtime.""" + # Create a temporary test file + test_file = "/tmp/test_file.txt" + test_content = "Hello, World!" 
+ with open(test_file, "w") as f: + f.write(test_content) + + # Test reading file with runtime file_path + tool = FileReadTool() + result = tool._run(file_path=test_file) + assert result == test_content + + # Clean up + os.remove(test_file) + +def test_file_read_tool_error_handling(): + """Test FileReadTool error handling.""" + # Test missing file path + tool = FileReadTool() + result = tool._run() + assert "Error: No file path provided" in result + + # Test non-existent file + result = tool._run(file_path="/nonexistent/file.txt") + assert "Error: File not found at path:" in result + + # Test permission error (create a file without read permissions) + test_file = "/tmp/no_permission.txt" + with open(test_file, "w") as f: + f.write("test") + os.chmod(test_file, 0o000) + + result = tool._run(file_path=test_file) + assert "Error: Permission denied" in result + + # Clean up + os.chmod(test_file, 0o666) # Restore permissions to delete + os.remove(test_file) + +def test_file_read_tool_constructor_and_run(): + """Test FileReadTool using both constructor and runtime file paths.""" + # Create two test files + test_file1 = "/tmp/test1.txt" + test_file2 = "/tmp/test2.txt" + content1 = "File 1 content" + content2 = "File 2 content" + + with open(test_file1, "w") as f1, open(test_file2, "w") as f2: + f1.write(content1) + f2.write(content2) + + # Test that constructor file_path works + tool = FileReadTool(file_path=test_file1) + result = tool._run() + assert result == content1 + + # Test that runtime file_path overrides constructor + result = tool._run(file_path=test_file2) + assert result == content2 + + # Clean up + os.remove(test_file1) + os.remove(test_file2) From d3391d9ba4c1b3696dbbe7188aa59e6dc6ce8761 Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Sat, 28 Dec 2024 23:10:51 +0000 Subject: [PATCH 208/391] Add comprehensive documentation and type hints to FileReadTool Co-Authored-By: Joe Moura --- .../tools/file_read_tool/file_read_tool.py | 49 +++++++++++++++++-- 1 file changed, 44 insertions(+), 5 deletions(-) diff --git a/src/crewai_tools/tools/file_read_tool/file_read_tool.py b/src/crewai_tools/tools/file_read_tool/file_read_tool.py index 32db13f21..323a26d51 100644 --- a/src/crewai_tools/tools/file_read_tool/file_read_tool.py +++ b/src/crewai_tools/tools/file_read_tool/file_read_tool.py @@ -11,22 +11,49 @@ class FileReadToolSchema(BaseModel): class FileReadTool(BaseTool): + """A tool for reading file contents. + + This tool inherits its schema handling from BaseTool to avoid recursive schema + definition issues. The args_schema is set to FileReadToolSchema which defines + the required file_path parameter. The schema should not be overridden in the + constructor as it would break the inheritance chain and cause infinite loops. + + The tool supports two ways of specifying the file path: + 1. At construction time via the file_path parameter + 2. At runtime via the file_path parameter in the tool's input + + Args: + file_path (Optional[str]): Path to the file to be read. If provided, + this becomes the default file path for the tool. + **kwargs: Additional keyword arguments passed to BaseTool. + + Example: + >>> tool = FileReadTool(file_path="/path/to/file.txt") + >>> content = tool.run() # Reads /path/to/file.txt + >>> content = tool.run(file_path="/path/to/other.txt") # Reads other.txt + """ name: str = "Read a file's content" - description: str = "A tool that can be used to read a file's content." 
+ description: str = "A tool that reads the content of a file. To use this tool, provide a 'file_path' parameter with the path to the file you want to read." args_schema: Type[BaseModel] = FileReadToolSchema file_path: Optional[str] = None - def __init__(self, file_path: Optional[str] = None, **kwargs): + def __init__(self, file_path: Optional[str] = None, **kwargs: Any) -> None: + """Initialize the FileReadTool. + + Args: + file_path (Optional[str]): Path to the file to be read. If provided, + this becomes the default file path for the tool. + **kwargs: Additional keyword arguments passed to BaseTool. + """ super().__init__(**kwargs) if file_path is not None: self.file_path = file_path - self.description = f"A tool that can be used to read {file_path}'s content." - self._generate_description() + self.description = f"A tool that reads file content. The default file is {file_path}, but you can provide a different 'file_path' parameter to read another file." def _run( self, **kwargs: Any, - ) -> Any: + ) -> str: file_path = kwargs.get("file_path", self.file_path) if file_path is None: return "Error: No file path provided. Please provide a file path either in the constructor or as an argument." @@ -40,3 +67,15 @@ class FileReadTool(BaseTool): return f"Error: Permission denied when trying to read file: {file_path}" except Exception as e: return f"Error: Failed to read file {file_path}. {str(e)}" + + def _generate_description(self) -> None: + """Generate the tool description based on file path. + + This method updates the tool's description to include information about + the default file path while maintaining the ability to specify a different + file at runtime. + + Returns: + None + """ + self.description = f"A tool that can be used to read {self.file_path}'s content." 
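Taken together, patches 205 through 208 leave `FileReadTool` reporting every failure from `_run` as an `Error: ...` string rather than an exception, with the file path supplied either at construction time or per call. A minimal sketch of the resulting behavior follows; the paths and file contents are illustrative only, not taken from the patches:

```python
from crewai_tools import FileReadTool

# Path fixed at construction time; _run() falls back to it.
tool = FileReadTool(file_path="/tmp/example.txt")
content = tool._run()  # file content, or an error string if the file is missing

# A file_path passed at call time overrides the constructor default.
other = tool._run(file_path="/tmp/other.txt")

# With no path at all, the patched tool returns an error string instead of raising.
msg = FileReadTool()._run()
assert msg.startswith("Error: No file path provided")
```

Returning strings instead of raising keeps the failure visible to the calling agent, which matches how the tests in patch 207 assert directly on the error messages.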
From 029afd3e145030ed6a0d0141a899beaa75311099 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Sun, 29 Dec 2024 12:23:08 -0300 Subject: [PATCH 209/391] Update __init__.py --- src/crewai_tools/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 87aca8531..65a90a01b 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -26,6 +26,8 @@ from .tools import ( PGSearchTool, RagTool, ScrapeElementFromWebsiteTool, + ScrapeGraphScrapeTool, + ScrapegraphScrapeToolSchema ScrapeWebsiteTool, ScrapflyScrapeWebsiteTool, SeleniumScrapingTool, From 15d6314379cd32b29431fcff101185246fb315d3 Mon Sep 17 00:00:00 2001 From: Rebecca Qian Date: Tue, 31 Dec 2024 03:02:15 -0500 Subject: [PATCH 210/391] Create separate tool classes --- .../tools/patronus_eval_tool/example.py | 38 ++- .../patronus_eval_tool/patronus_eval_tool.py | 250 +++--------------- .../patronus_local_evaluator_tool.py | 89 +++++++ .../patronus_predefined_criteria_eval_tool.py | 108 ++++++++ 4 files changed, 257 insertions(+), 228 deletions(-) create mode 100644 src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py create mode 100644 src/crewai_tools/tools/patronus_eval_tool/patronus_predefined_criteria_eval_tool.py diff --git a/src/crewai_tools/tools/patronus_eval_tool/example.py b/src/crewai_tools/tools/patronus_eval_tool/example.py index 56b8f90d6..83787c86e 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/example.py +++ b/src/crewai_tools/tools/patronus_eval_tool/example.py @@ -1,31 +1,43 @@ from crewai import Agent, Crew, Task from patronus_eval_tool import ( PatronusEvalTool, - PatronusPredifinedCriteriaEvalTool, +) +from patronus_local_evaluator_tool import ( PatronusLocalEvaluatorTool, ) -from patronus import Client, EvaluationResult - -# Test the PatronusEvalTool where agent can pick the best evaluator and criteria -patronus_eval_tool = PatronusEvalTool() - -# Test the PatronusPredifinedCriteriaEvalTool where agent uses the defined evaluator and criteria -patronus_eval_tool = PatronusPredifinedCriteriaEvalTool( - evaluators=[{"evaluator": "judge", "criteria": "contains-code"}] +from patronus_predefined_criteria_eval_tool import ( + PatronusPredefinedCriteriaEvalTool, ) +from patronus import Client, EvaluationResult +import random + # Test the PatronusLocalEvaluatorTool where agent uses the local evaluator client = Client() -@client.register_local_evaluator("local_evaluator_name") +# Example of an evaluator that returns a random pass/fail result +@client.register_local_evaluator("random_evaluator") def my_evaluator(**kwargs): - return EvaluationResult(pass_="PASS", score=0.5, explanation="Explanation test") + score = random.random() + return EvaluationResult( + score_raw=score, + pass_=score >= 0.5, + explanation="example explanation" # Optional justification for LLM judges + ) +# 1. Uses PatronusEvalTool: agent can pick the best evaluator and criteria +# patronus_eval_tool = PatronusEvalTool() + +# 2. Uses PatronusPredefinedCriteriaEvalTool: agent uses the defined evaluator and criteria +# patronus_eval_tool = PatronusPredefinedCriteriaEvalTool( +# evaluators=[{"evaluator": "judge", "criteria": "contains-code"}] +# ) + +# 3. 
Uses PatronusLocalEvaluatorTool: agent uses user defined evaluator patronus_eval_tool = PatronusLocalEvaluatorTool( - evaluator="local_evaluator_name", evaluated_model_gold_answer="test" + evaluator="random_evaluator", evaluated_model_gold_answer="example label" ) - # Create a new agent coding_agent = Agent( role="Coding Agent", diff --git a/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py b/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py index 1dfee31ba..9136cfb59 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py +++ b/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py @@ -8,40 +8,6 @@ from pydantic import BaseModel, Field from patronus import Client -class FixedBaseToolSchema(BaseModel): - evaluated_model_input: Dict = Field( - ..., description="The agent's task description in simple text" - ) - evaluated_model_output: Dict = Field( - ..., description="The agent's output of the task" - ) - evaluated_model_retrieved_context: Dict = Field( - ..., description="The agent's context" - ) - evaluated_model_gold_answer: Dict = Field( - ..., description="The agent's gold answer only if available" - ) - evaluators: List[Dict[str, str]] = Field( - ..., - description="List of dictionaries containing the evaluator and criteria to evaluate the model input and output. An example input for this field: [{'evaluator': '[evaluator-from-user]', 'criteria': '[criteria-from-user]'}]", - ) - - -class FixedLocalEvaluatorToolSchema(BaseModel): - evaluated_model_input: Dict = Field( - ..., description="The agent's task description in simple text" - ) - evaluated_model_output: Dict = Field( - ..., description="The agent's output of the task" - ) - evaluated_model_retrieved_context: Dict = Field( - ..., description="The agent's context" - ) - evaluated_model_gold_answer: Dict = Field( - ..., description="The agent's gold answer only if available" - ) - evaluator: str = Field(..., description="The registered local evaluator") - class PatronusEvalTool(BaseTool): name: str = "Patronus Evaluation Tool" @@ -56,10 +22,10 @@ class PatronusEvalTool(BaseTool): self.evaluators = temp_evaluators self.criteria = temp_criteria self.description = self._generate_description() - warnings.warn("You are allowing the agent to select the best evaluator and criteria when you use the `PatronusEvalTool`. If this is not intended then please use `PatronusPredifinedCriteriaEvalTool` instead.") + warnings.warn("You are allowing the agent to select the best evaluator and criteria when you use the `PatronusEvalTool`. 
If this is not intended then please use `PatronusPredefinedCriteriaEvalTool` instead.") def _init_run(self): - content = json.loads( + evaluators_set = json.loads( requests.get( "https://api.patronus.ai/v1/evaluators", headers={ @@ -69,19 +35,19 @@ class PatronusEvalTool(BaseTool): ).text )["evaluators"] ids, evaluators = set(), [] - for i in content: - if not i["deprecated"] and i["id"] not in ids: + for ev in evaluators_set: + if not ev["deprecated"] and ev["id"] not in ids: evaluators.append( { - "id": i["id"], - "name": i["name"], - "description": i["description"], - "aliases": i["aliases"], + "id": ev["id"], + "name": ev["name"], + "description": ev["description"], + "aliases": ev["aliases"], } ) - ids.add(i["id"]) + ids.add(ev["id"]) - content = json.loads( + criteria_set = json.loads( requests.get( "https://api.patronus.ai/v1/evaluator-criteria", headers={ @@ -91,31 +57,31 @@ class PatronusEvalTool(BaseTool): ).text )["evaluator_criteria"] criteria = [] - for i in content: - if i["config"].get("pass_criteria", None): - if i["config"].get("rubric", None): + for cr in criteria_set: + if cr["config"].get("pass_criteria", None): + if cr["config"].get("rubric", None): criteria.append( { - "evaluator": i["evaluator_family"], - "name": i["name"], - "pass_criteria": i["config"]["pass_criteria"], - "rubric": i["config"]["rubric"], + "evaluator": cr["evaluator_family"], + "name": cr["name"], + "pass_criteria": cr["config"]["pass_criteria"], + "rubric": cr["config"]["rubric"], } ) else: criteria.append( { - "evaluator": i["evaluator_family"], - "name": i["name"], - "pass_criteria": i["config"]["pass_criteria"], + "evaluator": cr["evaluator_family"], + "name": cr["name"], + "pass_criteria": cr["config"]["pass_criteria"], } ) - elif i["description"]: + elif cr["description"]: criteria.append( { - "evaluator": i["evaluator_family"], - "name": i["name"], - "description": i["description"], + "evaluator": cr["evaluator_family"], + "name": cr["name"], + "description": cr["description"], } ) @@ -124,15 +90,15 @@ class PatronusEvalTool(BaseTool): def _generate_description(self) -> str: criteria = "\n".join([json.dumps(i) for i in self.criteria]) return f"""This tool calls the Patronus Evaluation API that takes the following arguments: -1. evaluated_model_input: str: The agent's task description in simple text -2. evaluated_model_output: str: The agent's output of the task -3. evaluated_model_retrieved_context: str: The agent's context -4. evaluators: This is a list of dictionaries containing one of the following evaluators and the corresponding criteria. An example input for this field: [{{"evaluator": "Judge", "criteria": "patronus:is-code"}}] + 1. evaluated_model_input: str: The agent's task description in simple text + 2. evaluated_model_output: str: The agent's output of the task + 3. evaluated_model_retrieved_context: str: The agent's context + 4. evaluators: This is a list of dictionaries containing one of the following evaluators and the corresponding criteria. 
An example input for this field: [{{"evaluator": "Judge", "criteria": "patronus:is-code"}}] -Evaluators: -{criteria} + Evaluators: + {criteria} -You must ONLY choose the most appropriate evaluator and criteria based on the "pass_criteria" or "description" fields for your evaluation task and nothing from outside of the options present.""" + You must ONLY choose the most appropriate evaluator and criteria based on the "pass_criteria" or "description" fields for your evaluation task and nothing from outside of the options present.""" def _run( self, @@ -144,11 +110,11 @@ You must ONLY choose the most appropriate evaluator and criteria based on the "p # Assert correct format of evaluators evals = [] - for e in evaluators: + for ev in evaluators: evals.append( { - "evaluator": e["evaluator"].lower(), - "criteria": e["name"] if "name" in e else e["criteria"], + "evaluator": ev["evaluator"].lower(), + "criteria": ev["name"] if "name" in ev else ev["criteria"], } ) @@ -173,150 +139,4 @@ You must ONLY choose the most appropriate evaluator and criteria based on the "p f"Failed to evaluate model input and output. Response status code: {response.status_code}. Reason: {response.text}" ) - return response.json() - - -class PatronusLocalEvaluatorTool(BaseTool): - name: str = "Patronus Local Evaluator Tool" - evaluator: str = "The registered local evaluator" - evaluated_model_gold_answer: str = "The agent's gold answer" - description: str = ( - "This tool is used to evaluate the model input and output using custom function evaluators." - ) - client: Any = None - args_schema: Type[BaseModel] = FixedLocalEvaluatorToolSchema - - class Config: - arbitrary_types_allowed = True - - def __init__(self, evaluator: str, evaluated_model_gold_answer: str, **kwargs: Any): - super().__init__(**kwargs) - self.client = Client() - if evaluator: - self.evaluator = evaluator - self.evaluated_model_gold_answer = evaluated_model_gold_answer - self.description = f"This tool calls the Patronus Evaluation API that takes an additional argument in addition to the following new argument:\n evaluators={evaluator}, evaluated_model_gold_answer={evaluated_model_gold_answer}" - self._generate_description() - print( - f"Updating judge evaluator, gold_answer to: {self.evaluator}, {self.evaluated_model_gold_answer}" - ) - - def _run( - self, - **kwargs: Any, - ) -> Any: - evaluated_model_input = kwargs.get("evaluated_model_input") - evaluated_model_output = kwargs.get("evaluated_model_output") - evaluated_model_retrieved_context = kwargs.get( - "evaluated_model_retrieved_context" - ) - evaluated_model_gold_answer = self.evaluated_model_gold_answer - evaluator = self.evaluator - - result = self.client.evaluate( - evaluator=evaluator, - evaluated_model_input=( - evaluated_model_input - if isinstance(evaluated_model_input, str) - else evaluated_model_input.get("description") - ), - evaluated_model_output=( - evaluated_model_output - if isinstance(evaluated_model_output, str) - else evaluated_model_output.get("description") - ), - evaluated_model_retrieved_context=( - evaluated_model_retrieved_context - if isinstance(evaluated_model_retrieved_context, str) - else evaluated_model_retrieved_context.get("description") - ), - evaluated_model_gold_answer=( - evaluated_model_gold_answer - if isinstance(evaluated_model_gold_answer, str) - else evaluated_model_gold_answer.get("description") - ), - tags={}, - ) - output = f"Evaluation result: {result.pass_}, Explanation: {result.explanation}" - return output - - -class 
PatronusPredifinedCriteriaEvalTool(BaseTool): - """ - PatronusEvalTool is a tool to automatically evaluate and score agent interactions. - - Results are logged to the Patronus platform at app.patronus.ai - """ - - name: str = "Call Patronus API tool for evaluation of model inputs and outputs" - description: str = ( - """This tool calls the Patronus Evaluation API that takes the following arguments:""" - ) - evaluate_url: str = "https://api.patronus.ai/v1/evaluate" - args_schema: Type[BaseModel] = FixedBaseToolSchema - evaluators: List[Dict[str, str]] = [] - - def __init__(self, evaluators: List[Dict[str, str]], **kwargs: Any): - super().__init__(**kwargs) - if evaluators: - self.evaluators = evaluators - self.description = f"This tool calls the Patronus Evaluation API that takes an additional argument in addition to the following new argument:\n evaluators={evaluators}" - self._generate_description() - print(f"Updating judge criteria to: {self.evaluators}") - - def _run( - self, - **kwargs: Any, - ) -> Any: - - evaluated_model_input = kwargs.get("evaluated_model_input") - evaluated_model_output = kwargs.get("evaluated_model_output") - evaluated_model_retrieved_context = kwargs.get( - "evaluated_model_retrieved_context" - ) - evaluated_model_gold_answer = kwargs.get("evaluated_model_gold_answer") - evaluators = self.evaluators - - headers = { - "X-API-KEY": os.getenv("PATRONUS_API_KEY"), - "accept": "application/json", - "content-type": "application/json", - } - - data = { - "evaluated_model_input": ( - evaluated_model_input - if isinstance(evaluated_model_input, str) - else evaluated_model_input.get("description") - ), - "evaluated_model_output": ( - evaluated_model_output - if isinstance(evaluated_model_output, str) - else evaluated_model_output.get("description") - ), - "evaluated_model_retrieved_context": ( - evaluated_model_retrieved_context - if isinstance(evaluated_model_retrieved_context, str) - else evaluated_model_retrieved_context.get("description") - ), - "evaluated_model_gold_answer": ( - evaluated_model_gold_answer - if isinstance(evaluated_model_gold_answer, str) - else evaluated_model_gold_answer.get("description") - ), - "evaluators": ( - evaluators - if isinstance(evaluators, list) - else evaluators.get("description") - ), - } - - response = requests.post( - self.evaluate_url, headers=headers, data=json.dumps(data) - ) - if response.status_code != 200: - raise Exception( - f"Failed to evaluate model input and output. Status code: {response.status_code}. 
Reason: {response.text}" - ) - - return response.json() + return response.json() \ No newline at end of file diff --git a/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py b/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py new file mode 100644 index 000000000..ca4c972d1 --- /dev/null +++ b/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py @@ -0,0 +1,89 @@ +import os +import json +import requests +import warnings +from typing import Any, List, Dict, Optional, Type +from crewai.tools import BaseTool +from pydantic import BaseModel, Field +from patronus import Client + + +class FixedLocalEvaluatorToolSchema(BaseModel): + evaluated_model_input: Dict = Field( + ..., description="The agent's task description in simple text" + ) + evaluated_model_output: Dict = Field( + ..., description="The agent's output of the task" + ) + evaluated_model_retrieved_context: Dict = Field( + ..., description="The agent's context" + ) + evaluated_model_gold_answer: Dict = Field( + ..., description="The agent's gold answer only if available" + ) + evaluator: str = Field(..., description="The registered local evaluator") + + +class PatronusLocalEvaluatorTool(BaseTool): + name: str = "Patronus Local Evaluator Tool" + evaluator: str = "The registered local evaluator" + evaluated_model_gold_answer: str = "The agent's gold answer" + description: str = ( + "This tool is used to evaluate the model input and output using custom function evaluators." + ) + client: Any = None + args_schema: Type[BaseModel] = FixedLocalEvaluatorToolSchema + + class Config: + arbitrary_types_allowed = True + + def __init__(self, evaluator: str, evaluated_model_gold_answer: str, **kwargs: Any): + super().__init__(**kwargs) + self.client = Client() + if evaluator: + self.evaluator = evaluator + self.evaluated_model_gold_answer = evaluated_model_gold_answer + self.description = f"This tool calls the Patronus Evaluation API that takes an additional argument in addition to the following new argument:\n evaluators={evaluator}, evaluated_model_gold_answer={evaluated_model_gold_answer}" + self._generate_description() + print( + f"Updating judge evaluator, gold_answer to: {self.evaluator}, {self.evaluated_model_gold_answer}" + ) + + def _run( + self, + **kwargs: Any, + ) -> Any: + evaluated_model_input = kwargs.get("evaluated_model_input") + evaluated_model_output = kwargs.get("evaluated_model_output") + evaluated_model_retrieved_context = kwargs.get( + "evaluated_model_retrieved_context" + ) + evaluated_model_gold_answer = self.evaluated_model_gold_answer + evaluator = self.evaluator + + result = self.client.evaluate( + evaluator=evaluator, + evaluated_model_input=( + evaluated_model_input + if isinstance(evaluated_model_input, str) + else evaluated_model_input.get("description") + ), + evaluated_model_output=( + evaluated_model_output + if isinstance(evaluated_model_output, str) + else evaluated_model_output.get("description") + ), + evaluated_model_retrieved_context=( + evaluated_model_retrieved_context + if isinstance(evaluated_model_retrieved_context, str) + else evaluated_model_retrieved_context.get("description") + ), + evaluated_model_gold_answer=( + evaluated_model_gold_answer + if isinstance(evaluated_model_gold_answer, str) + else evaluated_model_gold_answer.get("description") + ), + tags={}, + ) + output = f"Evaluation result: {result.pass_}, Explanation: {result.explanation}" + return output diff --git 
a/src/crewai_tools/tools/patronus_eval_tool/patronus_predefined_criteria_eval_tool.py b/src/crewai_tools/tools/patronus_eval_tool/patronus_predefined_criteria_eval_tool.py new file mode 100644 index 000000000..28661f64b --- /dev/null +++ b/src/crewai_tools/tools/patronus_eval_tool/patronus_predefined_criteria_eval_tool.py @@ -0,0 +1,108 @@ +import os +import json +import requests +import warnings +from typing import Any, List, Dict, Optional, Type +from crewai.tools import BaseTool +from pydantic import BaseModel, Field +from patronus import Client + + +class FixedBaseToolSchema(BaseModel): + evaluated_model_input: Dict = Field( + ..., description="The agent's task description in simple text" + ) + evaluated_model_output: Dict = Field( + ..., description="The agent's output of the task" + ) + evaluated_model_retrieved_context: Dict = Field( + ..., description="The agent's context" + ) + evaluated_model_gold_answer: Dict = Field( + ..., description="The agent's gold answer only if available" + ) + evaluators: List[Dict[str, str]] = Field( + ..., + description="List of dictionaries containing the evaluator and criteria to evaluate the model input and output. An example input for this field: [{'evaluator': '[evaluator-from-user]', 'criteria': '[criteria-from-user]'}]", + ) + + +class PatronusPredefinedCriteriaEvalTool(BaseTool): + """ + PatronusEvalTool is a tool to automatically evaluate and score agent interactions. + + Results are logged to the Patronus platform at app.patronus.ai + """ + + name: str = "Call Patronus API tool for evaluation of model inputs and outputs" + description: str = ( + """This tool calls the Patronus Evaluation API that takes the following arguments:""" + ) + evaluate_url: str = "https://api.patronus.ai/v1/evaluate" + args_schema: Type[BaseModel] = FixedBaseToolSchema + evaluators: List[Dict[str, str]] = [] + + def __init__(self, evaluators: List[Dict[str, str]], **kwargs: Any): + super().__init__(**kwargs) + if evaluators: + self.evaluators = evaluators + self.description = f"This tool calls the Patronus Evaluation API that takes an additional argument in addition to the following new argument:\n evaluators={evaluators}" + self._generate_description() + print(f"Updating judge criteria to: {self.evaluators}") + + def _run( + self, + **kwargs: Any, + ) -> Any: + + evaluated_model_input = kwargs.get("evaluated_model_input") + evaluated_model_output = kwargs.get("evaluated_model_output") + evaluated_model_retrieved_context = kwargs.get( + "evaluated_model_retrieved_context" + ) + evaluated_model_gold_answer = kwargs.get("evaluated_model_gold_answer") + evaluators = self.evaluators + + headers = { + "X-API-KEY": os.getenv("PATRONUS_API_KEY"), + "accept": "application/json", + "content-type": "application/json", + } + + data = { + "evaluated_model_input": ( + evaluated_model_input + if isinstance(evaluated_model_input, str) + else evaluated_model_input.get("description") + ), + "evaluated_model_output": ( + evaluated_model_output + if isinstance(evaluated_model_output, str) + else evaluated_model_output.get("description") + ), + "evaluated_model_retrieved_context": ( + evaluated_model_retrieved_context + if isinstance(evaluated_model_retrieved_context, str) + else evaluated_model_retrieved_context.get("description") + ), + "evaluated_model_gold_answer": ( + evaluated_model_gold_answer + if isinstance(evaluated_model_gold_answer, str) + else evaluated_model_gold_answer.get("description") + ), + "evaluators": ( + evaluators + if isinstance(evaluators, list) + else 
evaluators.get("description") + ), + } + + response = requests.post( + self.evaluate_url, headers=headers, data=json.dumps(data) + ) + if response.status_code != 200: + raise Exception( + f"Failed to evaluate model input and output. Status code: {response.status_code}. Reason: {response.text}" + ) + + return response.json() \ No newline at end of file From a7316a86bf72bdcbabe9c80192bc726edcea9463 Mon Sep 17 00:00:00 2001 From: Rebecca Qian Date: Tue, 31 Dec 2024 04:01:26 -0500 Subject: [PATCH 211/391] fix bug in local evaluator tool --- src/crewai_tools/__init__.py | 2 ++ .../tools/patronus_eval_tool/example.py | 4 +-- .../patronus_eval_tool/patronus_eval_tool.py | 5 +--- .../patronus_local_evaluator_tool.py | 26 +++++++++---------- .../patronus_predefined_criteria_eval_tool.py | 4 +-- 5 files changed, 19 insertions(+), 22 deletions(-) diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 7e27286e7..e920a5969 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -23,6 +23,8 @@ from .tools import ( MySQLSearchTool, NL2SQLTool, PatronusEvalTool, + PatronusLocalEvaluatorTool, + PatronusPredefinedCriteriaEvalTool, PDFSearchTool, PGSearchTool, RagTool, diff --git a/src/crewai_tools/tools/patronus_eval_tool/example.py b/src/crewai_tools/tools/patronus_eval_tool/example.py index 83787c86e..b9e1bad5e 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/example.py +++ b/src/crewai_tools/tools/patronus_eval_tool/example.py @@ -17,7 +17,7 @@ client = Client() # Example of an evaluator that returns a random pass/fail result @client.register_local_evaluator("random_evaluator") -def my_evaluator(**kwargs): +def random_evaluator(**kwargs): score = random.random() return EvaluationResult( score_raw=score, @@ -35,7 +35,7 @@ def my_evaluator(**kwargs): # 3. 
Uses PatronusLocalEvaluatorTool: agent uses user defined evaluator patronus_eval_tool = PatronusLocalEvaluatorTool( - evaluator="random_evaluator", evaluated_model_gold_answer="example label" + patronus_client=client, evaluator="random_evaluator", evaluated_model_gold_answer="example label" ) # Create a new agent diff --git a/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py b/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py index 9136cfb59..23ffe2fd4 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py +++ b/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py @@ -2,11 +2,8 @@ import os import json import requests import warnings -from typing import Any, List, Dict, Optional, Type +from typing import Any, List, Dict, Optional from crewai.tools import BaseTool -from pydantic import BaseModel, Field -from patronus import Client - class PatronusEvalTool(BaseTool): diff --git a/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py b/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py index ca4c972d1..5f75ad26c 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py +++ b/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py @@ -1,24 +1,20 @@ -import os -import json -import requests -import warnings -from typing import Any, List, Dict, Optional, Type +from typing import Any, Type from crewai.tools import BaseTool from pydantic import BaseModel, Field from patronus import Client class FixedLocalEvaluatorToolSchema(BaseModel): - evaluated_model_input: Dict = Field( + evaluated_model_input: str = Field( ..., description="The agent's task description in simple text" ) - evaluated_model_output: Dict = Field( + evaluated_model_output: str = Field( ..., description="The agent's output of the task" ) - evaluated_model_retrieved_context: Dict = Field( + evaluated_model_retrieved_context: str = Field( ..., description="The agent's context" ) - evaluated_model_gold_answer: Dict = Field( + evaluated_model_gold_answer: str = Field( ..., description="The agent's gold answer only if available" ) evaluator: str = Field(..., description="The registered local evaluator") @@ -37,9 +33,9 @@ class PatronusLocalEvaluatorTool(BaseTool): class Config: arbitrary_types_allowed = True - def __init__(self, evaluator: str, evaluated_model_gold_answer: str, **kwargs: Any): + def __init__(self, patronus_client: Client, evaluator: str, evaluated_model_gold_answer: str, **kwargs: Any): super().__init__(**kwargs) - self.client = Client() + self.client = patronus_client #Client() if evaluator: self.evaluator = evaluator self.evaluated_model_gold_answer = evaluated_model_gold_answer @@ -58,9 +54,13 @@ class PatronusLocalEvaluatorTool(BaseTool): evaluated_model_retrieved_context = kwargs.get( "evaluated_model_retrieved_context" ) - evaluated_model_gold_answer = self.evaluated_model_gold_answer + evaluated_model_gold_answer = kwargs.get("evaluated_model_gold_answer") + # evaluated_model_gold_answer = self.evaluated_model_gold_answer evaluator = self.evaluator + print(kwargs) + print(self.evaluator) + result = self.client.evaluate( evaluator=evaluator, evaluated_model_input=( @@ -83,7 +83,7 @@ class PatronusLocalEvaluatorTool(BaseTool): if isinstance(evaluated_model_gold_answer, str) else evaluated_model_gold_answer.get("description") ), - tags={}, + tags={}, # Optional metadata, supports arbitrary kv pairs ) output = f"Evaluation result: {result.pass_}, Explanation: 
{result.explanation}" return output diff --git a/src/crewai_tools/tools/patronus_eval_tool/patronus_predefined_criteria_eval_tool.py b/src/crewai_tools/tools/patronus_eval_tool/patronus_predefined_criteria_eval_tool.py index 28661f64b..28ffc2912 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/patronus_predefined_criteria_eval_tool.py +++ b/src/crewai_tools/tools/patronus_eval_tool/patronus_predefined_criteria_eval_tool.py @@ -1,11 +1,9 @@ import os import json import requests -import warnings -from typing import Any, List, Dict, Optional, Type +from typing import Any, List, Dict, Type from crewai.tools import BaseTool from pydantic import BaseModel, Field -from patronus import Client class FixedBaseToolSchema(BaseModel): From 10f8a8731779c2b6a1bfce9ce6a6e87e947c8017 Mon Sep 17 00:00:00 2001 From: Rebecca Qian Date: Tue, 31 Dec 2024 04:05:46 -0500 Subject: [PATCH 212/391] update local evaluator --- .../patronus_eval_tool/patronus_local_evaluator_tool.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py b/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py index 5f75ad26c..e65cb342d 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py +++ b/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py @@ -35,7 +35,7 @@ class PatronusLocalEvaluatorTool(BaseTool): def __init__(self, patronus_client: Client, evaluator: str, evaluated_model_gold_answer: str, **kwargs: Any): super().__init__(**kwargs) - self.client = patronus_client #Client() + self.client = patronus_client if evaluator: self.evaluator = evaluator self.evaluated_model_gold_answer = evaluated_model_gold_answer @@ -54,13 +54,9 @@ class PatronusLocalEvaluatorTool(BaseTool): evaluated_model_retrieved_context = kwargs.get( "evaluated_model_retrieved_context" ) - evaluated_model_gold_answer = kwargs.get("evaluated_model_gold_answer") - # evaluated_model_gold_answer = self.evaluated_model_gold_answer + evaluated_model_gold_answer = self.evaluated_model_gold_answer evaluator = self.evaluator - print(kwargs) - print(self.evaluator) - result = self.client.evaluate( evaluator=evaluator, evaluated_model_input=( From 4c7ce3a94548d7768d676fb236bb1b1fb72dfbbe Mon Sep 17 00:00:00 2001 From: Priyanshupareek <37779762+Priyanshupareek@users.noreply.github.com> Date: Thu, 2 Jan 2025 00:54:48 +0530 Subject: [PATCH 213/391] Update browserbase_load_tool.py --- .../browserbase_load_tool/browserbase_load_tool.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py index 54c33db3c..0a848fc7b 100644 --- a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py +++ b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py @@ -1,8 +1,9 @@ from typing import Any, Optional, Type - -from crewai.tools import BaseTool +import os from pydantic import BaseModel, Field +from crewai_tools.tools.base_tool import BaseTool + class BrowserbaseLoadToolSchema(BaseModel): url: str = Field(description="Website URL") @@ -14,8 +15,8 @@ class BrowserbaseLoadTool(BaseTool): "Load webpages url in a headless browser using Browserbase and return the contents" ) args_schema: Type[BaseModel] = BrowserbaseLoadToolSchema - api_key: Optional[str] = None - project_id: Optional[str] = None + api_key: Optional[str] = 
os.getenv('BROWSERBASE_API_KEY') + project_id: Optional[str] = os.getenv('BROWSERBASE_PROJECT_ID') text_content: Optional[bool] = False session_id: Optional[str] = None proxy: Optional[bool] = None @@ -38,7 +39,7 @@ class BrowserbaseLoadTool(BaseTool): "`browserbase` package not found, please run `pip install browserbase`" ) - self.browserbase = Browserbase(api_key, project_id) + self.browserbase = Browserbase(api_key=self.api_key) self.text_content = text_content self.session_id = session_id self.proxy = proxy From 954dd43c17cc4dde498ad90bf31612d3e5e95aa0 Mon Sep 17 00:00:00 2001 From: Priyanshupareek <37779762+Priyanshupareek@users.noreply.github.com> Date: Thu, 2 Jan 2025 01:34:42 +0530 Subject: [PATCH 214/391] Update browserbase_load_tool.py --- .../tools/browserbase_load_tool/browserbase_load_tool.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py index 0a848fc7b..95e4084fd 100644 --- a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py +++ b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py @@ -1,5 +1,5 @@ -from typing import Any, Optional, Type import os +from typing import Any, Optional, Type from pydantic import BaseModel, Field from crewai_tools.tools.base_tool import BaseTool @@ -32,6 +32,8 @@ class BrowserbaseLoadTool(BaseTool): **kwargs, ): super().__init__(**kwargs) + if not self.api_key: + raise EnvironmentError("BROWSERBASE_API_KEY environment variable is required for initialization") try: from browserbase import Browserbase # type: ignore except ImportError: From 29da6659cf2eeff61c515642efabfbffd8052969 Mon Sep 17 00:00:00 2001 From: Minura Punchihewa Date: Thu, 2 Jan 2025 19:40:56 +0530 Subject: [PATCH 215/391] added the skeleton for the AIMind tool --- .../tools/ai_minds_tool/README.md | 0 .../tools/ai_minds_tool/__init__.py | 0 .../tools/ai_minds_tool/ai_minds_tool.py | 40 +++++++++++++++++++ 3 files changed, 40 insertions(+) create mode 100644 src/crewai_tools/tools/ai_minds_tool/README.md create mode 100644 src/crewai_tools/tools/ai_minds_tool/__init__.py create mode 100644 src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py diff --git a/src/crewai_tools/tools/ai_minds_tool/README.md b/src/crewai_tools/tools/ai_minds_tool/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/src/crewai_tools/tools/ai_minds_tool/__init__.py b/src/crewai_tools/tools/ai_minds_tool/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py b/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py new file mode 100644 index 000000000..99d8e3f8f --- /dev/null +++ b/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py @@ -0,0 +1,40 @@ +from typing import Dict, Optional, Type, TYPE_CHECKING + +from crewai.tools import BaseTool +from openai import OpenAI +from pydantic import BaseModel + +if TYPE_CHECKING: + from minds_sdk import Client + + +class AIMindInputSchema(BaseModel): + """Input for AIMind Tool.""" + + query: str = "Question in natural language to ask the AI-Mind" + + +class AIMindTool(BaseTool): + name: str = "AIMind Tool" + description: str = ( + "A wrapper around [AI-Minds](https://mindsdb.com/minds). " + "Useful for when you need answers to questions from your data, stored in " + "data sources including PostgreSQL, MySQL, MariaDB, ClickHouse, Snowflake " + "and Google BigQuery. 
" + "Input should be a question in natural language." + ) + args_schema: Type[BaseModel] = AIMindInputSchema + api_key: Optional[str] = None + datasources: Optional[Dict] = None + minds_client: Optional["Client"] = None + + def __init__(self, api_key: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + try: + from minds_sdk import Client # type: ignore + except ImportError: + raise ImportError( + "`minds_sdk` package not found, please run `pip install minds-sdk`" + ) + + self.minds_client = Client(api_key=api_key) \ No newline at end of file From 55f669989bca634ba61919f1295ee8f47b4a208c Mon Sep 17 00:00:00 2001 From: Minura Punchihewa Date: Fri, 3 Jan 2025 00:28:30 +0530 Subject: [PATCH 216/391] completed the initialization logic for the tool --- .../tools/ai_minds_tool/ai_minds_tool.py | 33 +++++++++++++++---- 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py b/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py index 99d8e3f8f..411daf209 100644 --- a/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py +++ b/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py @@ -1,12 +1,10 @@ -from typing import Dict, Optional, Type, TYPE_CHECKING +import secrets +from typing import Dict, Optional, Text, Type from crewai.tools import BaseTool from openai import OpenAI from pydantic import BaseModel -if TYPE_CHECKING: - from minds_sdk import Client - class AIMindInputSchema(BaseModel): """Input for AIMind Tool.""" @@ -26,15 +24,38 @@ class AIMindTool(BaseTool): args_schema: Type[BaseModel] = AIMindInputSchema api_key: Optional[str] = None datasources: Optional[Dict] = None - minds_client: Optional["Client"] = None + mind_name: Optional[Text] = None def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) try: from minds_sdk import Client # type: ignore + from minds.datasources import DatabaseConfig # type: ignore except ImportError: raise ImportError( "`minds_sdk` package not found, please run `pip install minds-sdk`" ) - self.minds_client = Client(api_key=api_key) \ No newline at end of file + minds_client = Client(api_key=api_key) + + # Convert the datasources to DatabaseConfig objects. + datasources = [] + for datasource in self.datasources: + if datasource["type"] == "database": + config = DatabaseConfig( + name=datasource["name"], + engine=datasource["engine"], + description=datasource["description"], + connection_data=datasource["connection_data"], + tables=datasource["tables"], + ) + datasources.append(config) + + # Generate a random name for the Mind. 
+ name = f"cai_mind_{secrets.token_hex(5)}" + + mind = minds_client.minds.create( + name=name, datasources=datasources, replace=True + ) + + self.mind_name = mind.name \ No newline at end of file From 0b5f0841bf235eb029e4d24c19efd00c90bbeccd Mon Sep 17 00:00:00 2001 From: Minura Punchihewa Date: Fri, 3 Jan 2025 00:32:24 +0530 Subject: [PATCH 217/391] implemented the run function for the tool --- .../tools/ai_minds_tool/ai_minds_tool.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py b/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py index 411daf209..915ed1ca0 100644 --- a/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py +++ b/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py @@ -58,4 +58,20 @@ class AIMindTool(BaseTool): name=name, datasources=datasources, replace=True ) - self.mind_name = mind.name \ No newline at end of file + self.mind_name = mind.name + + def _run( + self, + query: Text + ): + # Run the query on the AI-Mind. + # The Minds API is OpenAI compatible and therefore, the OpenAI client can be used. + openai_client = OpenAI(base_url="https://mdb.ai/", api_key=self.api_key) + + completion = openai_client.create( + model=self.mind_name, + messages=[{"role": "user", "content": query}], + stream=False, + ) + + return completion.choices[0].message.content \ No newline at end of file From 555638a654f61c2c07afc68320e272421be94f7b Mon Sep 17 00:00:00 2001 From: Minura Punchihewa Date: Fri, 3 Jan 2025 00:37:12 +0530 Subject: [PATCH 218/391] added the main import statements --- src/crewai_tools/__init__.py | 1 + src/crewai_tools/tools/__init__.py | 1 + src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 87aca8531..a0e384683 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -1,4 +1,5 @@ from .tools import ( + AIMindTool, BraveSearchTool, BrowserbaseLoadTool, CodeDocsSearchTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index f6c31f45f..c125082f3 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -1,3 +1,4 @@ +from .ai_minds_tool.ai_minds_tool import AIMindTool from .brave_search_tool.brave_search_tool import BraveSearchTool from .browserbase_load_tool.browserbase_load_tool import BrowserbaseLoadTool from .code_docs_search_tool.code_docs_search_tool import CodeDocsSearchTool diff --git a/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py b/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py index 915ed1ca0..8d7750771 100644 --- a/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py +++ b/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py @@ -26,7 +26,7 @@ class AIMindTool(BaseTool): datasources: Optional[Dict] = None mind_name: Optional[Text] = None - def __init__(self, api_key: Optional[str] = None, **kwargs): + def __init__(self, api_key: Optional[Text] = None, **kwargs): super().__init__(**kwargs) try: from minds_sdk import Client # type: ignore From faff58ba1cc18f2aef203164a75388a2d4f04d3f Mon Sep 17 00:00:00 2001 From: Minura Punchihewa Date: Fri, 3 Jan 2025 01:17:11 +0530 Subject: [PATCH 219/391] fixed a few bugs, type hints and imports --- .../tools/ai_minds_tool/ai_minds_tool.py | 27 +++++++++---------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py 
b/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py index 8d7750771..222271d7f 100644 --- a/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py +++ b/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py @@ -1,5 +1,5 @@ import secrets -from typing import Dict, Optional, Text, Type +from typing import Any, Dict, List, Optional, Text, Type from crewai.tools import BaseTool from openai import OpenAI @@ -23,13 +23,13 @@ class AIMindTool(BaseTool): ) args_schema: Type[BaseModel] = AIMindInputSchema api_key: Optional[str] = None - datasources: Optional[Dict] = None + datasources: Optional[List[Dict[str, Any]]] = None mind_name: Optional[Text] = None def __init__(self, api_key: Optional[Text] = None, **kwargs): - super().__init__(**kwargs) + super().__init__(api_key=api_key, **kwargs) try: - from minds_sdk import Client # type: ignore + from minds.client import Client # type: ignore from minds.datasources import DatabaseConfig # type: ignore except ImportError: raise ImportError( @@ -41,15 +41,14 @@ class AIMindTool(BaseTool): # Convert the datasources to DatabaseConfig objects. datasources = [] for datasource in self.datasources: - if datasource["type"] == "database": - config = DatabaseConfig( - name=datasource["name"], - engine=datasource["engine"], - description=datasource["description"], - connection_data=datasource["connection_data"], - tables=datasource["tables"], - ) - datasources.append(config) + config = DatabaseConfig( + name=f"cai_ds_{secrets.token_hex(5)}", + engine=datasource["engine"], + description=datasource["description"], + connection_data=datasource["connection_data"], + tables=datasource["tables"], + ) + datasources.append(config) # Generate a random name for the Mind. name = f"cai_mind_{secrets.token_hex(5)}" @@ -68,7 +67,7 @@ class AIMindTool(BaseTool): # The Minds API is OpenAI compatible and therefore, the OpenAI client can be used. openai_client = OpenAI(base_url="https://mdb.ai/", api_key=self.api_key) - completion = openai_client.create( + completion = openai_client.chat.completions.create( model=self.mind_name, messages=[{"role": "user", "content": query}], stream=False, From 64d54bd42352e54615a89211571b9c22557bee8a Mon Sep 17 00:00:00 2001 From: Minura Punchihewa Date: Fri, 3 Jan 2025 01:55:51 +0530 Subject: [PATCH 220/391] updated the content in the README --- .../tools/ai_minds_tool/README.md | 75 +++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/src/crewai_tools/tools/ai_minds_tool/README.md b/src/crewai_tools/tools/ai_minds_tool/README.md index e69de29bb..7bb47cde5 100644 --- a/src/crewai_tools/tools/ai_minds_tool/README.md +++ b/src/crewai_tools/tools/ai_minds_tool/README.md @@ -0,0 +1,75 @@ +# AIMind Tool + +## Description + +[Minds](https://mindsdb.com/minds) are AI systems provided by [MindsDB](https://mindsdb.com/) that work similarly to large language models (LLMs) but go beyond by answering any question from any data. + +This is accomplished by selecting the most relevant data for an answer using parametric search, understanding the meaning and providing responses within the correct context through semantic search, and finally, delivering precise answers by analyzing data and using machine learning (ML) models. + +## Installation + +1. Install the `crewai[tools]` package: + +```shell +pip install 'crewai[tools]' +``` + +2. Install the Minds SDK: + +```shell +pip install minds-sdk +``` + +3. Sign for a Minds account [here](https://mdb.ai/register), and obtain an API key. + +4. 
Set the Minds API key in an environment variable named `MINDS_API_KEY`. + +## Usage + +```python +from crewai_tools import AIMindTool + + +# Initialize the AIMindTool. +aimind_tool = AIMindTool( + datasources=[ + { + "description": "house sales data", + "engine": "postgres", + "connection_data": { + "user": "demo_user", + "password": "demo_password", + "host": "samples.mindsdb.com", + "port": 5432, + "database": "demo", + "schema": "demo_data" + }, + "tables": ["house_sales"] + } + ] +) +``` + +The `datasources` parameter is a list of dictionaries, each containing the following keys: + +- `description`: A description of the data contained in the datasource. +- `engine`: The engine (or type) of the datasource. +- `connection_data`: A dictionary containing the connection parameters for the datasource. +- `tables`: A list of tables that the data source will use. + +A list of supported data sources and their connection parameters can be found [here](https://docs.mdb.ai/docs/data_sources). + +```python +from crewai import Agent +from crewai.project import agent + + +# Define an agent with the AIMindTool. +@agent +def researcher(self) -> Agent: + return Agent( + config=self.agents_config["researcher"], + allow_delegation=False, + tools=[aimind_tool] + ) +``` From 3c29a6cc11cf0a221e2bb84eefcf84797ce6d450 Mon Sep 17 00:00:00 2001 From: Minura Punchihewa Date: Fri, 3 Jan 2025 02:25:14 +0530 Subject: [PATCH 221/391] added an example of running the tool to the README --- src/crewai_tools/tools/ai_minds_tool/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/crewai_tools/tools/ai_minds_tool/README.md b/src/crewai_tools/tools/ai_minds_tool/README.md index 7bb47cde5..5b3755515 100644 --- a/src/crewai_tools/tools/ai_minds_tool/README.md +++ b/src/crewai_tools/tools/ai_minds_tool/README.md @@ -48,6 +48,8 @@ aimind_tool = AIMindTool( } ] ) + +aimind_tool.run("How many 3 bedroom houses were sold in 2008?") ``` The `datasources` parameter is a list of dictionaries, each containing the following keys: From e0c6ec5bd316c9c373826dd15c34b34664bdd84e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Thu, 2 Jan 2025 20:51:14 -0300 Subject: [PATCH 222/391] fix imports --- src/crewai_tools/__init__.py | 4 ++-- src/crewai_tools/tools/__init__.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index ba779e5ac..890dc36f8 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -27,8 +27,8 @@ from .tools import ( PGSearchTool, RagTool, ScrapeElementFromWebsiteTool, - ScrapeGraphScrapeTool, - ScrapegraphScrapeToolSchema + ScrapegraphScrapeTool, + ScrapegraphScrapeToolSchema, ScrapeWebsiteTool, ScrapflyScrapeWebsiteTool, SeleniumScrapingTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index d6faccc98..c8ee55084 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -20,7 +20,7 @@ from .firecrawl_scrape_website_tool.firecrawl_scrape_website_tool import ( from .firecrawl_search_tool.firecrawl_search_tool import FirecrawlSearchTool from .github_search_tool.github_search_tool import GithubSearchTool from .json_search_tool.json_search_tool import JSONSearchTool -from .linkup_search_tool.linkup_search_tool import LinkupSearchTool +from .linkup.linkup_search_tool import LinkupSearchTool from .llamaindex_tool.llamaindex_tool import LlamaIndexTool from .mdx_seach_tool.mdx_search_tool import MDXSearchTool from 
.multion_tool.multion_tool import MultiOnTool @@ -32,7 +32,7 @@ from .rag.rag_tool import RagTool from .scrape_element_from_website.scrape_element_from_website import ( ScrapeElementFromWebsiteTool, ) -from .scrapegraph_scrape_tool.scrapegraph_scrape_tool import ScrapeGraphScrapeTool, ScrapegraphScrapeToolSchema +from .scrapegraph_scrape_tool.scrapegraph_scrape_tool import ScrapegraphScrapeTool, ScrapegraphScrapeToolSchema from .scrape_website_tool.scrape_website_tool import ScrapeWebsiteTool from .scrapfly_scrape_website_tool.scrapfly_scrape_website_tool import ( ScrapflyScrapeWebsiteTool, @@ -55,4 +55,4 @@ from .youtube_channel_search_tool.youtube_channel_search_tool import ( from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool from .weaviate_tool.vector_search import WeaviateVectorSearchTool from .serpapi_tool.serpapi_google_search_tool import SerpApiGoogleSearchTool -from .serpapi_tool.serpapi_google_shopping_tool import SerpApiGoogleShoppingTool \ No newline at end of file +from .serpapi_tool.serpapi_google_shopping_tool import SerpApiGoogleShoppingTool From 94cce06044af904a8f794511449abf250dc64c2f Mon Sep 17 00:00:00 2001 From: Minura Punchihewa Date: Fri, 3 Jan 2025 11:08:38 +0530 Subject: [PATCH 223/391] updated the initialization logic to allow the API key to be passed as env var --- src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py b/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py index 222271d7f..1059d0053 100644 --- a/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py +++ b/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py @@ -1,3 +1,4 @@ +import os import secrets from typing import Any, Dict, List, Optional, Text, Type @@ -36,7 +37,13 @@ class AIMindTool(BaseTool): "`minds_sdk` package not found, please run `pip install minds-sdk`" ) - minds_client = Client(api_key=api_key) + if os.getenv("MINDS_API_KEY"): + self.api_key = os.getenv("MINDS_API_KEY") + + if self.api_key is None: + raise ValueError("A Minds API key is required to use the AIMind Tool.") + + minds_client = Client(api_key=self.api_key) # Convert the datasources to DatabaseConfig objects. 
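With the change above, the Minds API key can also come from the `MINDS_API_KEY` environment variable rather than the constructor. A minimal usage sketch, assuming a valid key and reusing the public demo datasource and `run` call from the tool's README (placeholder key values shown):

```python
import os

from crewai_tools import AIMindTool

demo_datasource = {
    "description": "house sales data",
    "engine": "postgres",
    "connection_data": {
        "user": "demo_user",
        "password": "demo_password",
        "host": "samples.mindsdb.com",
        "port": 5432,
        "database": "demo",
        "schema": "demo_data",
    },
    "tables": ["house_sales"],
}

# Either pass the key explicitly (placeholder value shown) ...
aimind_tool = AIMindTool(api_key="<your-minds-api-key>", datasources=[demo_datasource])

# ... or set MINDS_API_KEY in the environment and omit the argument.
os.environ["MINDS_API_KEY"] = "<your-minds-api-key>"
aimind_tool = AIMindTool(datasources=[demo_datasource])

print(aimind_tool.run("How many 3 bedroom houses were sold in 2008?"))
```

Note that at this stage the environment variable, when set, still overrides an explicitly passed key; a later patch reverses that precedence.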
datasources = [] From 16cdabbf3513abc2333c595cf67996ab0f25a13c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Fri, 3 Jan 2025 02:53:24 -0300 Subject: [PATCH 224/391] bumping verison fixing tests --- .../browserbase_load_tool.py | 2 +- tests/base_tool_test.py | 19 ++++++++++--------- tests/spider_tool_test.py | 12 ++++++------ 3 files changed, 17 insertions(+), 16 deletions(-) diff --git a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py index 95e4084fd..2ca1b95fc 100644 --- a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py +++ b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py @@ -2,7 +2,7 @@ import os from typing import Any, Optional, Type from pydantic import BaseModel, Field -from crewai_tools.tools.base_tool import BaseTool +from crewai.tools import BaseTool class BrowserbaseLoadToolSchema(BaseModel): diff --git a/tests/base_tool_test.py b/tests/base_tool_test.py index 949a445c2..4a4e40783 100644 --- a/tests/base_tool_test.py +++ b/tests/base_tool_test.py @@ -1,5 +1,6 @@ from typing import Callable -from crewai_tools import BaseTool, tool +from crewai.tools import BaseTool, tool +from crewai.tools.base_tool import to_langchain def test_creating_a_tool_using_annotation(): @tool("Name of my tool") @@ -9,14 +10,14 @@ def test_creating_a_tool_using_annotation(): # Assert all the right attributes were defined assert my_tool.name == "Name of my tool" - assert my_tool.description == "Name of my tool(question: 'string') - Clear description for what this tool is useful for, you agent will need this information to use it." + assert my_tool.description == "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, you agent will need this information to use it." assert my_tool.args_schema.schema()["properties"] == {'question': {'title': 'Question', 'type': 'string'}} assert my_tool.func("What is the meaning of life?") == "What is the meaning of life?" # Assert the langchain tool conversion worked as expected - converted_tool = my_tool.to_langchain() + converted_tool = to_langchain([my_tool])[0] assert converted_tool.name == "Name of my tool" - assert converted_tool.description == "Name of my tool(question: 'string') - Clear description for what this tool is useful for, you agent will need this information to use it." + assert converted_tool.description == "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, you agent will need this information to use it." assert converted_tool.args_schema.schema()["properties"] == {'question': {'title': 'Question', 'type': 'string'}} assert converted_tool.func("What is the meaning of life?") == "What is the meaning of life?" @@ -31,16 +32,16 @@ def test_creating_a_tool_using_baseclass(): my_tool = MyCustomTool() # Assert all the right attributes were defined assert my_tool.name == "Name of my tool" - assert my_tool.description == "Name of my tool(question: 'string') - Clear description for what this tool is useful for, you agent will need this information to use it." 
+ assert my_tool.description == "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, you agent will need this information to use it." assert my_tool.args_schema.schema()["properties"] == {'question': {'title': 'Question', 'type': 'string'}} - assert my_tool.run("What is the meaning of life?") == "What is the meaning of life?" + assert my_tool._run("What is the meaning of life?") == "What is the meaning of life?" # Assert the langchain tool conversion worked as expected - converted_tool = my_tool.to_langchain() + converted_tool = to_langchain([my_tool])[0] assert converted_tool.name == "Name of my tool" - assert converted_tool.description == "Name of my tool(question: 'string') - Clear description for what this tool is useful for, you agent will need this information to use it." + assert converted_tool.description == "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, you agent will need this information to use it." assert converted_tool.args_schema.schema()["properties"] == {'question': {'title': 'Question', 'type': 'string'}} - assert converted_tool.run("What is the meaning of life?") == "What is the meaning of life?" + assert converted_tool.invoke({"question": "What is the meaning of life?"}) == "What is the meaning of life?" def test_setting_cache_function(): class MyCustomTool(BaseTool): diff --git a/tests/spider_tool_test.py b/tests/spider_tool_test.py index 977dd8769..264394777 100644 --- a/tests/spider_tool_test.py +++ b/tests/spider_tool_test.py @@ -3,7 +3,7 @@ from crewai import Agent, Task, Crew def test_spider_tool(): spider_tool = SpiderTool() - + searcher = Agent( role="Web Research Expert", goal="Find related information from specific URL's", @@ -12,7 +12,7 @@ def test_spider_tool(): verbose=True, cache=False ) - + choose_between_scrape_crawl = Task( description="Scrape the page of spider.cloud and return a summary of how fast it is", expected_output="spider.cloud is a fast scraping and crawling tool", @@ -34,13 +34,13 @@ def test_spider_tool(): crew = Crew( agents=[searcher], tasks=[ - choose_between_scrape_crawl, - return_metadata, + choose_between_scrape_crawl, + return_metadata, css_selector ], - verbose=2 + verbose=True ) - + crew.kickoff() if __name__ == "__main__": From 29a7961ca8c3164f8d20f46011af4d35019895e0 Mon Sep 17 00:00:00 2001 From: Minura Punchihewa Date: Fri, 3 Jan 2025 11:26:16 +0530 Subject: [PATCH 225/391] refined the content in the README --- src/crewai_tools/tools/ai_minds_tool/README.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/crewai_tools/tools/ai_minds_tool/README.md b/src/crewai_tools/tools/ai_minds_tool/README.md index 5b3755515..95d2deb42 100644 --- a/src/crewai_tools/tools/ai_minds_tool/README.md +++ b/src/crewai_tools/tools/ai_minds_tool/README.md @@ -6,6 +6,8 @@ This is accomplished by selecting the most relevant data for an answer using parametric search, understanding the meaning and providing responses within the correct context through semantic search, and finally, delivering precise answers by analyzing data and using machine learning (ML) models. +The `AIMindTool` can be used to query data sources in natural language by simply configuring their connection parameters. + ## Installation 1. 
Install the `crewai[tools]` package: @@ -55,9 +57,9 @@ aimind_tool.run("How many 3 bedroom houses were sold in 2008?") The `datasources` parameter is a list of dictionaries, each containing the following keys: - `description`: A description of the data contained in the datasource. -- `engine`: The engine (or type) of the datasource. -- `connection_data`: A dictionary containing the connection parameters for the datasource. -- `tables`: A list of tables that the data source will use. +- `engine`: The engine (or type) of the datasource. Find a list of supported engines in the link below. +- `connection_data`: A dictionary containing the connection parameters for the datasource. Find a list of connection parameters for each engine in the link below. +- `tables`: A list of tables that the data source will use. This is optional and can be omitted if all tables in the data source are to be used. A list of supported data sources and their connection parameters can be found [here](https://docs.mdb.ai/docs/data_sources). From d360906f578c830ea6c80e7bc2e012bfc4195acc Mon Sep 17 00:00:00 2001 From: Minura Punchihewa Date: Fri, 3 Jan 2025 11:41:59 +0530 Subject: [PATCH 226/391] renamed the pkg and module --- src/crewai_tools/tools/__init__.py | 2 +- .../tools/{ai_minds_tool => ai_mind_tool}/README.md | 0 .../tools/{ai_minds_tool => ai_mind_tool}/__init__.py | 0 .../ai_minds_tool.py => ai_mind_tool/ai_mind_tool.py} | 0 4 files changed, 1 insertion(+), 1 deletion(-) rename src/crewai_tools/tools/{ai_minds_tool => ai_mind_tool}/README.md (100%) rename src/crewai_tools/tools/{ai_minds_tool => ai_mind_tool}/__init__.py (100%) rename src/crewai_tools/tools/{ai_minds_tool/ai_minds_tool.py => ai_mind_tool/ai_mind_tool.py} (100%) diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index c125082f3..33d68fb26 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -1,4 +1,4 @@ -from .ai_minds_tool.ai_minds_tool import AIMindTool +from .ai_mind_tool.ai_mind_tool import AIMindTool from .brave_search_tool.brave_search_tool import BraveSearchTool from .browserbase_load_tool.browserbase_load_tool import BrowserbaseLoadTool from .code_docs_search_tool.code_docs_search_tool import CodeDocsSearchTool diff --git a/src/crewai_tools/tools/ai_minds_tool/README.md b/src/crewai_tools/tools/ai_mind_tool/README.md similarity index 100% rename from src/crewai_tools/tools/ai_minds_tool/README.md rename to src/crewai_tools/tools/ai_mind_tool/README.md diff --git a/src/crewai_tools/tools/ai_minds_tool/__init__.py b/src/crewai_tools/tools/ai_mind_tool/__init__.py similarity index 100% rename from src/crewai_tools/tools/ai_minds_tool/__init__.py rename to src/crewai_tools/tools/ai_mind_tool/__init__.py diff --git a/src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py b/src/crewai_tools/tools/ai_mind_tool/ai_mind_tool.py similarity index 100% rename from src/crewai_tools/tools/ai_minds_tool/ai_minds_tool.py rename to src/crewai_tools/tools/ai_mind_tool/ai_mind_tool.py From d1be5a937f6b19569b76a4aab2cccc7e6355a6dd Mon Sep 17 00:00:00 2001 From: Minura Punchihewa Date: Fri, 3 Jan 2025 11:48:11 +0530 Subject: [PATCH 227/391] moved constants like the base URL to a class --- .../tools/ai_mind_tool/ai_mind_tool.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/crewai_tools/tools/ai_mind_tool/ai_mind_tool.py b/src/crewai_tools/tools/ai_mind_tool/ai_mind_tool.py index 1059d0053..c36400d0b 100644 --- 
a/src/crewai_tools/tools/ai_mind_tool/ai_mind_tool.py +++ b/src/crewai_tools/tools/ai_mind_tool/ai_mind_tool.py @@ -7,7 +7,13 @@ from openai import OpenAI from pydantic import BaseModel -class AIMindInputSchema(BaseModel): +class AIMindToolConstants: + MINDS_API_BASE_URL = "https://mdb.ai/" + MIND_NAME_PREFIX = "crwai_mind_" + DATASOURCE_NAME_PREFIX = "crwai_ds_" + + +class AIMindToolInputSchema(BaseModel): """Input for AIMind Tool.""" query: str = "Question in natural language to ask the AI-Mind" @@ -22,7 +28,7 @@ class AIMindTool(BaseTool): "and Google BigQuery. " "Input should be a question in natural language." ) - args_schema: Type[BaseModel] = AIMindInputSchema + args_schema: Type[BaseModel] = AIMindToolInputSchema api_key: Optional[str] = None datasources: Optional[List[Dict[str, Any]]] = None mind_name: Optional[Text] = None @@ -49,7 +55,7 @@ class AIMindTool(BaseTool): datasources = [] for datasource in self.datasources: config = DatabaseConfig( - name=f"cai_ds_{secrets.token_hex(5)}", + name=f"{AIMindToolConstants.DATASOURCE_NAME_PREFIX}_{secrets.token_hex(5)}", engine=datasource["engine"], description=datasource["description"], connection_data=datasource["connection_data"], @@ -58,7 +64,7 @@ class AIMindTool(BaseTool): datasources.append(config) # Generate a random name for the Mind. - name = f"cai_mind_{secrets.token_hex(5)}" + name = f"{AIMindToolConstants.MIND_NAME_PREFIX}_{secrets.token_hex(5)}" mind = minds_client.minds.create( name=name, datasources=datasources, replace=True @@ -72,7 +78,7 @@ class AIMindTool(BaseTool): ): # Run the query on the AI-Mind. # The Minds API is OpenAI compatible and therefore, the OpenAI client can be used. - openai_client = OpenAI(base_url="https://mdb.ai/", api_key=self.api_key) + openai_client = OpenAI(base_url=AIMindToolConstants.MINDS_API_BASE_URL, api_key=self.api_key) completion = openai_client.chat.completions.create( model=self.mind_name, From ea85f02e035ba106ead271ee7b83d628feae2215 Mon Sep 17 00:00:00 2001 From: Minura Punchihewa Date: Fri, 3 Jan 2025 11:49:58 +0530 Subject: [PATCH 228/391] refactored the logic for accessing the API key --- src/crewai_tools/tools/ai_mind_tool/ai_mind_tool.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/src/crewai_tools/tools/ai_mind_tool/ai_mind_tool.py b/src/crewai_tools/tools/ai_mind_tool/ai_mind_tool.py index c36400d0b..b38426e09 100644 --- a/src/crewai_tools/tools/ai_mind_tool/ai_mind_tool.py +++ b/src/crewai_tools/tools/ai_mind_tool/ai_mind_tool.py @@ -34,7 +34,11 @@ class AIMindTool(BaseTool): mind_name: Optional[Text] = None def __init__(self, api_key: Optional[Text] = None, **kwargs): - super().__init__(api_key=api_key, **kwargs) + super().__init__(**kwargs) + self.api_key = api_key or os.getenv("MINDS_API_KEY") + if not self.api_key: + raise ValueError("API key must be provided either through constructor or MINDS_API_KEY environment variable") + try: from minds.client import Client # type: ignore from minds.datasources import DatabaseConfig # type: ignore @@ -43,12 +47,6 @@ class AIMindTool(BaseTool): "`minds_sdk` package not found, please run `pip install minds-sdk`" ) - if os.getenv("MINDS_API_KEY"): - self.api_key = os.getenv("MINDS_API_KEY") - - if self.api_key is None: - raise ValueError("A Minds API key is required to use the AIMind Tool.") - minds_client = Client(api_key=self.api_key) # Convert the datasources to DatabaseConfig objects. 
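The refactor above collapses key resolution into a single `or` expression, so an explicit argument now takes precedence over the environment (the opposite of patch 223's behavior). The same idiom in isolation — the function name is illustrative, not part of the tool:

```python
import os
from typing import Optional


def resolve_api_key(api_key: Optional[str] = None) -> str:
    # An explicit argument wins; the environment variable is only a fallback.
    key = api_key or os.getenv("MINDS_API_KEY")
    if not key:
        raise ValueError(
            "API key must be provided either through constructor "
            "or MINDS_API_KEY environment variable"
        )
    return key
```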
From 8047ee067cae2a416f9c3c1d3abde20e92452394 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Fri, 3 Jan 2025 03:34:34 -0300 Subject: [PATCH 229/391] treating for uninstalled dependencies --- src/crewai_tools/tools/linkup/linkup_search_tool.py | 12 +++++++++++- src/crewai_tools/tools/spider_tool/spider_tool.py | 2 +- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/linkup/linkup_search_tool.py b/src/crewai_tools/tools/linkup/linkup_search_tool.py index 8ddb81527..3106a7c0c 100644 --- a/src/crewai_tools/tools/linkup/linkup_search_tool.py +++ b/src/crewai_tools/tools/linkup/linkup_search_tool.py @@ -1,4 +1,9 @@ -from linkup import LinkupClient +try: + from linkup import LinkupClient + LINKUP_AVAILABLE = True +except ImportError: + LINKUP_AVAILABLE = False + from pydantic import PrivateAttr class LinkupSearchTool: @@ -10,6 +15,11 @@ class LinkupSearchTool: """ Initialize the tool with an API key. """ + if not LINKUP_AVAILABLE: + raise ImportError( + "The 'linkup' package is required to use the LinkupSearchTool. " + "Please install it with: uv add linkup" + ) self._client = LinkupClient(api_key=api_key) def _run(self, query: str, depth: str = "standard", output_type: str = "searchResults") -> dict: diff --git a/src/crewai_tools/tools/spider_tool/spider_tool.py b/src/crewai_tools/tools/spider_tool/spider_tool.py index 40959612f..87726f0bc 100644 --- a/src/crewai_tools/tools/spider_tool/spider_tool.py +++ b/src/crewai_tools/tools/spider_tool/spider_tool.py @@ -90,7 +90,7 @@ class SpiderTool(BaseTool): self.spider = Spider(api_key=api_key) except ImportError: raise ImportError( - "`spider-client` package not found, please run `pip install spider-client`" + "`spider-client` package not found, please run `uv add spider-client`" ) except Exception as e: raise RuntimeError(f"Failed to initialize Spider client: {str(e)}") From fa901453feea21a380da749454d5e38bf56464ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Fri, 3 Jan 2025 04:15:39 -0300 Subject: [PATCH 230/391] new version --- .../tools/linkup/linkup_search_tool.py | 5 ++- .../tools/weaviate_tool/vector_search.py | 33 +++++++++++-------- 2 files changed, 23 insertions(+), 15 deletions(-) diff --git a/src/crewai_tools/tools/linkup/linkup_search_tool.py b/src/crewai_tools/tools/linkup/linkup_search_tool.py index 3106a7c0c..b172ad029 100644 --- a/src/crewai_tools/tools/linkup/linkup_search_tool.py +++ b/src/crewai_tools/tools/linkup/linkup_search_tool.py @@ -1,15 +1,18 @@ +from typing import Any + try: from linkup import LinkupClient LINKUP_AVAILABLE = True except ImportError: LINKUP_AVAILABLE = False + LinkupClient = Any # type placeholder when package is not available from pydantic import PrivateAttr class LinkupSearchTool: name: str = "Linkup Search Tool" description: str = "Performs an API call to Linkup to retrieve contextual information." 
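The guard introduced for `linkup` generalizes to any optional dependency: attempt the import once at module load, record the outcome in a flag, substitute a typing placeholder so annotations still resolve, and raise an actionable `ImportError` with an install hint only when the tool is actually constructed. A self-contained sketch of the pattern, modeled on the linkup code above (the class here is a simplified stand-in, not the real tool):

```python
from typing import Any

try:
    from linkup import LinkupClient
    LINKUP_AVAILABLE = True
except ImportError:
    LINKUP_AVAILABLE = False
    LinkupClient = Any  # placeholder so module-level annotations still resolve


class LinkupSearchTool:
    def __init__(self, api_key: str):
        # Fail at construction time, with a concrete install hint,
        # rather than at import time.
        if not LINKUP_AVAILABLE:
            raise ImportError(
                "The 'linkup' package is required to use the LinkupSearchTool. "
                "Please install it with: uv add linkup"
            )
        self._client = LinkupClient(api_key=api_key)
```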
- _client: LinkupClient = PrivateAttr() + _client: LinkupClient = PrivateAttr() # type: ignore def __init__(self, api_key: str): """ diff --git a/src/crewai_tools/tools/weaviate_tool/vector_search.py b/src/crewai_tools/tools/weaviate_tool/vector_search.py index ab80b6ce1..a9c7ce519 100644 --- a/src/crewai_tools/tools/weaviate_tool/vector_search.py +++ b/src/crewai_tools/tools/weaviate_tool/vector_search.py @@ -1,12 +1,20 @@ +from typing import Any, Type, Optional import os import json -import weaviate -from pydantic import BaseModel, Field -from typing import Type, Optional -from crewai.tools import BaseTool +try: + import weaviate + from weaviate.classes.config import Configure, Vectorizers + from weaviate.classes.init import Auth + WEAVIATE_AVAILABLE = True +except ImportError: + WEAVIATE_AVAILABLE = False + weaviate = Any # type placeholder + Configure = Any + Vectorizers = Any + Auth = Any -from weaviate.classes.config import Configure, Vectorizers -from weaviate.classes.init import Auth +from pydantic import BaseModel, Field +from crewai.tools import BaseTool class WeaviateToolSchema(BaseModel): @@ -51,14 +59,11 @@ class WeaviateVectorSearchTool(BaseTool): ) def _run(self, query: str) -> str: - """Search the Weaviate database - - Args: - query (str): The query to search retrieve relevant information from the Weaviate database. Pass only the query as a string, not the question. - - Returns: - str: The result of the search query - """ + if not WEAVIATE_AVAILABLE: + raise ImportError( + "The 'weaviate-client' package is required to use the WeaviateVectorSearchTool. " + "Please install it with: uv add weaviate-client" + ) if not self.weaviate_cluster_url or not self.weaviate_api_key: raise ValueError("WEAVIATE_URL or WEAVIATE_API_KEY is not set") From 7efc092873aed3487e65eef94b81f2a0fa196479 Mon Sep 17 00:00:00 2001 From: Eduardo Chiarotti Date: Fri, 3 Jan 2025 08:47:52 -0300 Subject: [PATCH 231/391] fix: weaviate init parameters --- .../tools/weaviate_tool/vector_search.py | 32 ++++++++++++------- 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/src/crewai_tools/tools/weaviate_tool/vector_search.py b/src/crewai_tools/tools/weaviate_tool/vector_search.py index a9c7ce519..fc5641009 100644 --- a/src/crewai_tools/tools/weaviate_tool/vector_search.py +++ b/src/crewai_tools/tools/weaviate_tool/vector_search.py @@ -1,10 +1,12 @@ -from typing import Any, Type, Optional -import os import json +import os +from typing import Any, Optional, Type + try: import weaviate from weaviate.classes.config import Configure, Vectorizers from weaviate.classes.init import Auth + WEAVIATE_AVAILABLE = True except ImportError: WEAVIATE_AVAILABLE = False @@ -14,6 +16,7 @@ except ImportError: Auth = Any from pydantic import BaseModel, Field + from crewai.tools import BaseTool @@ -34,16 +37,8 @@ class WeaviateVectorSearchTool(BaseTool): args_schema: Type[BaseModel] = WeaviateToolSchema query: Optional[str] = None - vectorizer: Optional[Vectorizers] = Field( - default=Configure.Vectorizer.text2vec_openai( - model="nomic-embed-text", - ) - ) - generative_model: Optional[str] = Field( - default=Configure.Generative.openai( - model="gpt-4o", - ), - ) + vectorizer: Optional[Vectorizers] = None + generative_model: Optional[str] = None collection_name: Optional[str] = None limit: Optional[int] = Field(default=3) headers: Optional[dict] = Field( @@ -58,6 +53,19 @@ class WeaviateVectorSearchTool(BaseTool): description="The API key for the Weaviate cluster", ) + def __init__(self, **kwargs): + 
super().__init__(**kwargs) + if WEAVIATE_AVAILABLE: + self.vectorizer = self.vectorizer or Configure.Vectorizer.text2vec_openai( + model="nomic-embed-text", + ) + self.generative_model = ( + self.generative_model + or Configure.Generative.openai( + model="gpt-4o", + ) + ) + def _run(self, query: str) -> str: if not WEAVIATE_AVAILABLE: raise ImportError( From c31a8d6ee2e5207ef88b4ef4e6c5dba5ebbca7d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Fri, 3 Jan 2025 09:58:58 -0300 Subject: [PATCH 232/391] fix --- src/crewai_tools/tools/weaviate_tool/vector_search.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/crewai_tools/tools/weaviate_tool/vector_search.py b/src/crewai_tools/tools/weaviate_tool/vector_search.py index fc5641009..bfe80f966 100644 --- a/src/crewai_tools/tools/weaviate_tool/vector_search.py +++ b/src/crewai_tools/tools/weaviate_tool/vector_search.py @@ -36,7 +36,6 @@ class WeaviateVectorSearchTool(BaseTool): description: str = "A tool to search the Weaviate database for relevant information on internal documents." args_schema: Type[BaseModel] = WeaviateToolSchema query: Optional[str] = None - vectorizer: Optional[Vectorizers] = None generative_model: Optional[str] = None collection_name: Optional[str] = None From aafcf992ab4f8df96a994b73a126a085f695524f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Fri, 3 Jan 2025 10:03:53 -0300 Subject: [PATCH 233/391] fix weviate tool --- src/crewai_tools/tools/weaviate_tool/vector_search.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/crewai_tools/tools/weaviate_tool/vector_search.py b/src/crewai_tools/tools/weaviate_tool/vector_search.py index bfe80f966..14e10d7c5 100644 --- a/src/crewai_tools/tools/weaviate_tool/vector_search.py +++ b/src/crewai_tools/tools/weaviate_tool/vector_search.py @@ -40,9 +40,7 @@ class WeaviateVectorSearchTool(BaseTool): generative_model: Optional[str] = None collection_name: Optional[str] = None limit: Optional[int] = Field(default=3) - headers: Optional[dict] = Field( - default={"X-OpenAI-Api-Key": os.environ["OPENAI_API_KEY"]} - ) + headers: Optional[dict] = None weaviate_cluster_url: str = Field( ..., description="The URL of the Weaviate cluster", @@ -55,6 +53,12 @@ class WeaviateVectorSearchTool(BaseTool): def __init__(self, **kwargs): super().__init__(**kwargs) if WEAVIATE_AVAILABLE: + openai_api_key = os.environ.get("OPENAI_API_KEY") + if not openai_api_key: + raise ValueError( + "OPENAI_API_KEY environment variable is required for WeaviateVectorSearchTool and it is mandatory to use the tool." 
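                # The old Field default read os.environ["OPENAI_API_KEY"] at
                # import time and raised a bare KeyError when the variable was
                # unset; resolving the key here fails fast with an actionable
                # message instead.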
+ ) + self.headers = {"X-OpenAI-Api-Key": openai_api_key} self.vectorizer = self.vectorizer or Configure.Vectorizer.text2vec_openai( model="nomic-embed-text", ) From 66dee007b76da208a12d57806daba6d9ddf20c69 Mon Sep 17 00:00:00 2001 From: Mike Plachta Date: Fri, 3 Jan 2025 09:33:59 -0800 Subject: [PATCH 234/391] Vision Tool Improvement --- .../tools/vision_tool/vision_tool.py | 131 +++++++++--------- 1 file changed, 62 insertions(+), 69 deletions(-) diff --git a/src/crewai_tools/tools/vision_tool/vision_tool.py b/src/crewai_tools/tools/vision_tool/vision_tool.py index 6b7a21dbd..3479cbd74 100644 --- a/src/crewai_tools/tools/vision_tool/vision_tool.py +++ b/src/crewai_tools/tools/vision_tool/vision_tool.py @@ -1,18 +1,31 @@ import base64 -from typing import Type +from typing import Type, Optional +from pathlib import Path -import requests +from crewai.tools import BaseTool from openai import OpenAI -from pydantic import BaseModel - -from crewai_tools.tools.base_tool import BaseTool +from pydantic import BaseModel, validator class ImagePromptSchema(BaseModel): """Input for Vision Tool.""" - image_path_url: str = "The image path or URL." + @validator("image_path_url") + def validate_image_path_url(cls, v: str) -> str: + if v.startswith("http"): + return v + + path = Path(v) + if not path.exists(): + raise ValueError(f"Image file does not exist: {v}") + + # Validate supported formats + valid_extensions = {".jpg", ".jpeg", ".png", ".gif", ".webp"} + if path.suffix.lower() not in valid_extensions: + raise ValueError(f"Unsupported image format. Supported formats: {valid_extensions}") + + return v class VisionTool(BaseTool): name: str = "Vision Tool" @@ -20,75 +33,55 @@ class VisionTool(BaseTool): "This tool uses OpenAI's Vision API to describe the contents of an image." ) args_schema: Type[BaseModel] = ImagePromptSchema + _client: Optional[OpenAI] = None - def _run_web_hosted_images(self, client, image_path_url: str) -> str: - response = client.chat.completions.create( - model="gpt-4o-mini", - messages=[ - { - "role": "user", - "content": [ - {"type": "text", "text": "What's in this image?"}, - { - "type": "image_url", - "image_url": {"url": image_path_url}, - }, - ], - } - ], - max_tokens=300, - ) - - return response.choices[0].message.content - - def _run_local_images(self, client, image_path_url: str) -> str: - base64_image = self._encode_image(image_path_url) - - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {client.api_key}", - } - - payload = { - "model": "gpt-4o-mini", - "messages": [ - { - "role": "user", - "content": [ - {"type": "text", "text": "What's in this image?"}, - { - "type": "image_url", - "image_url": { - "url": f"data:image/jpeg;base64,{base64_image}" - }, - }, - ], - } - ], - "max_tokens": 300, - } - - response = requests.post( - "https://api.openai.com/v1/chat/completions", headers=headers, json=payload - ) - - return response.json()["choices"][0]["message"]["content"] + @property + def client(self) -> OpenAI: + """Cached OpenAI client instance.""" + if self._client is None: + self._client = OpenAI() + return self._client def _run(self, **kwargs) -> str: - client = OpenAI() + try: + image_path_url = kwargs.get("image_path_url") + if not image_path_url: + return "Image Path or URL is required." 
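            # The rewrite funnels both remote URLs and local files through a
            # single chat.completions call: local images are base64-encoded
            # into a data: URL, so the separate _run_web_hosted_images /
            # _run_local_images paths (and the loose `"http" in image_path_url`
            # check, which matched "http" anywhere in the string) are no
            # longer needed.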
+ + # Validate input using Pydantic + ImagePromptSchema(image_path_url=image_path_url) + + if image_path_url.startswith("http"): + image_data = image_path_url + else: + try: + base64_image = self._encode_image(image_path_url) + image_data = f"data:image/jpeg;base64,{base64_image}" + except Exception as e: + return f"Error processing image: {str(e)}" - image_path_url = kwargs.get("image_path_url") + response = self.client.chat.completions.create( + model="gpt-4o-mini", + messages=[ + { + "role": "user", + "content": [ + {"type": "text", "text": "What's in this image?"}, + { + "type": "image_url", + "image_url": {"url": image_data}, + } + ], + } + ], + max_tokens=300, + ) - if not image_path_url: - return "Image Path or URL is required." + return response.choices[0].message.content - if "http" in image_path_url: - image_description = self._run_web_hosted_images(client, image_path_url) - else: - image_description = self._run_local_images(client, image_path_url) + except Exception as e: + return f"An error occurred: {str(e)}" - return image_description - - def _encode_image(self, image_path: str): + def _encode_image(self, image_path: str) -> str: with open(image_path, "rb") as image_file: return base64.b64encode(image_file.read()).decode("utf-8") From 9f8529eab2d501ac0f7af245d84f75dae34cc55d Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Fri, 3 Jan 2025 13:43:50 -0800 Subject: [PATCH 235/391] add optional dependencies for all deps that we have --- .../tools/multion_tool/multion_tool.py | 2 +- .../pdf_text_writing_tool.py | 37 +++++++++++++++---- 2 files changed, 30 insertions(+), 9 deletions(-) diff --git a/src/crewai_tools/tools/multion_tool/multion_tool.py b/src/crewai_tools/tools/multion_tool/multion_tool.py index a991074da..1ae0f96d3 100644 --- a/src/crewai_tools/tools/multion_tool/multion_tool.py +++ b/src/crewai_tools/tools/multion_tool/multion_tool.py @@ -26,7 +26,7 @@ class MultiOnTool(BaseTool): ): super().__init__(**kwargs) try: - from multion.client import MultiOn # type: ignore + from multion.client import MultiOn except ImportError: raise ImportError( "`multion` package not found, please run `pip install multion`" diff --git a/src/crewai_tools/tools/pdf_text_writing_tool/pdf_text_writing_tool.py b/src/crewai_tools/tools/pdf_text_writing_tool/pdf_text_writing_tool.py index c3a686b14..851593167 100644 --- a/src/crewai_tools/tools/pdf_text_writing_tool/pdf_text_writing_tool.py +++ b/src/crewai_tools/tools/pdf_text_writing_tool/pdf_text_writing_tool.py @@ -1,29 +1,50 @@ -from typing import Any, Optional, Type +from typing import Optional, Type from pydantic import BaseModel, Field from pypdf import PdfReader, PdfWriter, PageObject, ContentStream, NameObject, Font from pathlib import Path +from crewai_tools.tools.rag.rag_tool import RagTool class PDFTextWritingToolSchema(BaseModel): """Input schema for PDFTextWritingTool.""" + pdf_path: str = Field(..., description="Path to the PDF file to modify") text: str = Field(..., description="Text to add to the PDF") - position: tuple = Field(..., description="Tuple of (x, y) coordinates for text placement") + position: tuple = Field( + ..., description="Tuple of (x, y) coordinates for text placement" + ) font_size: int = Field(default=12, description="Font size of the text") - font_color: str = Field(default="0 0 0 rg", description="RGB color code for the text") - font_name: Optional[str] = Field(default="F1", description="Font name for standard fonts") - font_file: Optional[str] = Field(None, description="Path to a .ttf font file for 
custom font usage") + font_color: str = Field( + default="0 0 0 rg", description="RGB color code for the text" + ) + font_name: Optional[str] = Field( + default="F1", description="Font name for standard fonts" + ) + font_file: Optional[str] = Field( + None, description="Path to a .ttf font file for custom font usage" + ) page_number: int = Field(default=0, description="Page number to add text to") class PDFTextWritingTool(RagTool): """A tool to add text to specific positions in a PDF, with custom font support.""" + name: str = "PDF Text Writing Tool" description: str = "A tool that can write text to a specific position in a PDF document, with optional custom font embedding." args_schema: Type[BaseModel] = PDFTextWritingToolSchema - def run(self, pdf_path: str, text: str, position: tuple, font_size: int, font_color: str, - font_name: str = "F1", font_file: Optional[str] = None, page_number: int = 0, **kwargs) -> str: + def run( + self, + pdf_path: str, + text: str, + position: tuple, + font_size: int, + font_color: str, + font_name: str = "F1", + font_file: Optional[str] = None, + page_number: int = 0, + **kwargs, + ) -> str: reader = PdfReader(pdf_path) writer = PdfWriter() @@ -63,4 +84,4 @@ class PDFTextWritingTool(RagTool): with open(font_file, "rb") as file: font = Font.true_type(file.read()) font_ref = writer.add_object(font) - return font_ref \ No newline at end of file + return font_ref From dab8f648cbcd426b91c1ab4eb5b3826924423e1d Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Fri, 3 Jan 2025 13:48:21 -0800 Subject: [PATCH 236/391] leave ignore --- src/crewai_tools/tools/multion_tool/multion_tool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/multion_tool/multion_tool.py b/src/crewai_tools/tools/multion_tool/multion_tool.py index 1ae0f96d3..a991074da 100644 --- a/src/crewai_tools/tools/multion_tool/multion_tool.py +++ b/src/crewai_tools/tools/multion_tool/multion_tool.py @@ -26,7 +26,7 @@ class MultiOnTool(BaseTool): ): super().__init__(**kwargs) try: - from multion.client import MultiOn + from multion.client import MultiOn # type: ignore except ImportError: raise ImportError( "`multion` package not found, please run `pip install multion`" From 2f8c07320b9f83b2159fc5d6ed07ae82628ecda2 Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Fri, 3 Jan 2025 22:38:55 +0000 Subject: [PATCH 237/391] feat: add new StagehandTool for stagehand integration This commit adds a new StagehandTool that integrates Stagehand's AI-powered web automation capabilities into CrewAI. The tool provides access to Stagehand's three core APIs: - act: Perform web interactions - extract: Extract information from web pages - observe: Monitor web page changes Each function takes atomic instructions to increase reliability. 
Co-Authored-By: Joe Moura --- .../tools/stagehand_tool/__init__.py | 5 + .../tools/stagehand_tool/stagehand_tool.py | 157 ++++++++++++++++++ 2 files changed, 162 insertions(+) create mode 100644 src/crewai_tools/tools/stagehand_tool/__init__.py create mode 100644 src/crewai_tools/tools/stagehand_tool/stagehand_tool.py diff --git a/src/crewai_tools/tools/stagehand_tool/__init__.py b/src/crewai_tools/tools/stagehand_tool/__init__.py new file mode 100644 index 000000000..cbd90dd15 --- /dev/null +++ b/src/crewai_tools/tools/stagehand_tool/__init__.py @@ -0,0 +1,5 @@ +"""Stagehand tool for web automation in CrewAI.""" + +from .stagehand_tool import StagehandTool + +__all__ = ["StagehandTool"] diff --git a/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py b/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py new file mode 100644 index 000000000..2fb003889 --- /dev/null +++ b/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py @@ -0,0 +1,157 @@ +""" +A tool for using Stagehand's AI-powered web automation capabilities in CrewAI. + +This tool provides access to Stagehand's three core APIs: +- act: Perform web interactions +- extract: Extract information from web pages +- observe: Monitor web page changes + +Each function takes atomic instructions to increase reliability. +""" + +import os +from typing import Any, Type + +from pydantic import BaseModel, Field + +from crewai.tools.base_tool import BaseTool + +# Define STAGEHAND_AVAILABLE at module level +STAGEHAND_AVAILABLE = False +try: + import stagehand + STAGEHAND_AVAILABLE = True +except ImportError: + pass # Keep STAGEHAND_AVAILABLE as False + + +class StagehandToolSchema(BaseModel): + """Schema for the StagehandTool input parameters. + + Examples: + ```python + # Using the 'act' API to click a button + tool.run( + api_method="act", + instruction="Click the 'Sign In' button" + ) + + # Using the 'extract' API to get text + tool.run( + api_method="extract", + instruction="Get the text content of the main article" + ) + + # Using the 'observe' API to monitor changes + tool.run( + api_method="observe", + instruction="Watch for changes in the shopping cart count" + ) + ``` + """ + api_method: str = Field( + ..., + description="The Stagehand API to use: 'act' for interactions, 'extract' for getting content, or 'observe' for monitoring changes", + pattern="^(act|extract|observe)$" + ) + instruction: str = Field( + ..., + description="An atomic instruction for Stagehand to execute. Instructions should be simple and specific to increase reliability." + ) + + +class StagehandTool(BaseTool): + """A tool for using Stagehand's AI-powered web automation capabilities. + + This tool provides access to Stagehand's three core APIs: + - act: Perform web interactions (e.g., clicking buttons, filling forms) + - extract: Extract information from web pages (e.g., getting text content) + - observe: Monitor web page changes (e.g., watching for updates) + + Each function takes atomic instructions to increase reliability. 
+ + Required Environment Variables: + OPENAI_API_KEY: API key for OpenAI (required by Stagehand) + + Examples: + ```python + tool = StagehandTool() + + # Perform a web interaction + result = tool.run( + api_method="act", + instruction="Click the 'Sign In' button" + ) + + # Extract content from a page + content = tool.run( + api_method="extract", + instruction="Get the text content of the main article" + ) + + # Monitor for changes + changes = tool.run( + api_method="observe", + instruction="Watch for changes in the shopping cart count" + ) + ``` + """ + + name: str = "StagehandTool" + description: str = ( + "A tool that uses Stagehand's AI-powered web automation to interact with websites. " + "It can perform actions (click, type, etc.), extract content, and observe changes. " + "Each instruction should be atomic (simple and specific) to increase reliability." + ) + args_schema: Type[BaseModel] = StagehandToolSchema + + def __init__(self, **kwargs: Any) -> None: + """Initialize the StagehandTool. + + The tool requires the OPENAI_API_KEY environment variable to be set. + """ + super().__init__(**kwargs) + + if not STAGEHAND_AVAILABLE: + raise ImportError( + "The 'stagehand' package is required to use this tool. " + "Please install it with: pip install stagehand" + ) + + self.api_key = os.getenv("OPENAI_API_KEY") + if not self.api_key: + raise ValueError( + "OPENAI_API_KEY environment variable is required for StagehandTool" + ) + + def _run(self, api_method: str, instruction: str, **kwargs: Any) -> Any: + """Execute a Stagehand command using the specified API method. + + Args: + api_method: The Stagehand API to use ('act', 'extract', or 'observe') + instruction: An atomic instruction for Stagehand to execute + **kwargs: Additional keyword arguments passed to the Stagehand API + + Returns: + The result from the Stagehand API call + + Raises: + ValueError: If an invalid api_method is provided + RuntimeError: If the Stagehand API call fails + """ + try: + # Initialize Stagehand with the OpenAI API key + st = stagehand.Stagehand(api_key=self.api_key) + + # Call the appropriate Stagehand API based on the method + if api_method == "act": + return st.act(instruction) + elif api_method == "extract": + return st.extract(instruction) + elif api_method == "observe": + return st.observe(instruction) + else: + raise ValueError(f"Unknown api_method: {api_method}") + + except Exception as e: + raise RuntimeError(f"Stagehand API call failed: {str(e)}") From ad4c7112231213c3dea896de86f81d35ac86c76a Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Fri, 3 Jan 2025 22:56:38 +0000 Subject: [PATCH 238/391] feat: implement reviewer suggestions for StagehandTool Co-Authored-By: Joe Moura --- .../tools/stagehand_tool/stagehand_tool.py | 203 +++++++++++++++--- 1 file changed, 175 insertions(+), 28 deletions(-) diff --git a/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py b/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py index 2fb003889..07c76c8c3 100644 --- a/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py +++ b/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py @@ -9,13 +9,18 @@ This tool provides access to Stagehand's three core APIs: Each function takes atomic instructions to increase reliability. 
""" +import logging import os -from typing import Any, Type +from functools import lru_cache +from typing import Any, Dict, List, Optional, Type, Union from pydantic import BaseModel, Field from crewai.tools.base_tool import BaseTool +# Set up logging +logger = logging.getLogger(__name__) + # Define STAGEHAND_AVAILABLE at module level STAGEHAND_AVAILABLE = False try: @@ -25,6 +30,32 @@ except ImportError: pass # Keep STAGEHAND_AVAILABLE as False +class StagehandResult(BaseModel): + """Result from a Stagehand operation. + + Attributes: + success: Whether the operation completed successfully + data: The result data from the operation + error: Optional error message if the operation failed + """ + success: bool = Field(..., description="Whether the operation completed successfully") + data: Union[str, Dict, List] = Field(..., description="The result data from the operation") + error: Optional[str] = Field(None, description="Optional error message if the operation failed") + + +class StagehandToolConfig(BaseModel): + """Configuration for the StagehandTool. + + Attributes: + api_key: OpenAI API key for Stagehand authentication + timeout: Maximum time in seconds to wait for operations (default: 30) + retry_attempts: Number of times to retry failed operations (default: 3) + """ + api_key: str = Field(..., description="OpenAI API key for Stagehand authentication") + timeout: int = Field(30, description="Maximum time in seconds to wait for operations") + retry_attempts: int = Field(3, description="Number of times to retry failed operations") + + class StagehandToolSchema(BaseModel): """Schema for the StagehandTool input parameters. @@ -56,7 +87,9 @@ class StagehandToolSchema(BaseModel): ) instruction: str = Field( ..., - description="An atomic instruction for Stagehand to execute. Instructions should be simple and specific to increase reliability." + description="An atomic instruction for Stagehand to execute. Instructions should be simple and specific to increase reliability.", + min_length=1, + max_length=500 ) @@ -105,10 +138,17 @@ class StagehandTool(BaseTool): ) args_schema: Type[BaseModel] = StagehandToolSchema - def __init__(self, **kwargs: Any) -> None: + def __init__(self, config: StagehandToolConfig | None = None, **kwargs: Any) -> None: """Initialize the StagehandTool. - The tool requires the OPENAI_API_KEY environment variable to be set. + Args: + config: Optional configuration for the tool. If not provided, + will attempt to use OPENAI_API_KEY from environment. + **kwargs: Additional keyword arguments passed to the base class. + + Raises: + ImportError: If the stagehand package is not installed + ValueError: If no API key is provided via config or environment """ super().__init__(**kwargs) @@ -117,14 +157,82 @@ class StagehandTool(BaseTool): "The 'stagehand' package is required to use this tool. 
" "Please install it with: pip install stagehand" ) - - self.api_key = os.getenv("OPENAI_API_KEY") - if not self.api_key: - raise ValueError( - "OPENAI_API_KEY environment variable is required for StagehandTool" + + # Use config if provided, otherwise try environment variable + if config is not None: + self.config = config + else: + api_key = os.getenv("OPENAI_API_KEY") + if not api_key: + raise ValueError( + "Either provide config with api_key or set OPENAI_API_KEY environment variable" + ) + self.config = StagehandToolConfig( + api_key=api_key, + timeout=30, + retry_attempts=3 ) - def _run(self, api_method: str, instruction: str, **kwargs: Any) -> Any: + @lru_cache(maxsize=100) + def _cached_run(self, api_method: str, instruction: str) -> Any: + """Execute a cached Stagehand command. + + This method is cached to improve performance for repeated operations. + + Args: + api_method: The Stagehand API to use ('act', 'extract', or 'observe') + instruction: An atomic instruction for Stagehand to execute + + Returns: + The raw result from the Stagehand API call + + Raises: + ValueError: If an invalid api_method is provided + Exception: If the Stagehand API call fails + """ + logger.debug( + "Cache operation - Method: %s, Instruction length: %d", + api_method, + len(instruction) + ) + + # Initialize Stagehand with configuration + logger.info( + "Initializing Stagehand (timeout=%ds, retries=%d)", + self.config.timeout, + self.config.retry_attempts + ) + st = stagehand.Stagehand( + api_key=self.config.api_key, + timeout=self.config.timeout, + retry_attempts=self.config.retry_attempts + ) + + # Call the appropriate Stagehand API based on the method + logger.info("Executing %s operation with instruction: %s", api_method, instruction[:100]) + try: + if api_method == "act": + result = st.act(instruction) + elif api_method == "extract": + result = st.extract(instruction) + elif api_method == "observe": + result = st.observe(instruction) + else: + raise ValueError(f"Unknown api_method: {api_method}") + + + logger.info("Successfully executed %s operation", api_method) + return result + + except Exception as e: + logger.warning( + "Operation failed (method=%s, error=%s), will be retried on next attempt", + api_method, + str(e) + ) + raise + + def _run(self, api_method: str, instruction: str, **kwargs: Any) -> StagehandResult: """Execute a Stagehand command using the specified API method. 
Args: @@ -132,26 +240,65 @@ class StagehandTool(BaseTool): instruction: An atomic instruction for Stagehand to execute **kwargs: Additional keyword arguments passed to the Stagehand API - Returns: - The result from the Stagehand API call - - Raises: - ValueError: If an invalid api_method is provided - RuntimeError: If the Stagehand API call fails + Returns: + StagehandResult containing the operation result and status """ try: - # Initialize Stagehand with the OpenAI API key - st = stagehand.Stagehand(api_key=self.api_key) + # Log operation context + logger.debug( + "Starting operation - Method: %s, Instruction length: %d, Args: %s", + api_method, + len(instruction), + kwargs + ) - # Call the appropriate Stagehand API based on the method - if api_method == "act": - return st.act(instruction) - elif api_method == "extract": - return st.extract(instruction) - elif api_method == "observe": - return st.observe(instruction) - else: - raise ValueError(f"Unknown api_method: {api_method}") + # Use cached execution + result = self._cached_run(api_method, instruction) + logger.info("Operation completed successfully") + return StagehandResult(success=True, data=result) + except stagehand.AuthenticationError as e: + logger.error( + "Authentication failed - Method: %s, Error: %s", + api_method, + str(e) + ) + return StagehandResult( + success=False, + data={}, + error=f"Authentication failed: {str(e)}" + ) + except stagehand.APIError as e: + logger.error( + "API error - Method: %s, Error: %s", + api_method, + str(e) + ) + return StagehandResult( + success=False, + data={}, + error=f"API error: {str(e)}" + ) + except stagehand.BrowserError as e: + logger.error( + "Browser error - Method: %s, Error: %s", + api_method, + str(e) + ) + return StagehandResult( + success=False, + data={}, + error=f"Browser error: {str(e)}" + ) except Exception as e: - raise RuntimeError(f"Stagehand API call failed: {str(e)}") + logger.error( + "Unexpected error - Method: %s, Error type: %s, Message: %s", + api_method, + type(e).__name__, + str(e) + ) + return StagehandResult( + success=False, + data={}, + error=f"Unexpected error: {str(e)}" + ) From c27727b16eb37b00773197162b32312dde16ff17 Mon Sep 17 00:00:00 2001 From: Marco Vinciguerra Date: Tue, 7 Jan 2025 15:51:52 +0100 Subject: [PATCH 239/391] Update scrapegraph_scrape_tool.py --- .../scrapegraph_scrape_tool.py | 27 +++++-------------- 1 file changed, 7 insertions(+), 20 deletions(-) diff --git a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py index 906bf6376..9b5806b19 100644 --- a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py +++ b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py @@ -60,16 +60,19 @@ class ScrapegraphScrapeTool(BaseTool): website_url: Optional[str] = None user_prompt: Optional[str] = None api_key: Optional[str] = None + enable_logging: bool = False def __init__( self, website_url: Optional[str] = None, user_prompt: Optional[str] = None, api_key: Optional[str] = None, + enable_logging: bool = False, **kwargs, ): super().__init__(**kwargs) self.api_key = api_key or os.getenv("SCRAPEGRAPH_API_KEY") + self.enable_logging = enable_logging if not self.api_key: raise ValueError("Scrapegraph API key is required") @@ -83,8 +86,9 @@ class ScrapegraphScrapeTool(BaseTool): if user_prompt is not None: self.user_prompt = user_prompt - # Configure logging - sgai_logger.set_logging(level="INFO") + # Configure logging 
only if enabled + if self.enable_logging: + sgai_logger.set_logging(level="INFO") @staticmethod def _validate_url(url: str) -> None: @@ -96,22 +100,6 @@ class ScrapegraphScrapeTool(BaseTool): except Exception: raise ValueError("Invalid URL format. URL must include scheme (http/https) and domain") - def _handle_api_response(self, response: dict) -> str: - """Handle and validate API response""" - if not response: - raise RuntimeError("Empty response from Scrapegraph API") - - if "error" in response: - error_msg = response.get("error", {}).get("message", "Unknown error") - if "rate limit" in error_msg.lower(): - raise RateLimitError(f"Rate limit exceeded: {error_msg}") - raise RuntimeError(f"API error: {error_msg}") - - if "result" not in response: - raise RuntimeError("Invalid response format from Scrapegraph API") - - return response["result"] - def _run( self, **kwargs: Any, @@ -135,8 +123,7 @@ class ScrapegraphScrapeTool(BaseTool): user_prompt=user_prompt, ) - # Handle and validate the response - return self._handle_api_response(response) + return response except RateLimitError: raise # Re-raise rate limit errors From 4f4b0619079235dcdc59522879822cad5bf0e32a Mon Sep 17 00:00:00 2001 From: Marco Vinciguerra Date: Tue, 7 Jan 2025 16:13:50 +0100 Subject: [PATCH 240/391] fix: scrapegraph-tool --- .../scrapegraph_scrape_tool.py | 27 +++++-------------- 1 file changed, 7 insertions(+), 20 deletions(-) diff --git a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py index 906bf6376..9b5806b19 100644 --- a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py +++ b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py @@ -60,16 +60,19 @@ class ScrapegraphScrapeTool(BaseTool): website_url: Optional[str] = None user_prompt: Optional[str] = None api_key: Optional[str] = None + enable_logging: bool = False def __init__( self, website_url: Optional[str] = None, user_prompt: Optional[str] = None, api_key: Optional[str] = None, + enable_logging: bool = False, **kwargs, ): super().__init__(**kwargs) self.api_key = api_key or os.getenv("SCRAPEGRAPH_API_KEY") + self.enable_logging = enable_logging if not self.api_key: raise ValueError("Scrapegraph API key is required") @@ -83,8 +86,9 @@ class ScrapegraphScrapeTool(BaseTool): if user_prompt is not None: self.user_prompt = user_prompt - # Configure logging - sgai_logger.set_logging(level="INFO") + # Configure logging only if enabled + if self.enable_logging: + sgai_logger.set_logging(level="INFO") @staticmethod def _validate_url(url: str) -> None: @@ -96,22 +100,6 @@ class ScrapegraphScrapeTool(BaseTool): except Exception: raise ValueError("Invalid URL format. 
URL must include scheme (http/https) and domain") - def _handle_api_response(self, response: dict) -> str: - """Handle and validate API response""" - if not response: - raise RuntimeError("Empty response from Scrapegraph API") - - if "error" in response: - error_msg = response.get("error", {}).get("message", "Unknown error") - if "rate limit" in error_msg.lower(): - raise RateLimitError(f"Rate limit exceeded: {error_msg}") - raise RuntimeError(f"API error: {error_msg}") - - if "result" not in response: - raise RuntimeError("Invalid response format from Scrapegraph API") - - return response["result"] - def _run( self, **kwargs: Any, @@ -135,8 +123,7 @@ class ScrapegraphScrapeTool(BaseTool): user_prompt=user_prompt, ) - # Handle and validate the response - return self._handle_api_response(response) + return response except RateLimitError: raise # Re-raise rate limit errors From e5aabe05e1b85d2c574a72bde85a6488fbd0d20c Mon Sep 17 00:00:00 2001 From: Brandon Hancock Date: Wed, 8 Jan 2025 14:56:12 -0500 Subject: [PATCH 241/391] improve serper and firecrawl --- .../firecrawl_crawl_website_tool.py | 59 ++++++++----------- .../firecrawl_scrape_website_tool.py | 29 +++------ .../firecrawl_search_tool.py | 47 +++++++++++---- .../tools/serper_dev_tool/serper_dev_tool.py | 2 +- 4 files changed, 71 insertions(+), 66 deletions(-) diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py index edada38dd..0eafd6e4a 100644 --- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py @@ -1,9 +1,8 @@ import os from typing import TYPE_CHECKING, Any, Dict, Optional, Type -from pydantic import BaseModel, ConfigDict, Field - from crewai.tools import BaseTool +from pydantic import BaseModel, ConfigDict, Field # Type checking import if TYPE_CHECKING: @@ -12,6 +11,14 @@ if TYPE_CHECKING: class FirecrawlCrawlWebsiteToolSchema(BaseModel): url: str = Field(description="Website URL") + crawler_options: Optional[Dict[str, Any]] = Field( + default=None, description="Options for crawling" + ) + timeout: Optional[int] = Field( + default=30000, + description="Timeout in milliseconds for the crawling operation. The default value is 30000.", + ) + class FirecrawlCrawlWebsiteTool(BaseTool): model_config = ConfigDict( @@ -20,25 +27,10 @@ class FirecrawlCrawlWebsiteTool(BaseTool): name: str = "Firecrawl web crawl tool" description: str = "Crawl webpages using Firecrawl and return the contents" args_schema: Type[BaseModel] = FirecrawlCrawlWebsiteToolSchema - firecrawl_app: Optional["FirecrawlApp"] = None api_key: Optional[str] = None - url: Optional[str] = None - params: Optional[Dict[str, Any]] = None - poll_interval: Optional[int] = 2 - idempotency_key: Optional[str] = None + firecrawl: Optional["FirecrawlApp"] = None def __init__(self, api_key: Optional[str] = None, **kwargs): - """Initialize FirecrawlCrawlWebsiteTool. - - Args: - api_key (Optional[str]): Firecrawl API key. If not provided, will check FIRECRAWL_API_KEY env var. - url (Optional[str]): Base URL to crawl. Can be overridden by the _run method. - firecrawl_app (Optional[FirecrawlApp]): Previously created FirecrawlApp instance. - params (Optional[Dict[str, Any]]): Additional parameters to pass to the FirecrawlApp. - poll_interval (Optional[int]): Poll interval for the FirecrawlApp. 
- idempotency_key (Optional[str]): Idempotency key for the FirecrawlApp. - **kwargs: Additional arguments passed to BaseTool. - """ super().__init__(**kwargs) try: from firecrawl import FirecrawlApp # type: ignore @@ -47,28 +39,29 @@ class FirecrawlCrawlWebsiteTool(BaseTool): "`firecrawl` package not found, please run `pip install firecrawl-py`" ) - # Allows passing a previously created FirecrawlApp instance - # or builds a new one with the provided API key - if not self.firecrawl_app: - client_api_key = api_key or os.getenv("FIRECRAWL_API_KEY") + if not self.firecrawl: + client_api_key = api_key or os.getenv("FIRECRAWL_API_KEY") if not client_api_key: raise ValueError( "FIRECRAWL_API_KEY is not set. Please provide it either via the constructor " "with the `api_key` argument or by setting the FIRECRAWL_API_KEY environment variable." ) - self.firecrawl_app = FirecrawlApp(api_key=client_api_key) + self.firecrawl = FirecrawlApp(api_key=client_api_key) - def _run(self, url: str): - # Unless url has been previously set via constructor by the user, - # use the url argument provided by the agent at runtime. - base_url = self.url or url + def _run( + self, + url: str, + crawler_options: Optional[Dict[str, Any]] = None, + timeout: Optional[int] = 30000, + ): + if crawler_options is None: + crawler_options = {} - return self.firecrawl_app.crawl_url( - base_url, - params=self.params, - poll_interval=self.poll_interval, - idempotency_key=self.idempotency_key - ) + options = { + "crawlerOptions": crawler_options, + "timeout": timeout, + } + return self.firecrawl.crawl_url(url, options) try: diff --git a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py index 9ab7d293e..8b2a37185 100644 --- a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING, Any, Dict, Optional, Type +from typing import TYPE_CHECKING, Optional, Type from crewai.tools import BaseTool from pydantic import BaseModel, ConfigDict, Field @@ -10,14 +10,8 @@ if TYPE_CHECKING: class FirecrawlScrapeWebsiteToolSchema(BaseModel): url: str = Field(description="Website URL") - page_options: Optional[Dict[str, Any]] = Field( - default=None, description="Options for page scraping" - ) - extractor_options: Optional[Dict[str, Any]] = Field( - default=None, description="Options for data extraction" - ) timeout: Optional[int] = Field( - default=None, + default=30000, description="Timeout in milliseconds for the scraping operation. 
The default value is 30000.", ) @@ -46,20 +40,15 @@ class FirecrawlScrapeWebsiteTool(BaseTool): def _run( self, url: str, - page_options: Optional[Dict[str, Any]] = None, - extractor_options: Optional[Dict[str, Any]] = None, - timeout: Optional[int] = None, + timeout: Optional[int] = 30000, ): - if page_options is None: - page_options = {} - if extractor_options is None: - extractor_options = {} - if timeout is None: - timeout = 30000 - options = { - "pageOptions": page_options, - "extractorOptions": extractor_options, + "formats": ["markdown"], + "onlyMainContent": True, + "includeTags": [], + "excludeTags": [], + "headers": {}, + "waitFor": 0, "timeout": timeout, } return self.firecrawl.scrape_url(url, options) diff --git a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py index 5efd274de..36ba16391 100644 --- a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py +++ b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py @@ -10,11 +10,22 @@ if TYPE_CHECKING: class FirecrawlSearchToolSchema(BaseModel): query: str = Field(description="Search query") - page_options: Optional[Dict[str, Any]] = Field( - default=None, description="Options for result formatting" + limit: Optional[int] = Field( + default=5, description="Maximum number of results to return" ) - search_options: Optional[Dict[str, Any]] = Field( - default=None, description="Options for searching" + tbs: Optional[str] = Field(default=None, description="Time-based search parameter") + lang: Optional[str] = Field( + default="en", description="Language code for search results" + ) + country: Optional[str] = Field( + default="us", description="Country code for search results" + ) + location: Optional[str] = Field( + default=None, description="Location parameter for search results" + ) + timeout: Optional[int] = Field(default=60000, description="Timeout in milliseconds") + scrape_options: Optional[Dict[str, Any]] = Field( + default=None, description="Options for scraping search results" ) @@ -39,13 +50,25 @@ class FirecrawlSearchTool(BaseTool): def _run( self, query: str, - page_options: Optional[Dict[str, Any]] = None, - result_options: Optional[Dict[str, Any]] = None, + limit: Optional[int] = 5, + tbs: Optional[str] = None, + lang: Optional[str] = "en", + country: Optional[str] = "us", + location: Optional[str] = None, + timeout: Optional[int] = 60000, + scrape_options: Optional[Dict[str, Any]] = None, ): - if page_options is None: - page_options = {} - if result_options is None: - result_options = {} + if scrape_options is None: + scrape_options = {} - options = {"pageOptions": page_options, "resultOptions": result_options} - return self.firecrawl.search(query, **options) + options = { + "query": query, + "limit": limit, + "tbs": tbs, + "lang": lang, + "country": country, + "location": location, + "timeout": timeout, + "scrapeOptions": scrape_options, + } + return self.firecrawl.search(**options) diff --git a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py index 5e8986c7e..e9eab56a2 100644 --- a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py +++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py @@ -35,7 +35,7 @@ class SerperDevToolSchema(BaseModel): class SerperDevTool(BaseTool): - name: str = "Search the internet" + name: str = "Search the internet with Serper" description: str = ( "A tool that can be used to search the 
internet with a search_query. " "Supports different search types: 'search' (default), 'news'" From 90a335de46f33f0c7d2519e36e2ad14c2a564e3b Mon Sep 17 00:00:00 2001 From: Brandon Hancock Date: Wed, 8 Jan 2025 15:12:29 -0500 Subject: [PATCH 242/391] Fix patronus issues as well --- src/crewai_tools/__init__.py | 6 +++--- src/crewai_tools/tools/__init__.py | 17 ++++++++++++----- .../tools/patronus_eval_tool/__init__.py | 3 +++ 3 files changed, 18 insertions(+), 8 deletions(-) create mode 100644 src/crewai_tools/tools/patronus_eval_tool/__init__.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 34e33d60f..2db0fa05f 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -35,6 +35,8 @@ from .tools import ( ScrapeWebsiteTool, ScrapflyScrapeWebsiteTool, SeleniumScrapingTool, + SerpApiGoogleSearchTool, + SerpApiGoogleShoppingTool, SerperDevTool, SerplyJobSearchTool, SerplyNewsSearchTool, @@ -44,11 +46,9 @@ from .tools import ( SpiderTool, TXTSearchTool, VisionTool, + WeaviateVectorSearchTool, WebsiteSearchTool, XMLSearchTool, YoutubeChannelSearchTool, YoutubeVideoSearchTool, - WeaviateVectorSearchTool, - SerpApiGoogleSearchTool, - SerpApiGoogleShoppingTool, ) diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 55ced936d..e4288a310 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -26,19 +26,28 @@ from .mdx_seach_tool.mdx_search_tool import MDXSearchTool from .multion_tool.multion_tool import MultiOnTool from .mysql_search_tool.mysql_search_tool import MySQLSearchTool from .nl2sql.nl2sql_tool import NL2SQLTool -from .patronus_eval_tool.eval_tool import PatronusEvalTool +from .patronus_eval_tool import ( + PatronusEvalTool, + PatronusLocalEvaluatorTool, + PatronusPredefinedCriteriaEvalTool, +) from .pdf_search_tool.pdf_search_tool import PDFSearchTool from .pg_seach_tool.pg_search_tool import PGSearchTool from .rag.rag_tool import RagTool from .scrape_element_from_website.scrape_element_from_website import ( ScrapeElementFromWebsiteTool, ) -from .scrapegraph_scrape_tool.scrapegraph_scrape_tool import ScrapegraphScrapeTool, ScrapegraphScrapeToolSchema from .scrape_website_tool.scrape_website_tool import ScrapeWebsiteTool +from .scrapegraph_scrape_tool.scrapegraph_scrape_tool import ( + ScrapegraphScrapeTool, + ScrapegraphScrapeToolSchema, +) from .scrapfly_scrape_website_tool.scrapfly_scrape_website_tool import ( ScrapflyScrapeWebsiteTool, ) from .selenium_scraping_tool.selenium_scraping_tool import SeleniumScrapingTool +from .serpapi_tool.serpapi_google_search_tool import SerpApiGoogleSearchTool +from .serpapi_tool.serpapi_google_shopping_tool import SerpApiGoogleShoppingTool from .serper_dev_tool.serper_dev_tool import SerperDevTool from .serply_api_tool.serply_job_search_tool import SerplyJobSearchTool from .serply_api_tool.serply_news_search_tool import SerplyNewsSearchTool @@ -48,12 +57,10 @@ from .serply_api_tool.serply_webpage_to_markdown_tool import SerplyWebpageToMark from .spider_tool.spider_tool import SpiderTool from .txt_search_tool.txt_search_tool import TXTSearchTool from .vision_tool.vision_tool import VisionTool +from .weaviate_tool.vector_search import WeaviateVectorSearchTool from .website_search.website_search_tool import WebsiteSearchTool from .xml_search_tool.xml_search_tool import XMLSearchTool from .youtube_channel_search_tool.youtube_channel_search_tool import ( YoutubeChannelSearchTool, ) from 
.youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool -from .weaviate_tool.vector_search import WeaviateVectorSearchTool -from .serpapi_tool.serpapi_google_search_tool import SerpApiGoogleSearchTool -from .serpapi_tool.serpapi_google_shopping_tool import SerpApiGoogleShoppingTool diff --git a/src/crewai_tools/tools/patronus_eval_tool/__init__.py b/src/crewai_tools/tools/patronus_eval_tool/__init__.py new file mode 100644 index 000000000..351cced92 --- /dev/null +++ b/src/crewai_tools/tools/patronus_eval_tool/__init__.py @@ -0,0 +1,3 @@ +from .patronus_eval_tool import PatronusEvalTool +from .patronus_local_evaluator_tool import PatronusLocalEvaluatorTool +from .patronus_predefined_criteria_eval_tool import PatronusPredefinedCriteriaEvalTool From ecbf550be9d5fc7b1d37c633ddf5721e3a049c88 Mon Sep 17 00:00:00 2001 From: Brandon Hancock Date: Wed, 8 Jan 2025 16:38:38 -0500 Subject: [PATCH 243/391] Fix firecrawl errors --- .../firecrawl_crawl_website_tool.py | 21 ++++++++--------- .../firecrawl_scrape_website_tool.py | 10 ++++---- .../firecrawl_search_tool.py | 23 ++++++++++++++----- 3 files changed, 32 insertions(+), 22 deletions(-) diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py index 0eafd6e4a..6c7c4ffd9 100644 --- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py @@ -2,7 +2,7 @@ import os from typing import TYPE_CHECKING, Any, Dict, Optional, Type from crewai.tools import BaseTool -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict, Field, PrivateAttr # Type checking import if TYPE_CHECKING: @@ -28,7 +28,7 @@ class FirecrawlCrawlWebsiteTool(BaseTool): description: str = "Crawl webpages using Firecrawl and return the contents" args_schema: Type[BaseModel] = FirecrawlCrawlWebsiteToolSchema api_key: Optional[str] = None - firecrawl: Optional["FirecrawlApp"] = None + _firecrawl: Optional["FirecrawlApp"] = PrivateAttr(None) def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) @@ -39,14 +39,13 @@ class FirecrawlCrawlWebsiteTool(BaseTool): "`firecrawl` package not found, please run `pip install firecrawl-py`" ) - if not self.firecrawl: - client_api_key = api_key or os.getenv("FIRECRAWL_API_KEY") - if not client_api_key: - raise ValueError( - "FIRECRAWL_API_KEY is not set. Please provide it either via the constructor " - "with the `api_key` argument or by setting the FIRECRAWL_API_KEY environment variable." - ) - self.firecrawl = FirecrawlApp(api_key=client_api_key) + client_api_key = api_key or os.getenv("FIRECRAWL_API_KEY") + if not client_api_key: + raise ValueError( + "FIRECRAWL_API_KEY is not set. Please provide it either via the constructor " + "with the `api_key` argument or by setting the FIRECRAWL_API_KEY environment variable." 
+ ) + self._firecrawl = FirecrawlApp(api_key=client_api_key) def _run( self, @@ -61,7 +60,7 @@ class FirecrawlCrawlWebsiteTool(BaseTool): "crawlerOptions": crawler_options, "timeout": timeout, } - return self.firecrawl.crawl_url(url, options) + return self._firecrawl.crawl_url(url, options) try: diff --git a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py index 8b2a37185..9458e7a4f 100644 --- a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py @@ -1,7 +1,7 @@ from typing import TYPE_CHECKING, Optional, Type from crewai.tools import BaseTool -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict, Field, PrivateAttr # Type checking import if TYPE_CHECKING: @@ -21,10 +21,10 @@ class FirecrawlScrapeWebsiteTool(BaseTool): arbitrary_types_allowed=True, validate_assignment=True, frozen=False ) name: str = "Firecrawl web scrape tool" - description: str = "Scrape webpages url using Firecrawl and return the contents" + description: str = "Scrape webpages using Firecrawl and return the contents" args_schema: Type[BaseModel] = FirecrawlScrapeWebsiteToolSchema api_key: Optional[str] = None - firecrawl: Optional["FirecrawlApp"] = None # Updated to use TYPE_CHECKING + _firecrawl: Optional["FirecrawlApp"] = PrivateAttr(None) def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) @@ -35,7 +35,7 @@ class FirecrawlScrapeWebsiteTool(BaseTool): "`firecrawl` package not found, please run `pip install firecrawl-py`" ) - self.firecrawl = FirecrawlApp(api_key=api_key) + self._firecrawl = FirecrawlApp(api_key=api_key) def _run( self, @@ -51,7 +51,7 @@ class FirecrawlScrapeWebsiteTool(BaseTool): "waitFor": 0, "timeout": timeout, } - return self.firecrawl.scrape_url(url, options) + return self._firecrawl.scrape_url(url, options) try: diff --git a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py index 36ba16391..da483fb34 100644 --- a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py +++ b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py @@ -1,7 +1,7 @@ from typing import TYPE_CHECKING, Any, Dict, Optional, Type from crewai.tools import BaseTool -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field, PrivateAttr # Type checking import if TYPE_CHECKING: @@ -30,11 +30,14 @@ class FirecrawlSearchToolSchema(BaseModel): class FirecrawlSearchTool(BaseTool): + model_config = ConfigDict( + arbitrary_types_allowed=True, validate_assignment=True, frozen=False + ) name: str = "Firecrawl web search tool" description: str = "Search webpages using Firecrawl and return the results" args_schema: Type[BaseModel] = FirecrawlSearchToolSchema api_key: Optional[str] = None - firecrawl: Optional["FirecrawlApp"] = None + _firecrawl: Optional["FirecrawlApp"] = PrivateAttr(None) def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) @@ -44,8 +47,7 @@ class FirecrawlSearchTool(BaseTool): raise ImportError( "`firecrawl` package not found, please run `pip install firecrawl-py`" ) - - self.firecrawl = FirecrawlApp(api_key=api_key) + self._firecrawl = FirecrawlApp(api_key=api_key) def _run( self, @@ -62,7 +64,6 @@ class 
FirecrawlSearchTool(BaseTool): scrape_options = {} options = { - "query": query, "limit": limit, "tbs": tbs, "lang": lang, @@ -71,4 +72,14 @@ class FirecrawlSearchTool(BaseTool): "timeout": timeout, "scrapeOptions": scrape_options, } - return self.firecrawl.search(**options) + return self._firecrawl.search(query, options) + + +try: + from firecrawl import FirecrawlApp + + # Rebuild the model after class is defined + FirecrawlSearchTool.model_rebuild() +except ImportError: + # Exception can be ignored if the tool is not used + pass From 40dcf63a70dc37c9430fad3f35a0cd021adc3537 Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Fri, 10 Jan 2025 13:51:39 -0800 Subject: [PATCH 244/391] optional deps for most --- .../browserbase_load_tool.py | 27 ++- .../firecrawl_crawl_website_tool.py | 18 +- .../firecrawl_scrape_website_tool.py | 18 +- .../firecrawl_search_tool.py | 18 +- .../tools/linkup/linkup_search_tool.py | 44 +++-- .../tools/multion_tool/multion_tool.py | 15 +- .../patronus_local_evaluator_tool.py | 45 +++-- .../scrapegraph_scrape_tool.py | 77 ++++++--- .../scrapfly_scrape_website_tool.py | 15 +- .../selenium_scraping_tool.py | 56 ++++-- .../tools/serpapi_tool/serpapi_base_tool.py | 16 +- .../tools/spider_tool/spider_tool.py | 20 ++- .../tools/stagehand_tool/stagehand_tool.py | 163 +++++++++--------- .../tools/weaviate_tool/vector_search.py | 17 +- 14 files changed, 374 insertions(+), 175 deletions(-) diff --git a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py index 2ca1b95fc..6ac798df9 100644 --- a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py +++ b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py @@ -11,12 +11,10 @@ class BrowserbaseLoadToolSchema(BaseModel): class BrowserbaseLoadTool(BaseTool): name: str = "Browserbase web load tool" - description: str = ( - "Load webpages url in a headless browser using Browserbase and return the contents" - ) + description: str = "Load webpages url in a headless browser using Browserbase and return the contents" args_schema: Type[BaseModel] = BrowserbaseLoadToolSchema - api_key: Optional[str] = os.getenv('BROWSERBASE_API_KEY') - project_id: Optional[str] = os.getenv('BROWSERBASE_PROJECT_ID') + api_key: Optional[str] = os.getenv("BROWSERBASE_API_KEY") + project_id: Optional[str] = os.getenv("BROWSERBASE_PROJECT_ID") text_content: Optional[bool] = False session_id: Optional[str] = None proxy: Optional[bool] = None @@ -33,13 +31,24 @@ class BrowserbaseLoadTool(BaseTool): ): super().__init__(**kwargs) if not self.api_key: - raise EnvironmentError("BROWSERBASE_API_KEY environment variable is required for initialization") + raise EnvironmentError( + "BROWSERBASE_API_KEY environment variable is required for initialization" + ) try: from browserbase import Browserbase # type: ignore except ImportError: - raise ImportError( - "`browserbase` package not found, please run `pip install browserbase`" - ) + import click + + if click.confirm( + "`browserbase` package not found, would you like to install it?" 
+ ): + import subprocess + + subprocess.run(["uv", "add", "browserbase"], check=True) + else: + raise ImportError( + "`browserbase` package not found, please run `uv add browserbase`" + ) self.browserbase = Browserbase(api_key=self.api_key) self.text_content = text_content diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py index 0eafd6e4a..cc44e4b39 100644 --- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py @@ -35,9 +35,21 @@ class FirecrawlCrawlWebsiteTool(BaseTool): try: from firecrawl import FirecrawlApp # type: ignore except ImportError: - raise ImportError( - "`firecrawl` package not found, please run `pip install firecrawl-py`" - ) + import click + + if click.confirm( + "You are missing the 'firecrawl-py' package. Would you like to install it? (y/N)" + ): + import subprocess + + subprocess.run(["uv", "add", "firecrawl-py"], check=True) + from firecrawl import ( + FirecrawlApp, + ) + else: + raise ImportError( + "`firecrawl-py` package not found, please run `uv add firecrawl-py`" + ) if not self.firecrawl: client_api_key = api_key or os.getenv("FIRECRAWL_API_KEY") diff --git a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py index 8b2a37185..7076ad263 100644 --- a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py @@ -31,9 +31,21 @@ class FirecrawlScrapeWebsiteTool(BaseTool): try: from firecrawl import FirecrawlApp # type: ignore except ImportError: - raise ImportError( - "`firecrawl` package not found, please run `pip install firecrawl-py`" - ) + import click + + if click.confirm( + "You are missing the 'firecrawl-py' package. Would you like to install it? (y/N)" + ): + import subprocess + + subprocess.run(["uv", "add", "firecrawl-py"], check=True) + from firecrawl import ( + FirecrawlApp, + ) + else: + raise ImportError( + "`firecrawl-py` package not found, please run `uv add firecrawl-py`" + ) self.firecrawl = FirecrawlApp(api_key=api_key) diff --git a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py index 36ba16391..c10f98c83 100644 --- a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py +++ b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py @@ -41,9 +41,21 @@ class FirecrawlSearchTool(BaseTool): try: from firecrawl import FirecrawlApp # type: ignore except ImportError: - raise ImportError( - "`firecrawl` package not found, please run `pip install firecrawl-py`" - ) + import click + + if click.confirm( + "You are missing the 'firecrawl-py' package. Would you like to install it? 
(y/N)" + ): + import subprocess + + subprocess.run(["uv", "add", "firecrawl-py"], check=True) + from firecrawl import ( + FirecrawlApp, + ) + else: + raise ImportError( + "`firecrawl-py` package not found, please run `uv add firecrawl-py`" + ) self.firecrawl = FirecrawlApp(api_key=api_key) diff --git a/src/crewai_tools/tools/linkup/linkup_search_tool.py b/src/crewai_tools/tools/linkup/linkup_search_tool.py index b172ad029..be03750fa 100644 --- a/src/crewai_tools/tools/linkup/linkup_search_tool.py +++ b/src/crewai_tools/tools/linkup/linkup_search_tool.py @@ -1,7 +1,10 @@ from typing import Any +from crewai.tools import BaseTool + try: from linkup import LinkupClient + LINKUP_AVAILABLE = True except ImportError: LINKUP_AVAILABLE = False @@ -9,23 +12,42 @@ except ImportError: from pydantic import PrivateAttr -class LinkupSearchTool: + +class LinkupSearchTool(BaseTool): name: str = "Linkup Search Tool" - description: str = "Performs an API call to Linkup to retrieve contextual information." - _client: LinkupClient = PrivateAttr() # type: ignore + description: str = ( + "Performs an API call to Linkup to retrieve contextual information." + ) + _client: LinkupClient = PrivateAttr() # type: ignore def __init__(self, api_key: str): """ Initialize the tool with an API key. """ - if not LINKUP_AVAILABLE: - raise ImportError( - "The 'linkup' package is required to use the LinkupSearchTool. " - "Please install it with: uv add linkup" - ) + super().__init__() + try: + from linkup import LinkupClient + except ImportError: + import click + + if click.confirm( + "You are missing the 'linkup-sdk' package. Would you like to install it? (y/N)" + ): + import subprocess + + subprocess.run(["uv", "add", "linkup-sdk"], check=True) + from linkup import LinkupClient + + else: + raise ImportError( + "The 'linkup-sdk' package is required to use the LinkupSearchTool. " + "Please install it with: uv add linkup-sdk" + ) self._client = LinkupClient(api_key=api_key) - def _run(self, query: str, depth: str = "standard", output_type: str = "searchResults") -> dict: + def _run( + self, query: str, depth: str = "standard", output_type: str = "searchResults" + ) -> dict: """ Executes a search using the Linkup API. @@ -36,9 +58,7 @@ class LinkupSearchTool: """ try: response = self._client.search( - query=query, - depth=depth, - output_type=output_type + query=query, depth=depth, output_type=output_type ) results = [ {"name": result.name, "url": result.url, "content": result.content} diff --git a/src/crewai_tools/tools/multion_tool/multion_tool.py b/src/crewai_tools/tools/multion_tool/multion_tool.py index a991074da..b525c4693 100644 --- a/src/crewai_tools/tools/multion_tool/multion_tool.py +++ b/src/crewai_tools/tools/multion_tool/multion_tool.py @@ -28,9 +28,18 @@ class MultiOnTool(BaseTool): try: from multion.client import MultiOn # type: ignore except ImportError: - raise ImportError( - "`multion` package not found, please run `pip install multion`" - ) + import click + + if click.confirm( + "You are missing the 'multion' package. Would you like to install it? 
(y/N)" + ): + import subprocess + + subprocess.run(["uv", "add", "multion"], check=True) + else: + raise ImportError( + "`multion` package not found, please run `uv add multion`" + ) self.session_id = None self.local = local self.multion = MultiOn(api_key=api_key) diff --git a/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py b/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py index e65cb342d..a1b63c790 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py +++ b/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py @@ -1,7 +1,14 @@ from typing import Any, Type from crewai.tools import BaseTool from pydantic import BaseModel, Field -from patronus import Client + +try: + from patronus import Client + + PYPATRONUS_AVAILABLE = True +except ImportError: + PYPATRONUS_AVAILABLE = False + Client = Any class FixedLocalEvaluatorToolSchema(BaseModel): @@ -24,26 +31,44 @@ class PatronusLocalEvaluatorTool(BaseTool): name: str = "Patronus Local Evaluator Tool" evaluator: str = "The registered local evaluator" evaluated_model_gold_answer: str = "The agent's gold answer" - description: str = ( - "This tool is used to evaluate the model input and output using custom function evaluators." - ) + description: str = "This tool is used to evaluate the model input and output using custom function evaluators." client: Any = None args_schema: Type[BaseModel] = FixedLocalEvaluatorToolSchema class Config: arbitrary_types_allowed = True - def __init__(self, patronus_client: Client, evaluator: str, evaluated_model_gold_answer: str, **kwargs: Any): + def __init__( + self, + patronus_client: Client, + evaluator: str, + evaluated_model_gold_answer: str, + **kwargs: Any, + ): super().__init__(**kwargs) - self.client = patronus_client - if evaluator: - self.evaluator = evaluator - self.evaluated_model_gold_answer = evaluated_model_gold_answer + if PYPATRONUS_AVAILABLE: + self.client = patronus_client + if evaluator: + self.evaluator = evaluator + self.evaluated_model_gold_answer = evaluated_model_gold_answer self.description = f"This tool calls the Patronus Evaluation API that takes an additional argument in addition to the following new argument:\n evaluators={evaluator}, evaluated_model_gold_answer={evaluated_model_gold_answer}" self._generate_description() print( f"Updating judge evaluator, gold_answer to: {self.evaluator}, {self.evaluated_model_gold_answer}" ) + else: + import click + + if click.confirm( + "You are missing the 'patronus' package. Would you like to install it? (y/N)" + ): + import subprocess + + subprocess.run(["uv", "add", "patronus"], check=True) + else: + raise ImportError( + "You are missing the patronus package. Would you like to install it?" 
+ ) def _run( self, @@ -79,7 +104,7 @@ class PatronusLocalEvaluatorTool(BaseTool): if isinstance(evaluated_model_gold_answer, str) else evaluated_model_gold_answer.get("description") ), - tags={}, # Optional metadata, supports arbitrary kv pairs + tags={}, # Optional metadata, supports arbitrary kv pairs ) output = f"Evaluation result: {result.pass_}, Explanation: {result.explanation}" return output diff --git a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py index 906bf6376..92623e3e0 100644 --- a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py +++ b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py @@ -1,25 +1,30 @@ import os -from typing import Any, Optional, Type +from typing import Any, Optional, Type, TYPE_CHECKING from urllib.parse import urlparse from crewai.tools import BaseTool -from pydantic import BaseModel, Field, validator -from scrapegraph_py import Client -from scrapegraph_py.logger import sgai_logger +from pydantic import BaseModel, Field, validator, ConfigDict + +# Type checking import +if TYPE_CHECKING: + from scrapegraph_py import Client class ScrapegraphError(Exception): """Base exception for Scrapegraph-related errors""" + pass class RateLimitError(ScrapegraphError): """Raised when API rate limits are exceeded""" + pass class FixedScrapegraphScrapeToolSchema(BaseModel): """Input for ScrapegraphScrapeTool when website_url is fixed.""" + pass @@ -32,7 +37,7 @@ class ScrapegraphScrapeToolSchema(FixedScrapegraphScrapeToolSchema): description="Prompt to guide the extraction of content", ) - @validator('website_url') + @validator("website_url") def validate_url(cls, v): """Validate URL format""" try: @@ -41,25 +46,32 @@ class ScrapegraphScrapeToolSchema(FixedScrapegraphScrapeToolSchema): raise ValueError return v except Exception: - raise ValueError("Invalid URL format. URL must include scheme (http/https) and domain") + raise ValueError( + "Invalid URL format. URL must include scheme (http/https) and domain" + ) class ScrapegraphScrapeTool(BaseTool): """ A tool that uses Scrapegraph AI to intelligently scrape website content. - + Raises: ValueError: If API key is missing or URL format is invalid RateLimitError: If API rate limits are exceeded RuntimeError: If scraping operation fails """ + model_config = ConfigDict(arbitrary_types_allowed=True) + name: str = "Scrapegraph website scraper" - description: str = "A tool that uses Scrapegraph AI to intelligently scrape website content." + description: str = ( + "A tool that uses Scrapegraph AI to intelligently scrape website content." + ) args_schema: Type[BaseModel] = ScrapegraphScrapeToolSchema website_url: Optional[str] = None user_prompt: Optional[str] = None api_key: Optional[str] = None + _client: Optional["Client"] = None def __init__( self, @@ -69,8 +81,31 @@ class ScrapegraphScrapeTool(BaseTool): **kwargs, ): super().__init__(**kwargs) + try: + from scrapegraph_py import Client + from scrapegraph_py.logger import sgai_logger + + except ImportError: + import click + + if click.confirm( + "You are missing the 'scrapegraph-py' package. Would you like to install it? 
(y/N)" + ): + import subprocess + + subprocess.run(["uv", "add", "scrapegraph-py"], check=True) + from scrapegraph_py import Client + from scrapegraph_py.logger import sgai_logger + + else: + raise ImportError( + "`scrapegraph-py` package not found, please run `uv add scrapegraph-py`" + ) + + self._client = Client(api_key=api_key) + self.api_key = api_key or os.getenv("SCRAPEGRAPH_API_KEY") - + if not self.api_key: raise ValueError("Scrapegraph API key is required") @@ -79,7 +114,7 @@ class ScrapegraphScrapeTool(BaseTool): self.website_url = website_url self.description = f"A tool that uses Scrapegraph AI to intelligently scrape {website_url}'s content." self.args_schema = FixedScrapegraphScrapeToolSchema - + if user_prompt is not None: self.user_prompt = user_prompt @@ -94,22 +129,24 @@ class ScrapegraphScrapeTool(BaseTool): if not all([result.scheme, result.netloc]): raise ValueError except Exception: - raise ValueError("Invalid URL format. URL must include scheme (http/https) and domain") + raise ValueError( + "Invalid URL format. URL must include scheme (http/https) and domain" + ) def _handle_api_response(self, response: dict) -> str: """Handle and validate API response""" if not response: raise RuntimeError("Empty response from Scrapegraph API") - + if "error" in response: error_msg = response.get("error", {}).get("message", "Unknown error") if "rate limit" in error_msg.lower(): raise RateLimitError(f"Rate limit exceeded: {error_msg}") raise RuntimeError(f"API error: {error_msg}") - + if "result" not in response: raise RuntimeError("Invalid response format from Scrapegraph API") - + return response["result"] def _run( @@ -117,7 +154,10 @@ class ScrapegraphScrapeTool(BaseTool): **kwargs: Any, ) -> Any: website_url = kwargs.get("website_url", self.website_url) - user_prompt = kwargs.get("user_prompt", self.user_prompt) or "Extract the main content of the webpage" + user_prompt = ( + kwargs.get("user_prompt", self.user_prompt) + or "Extract the main content of the webpage" + ) if not website_url: raise ValueError("website_url is required") @@ -125,12 +165,9 @@ class ScrapegraphScrapeTool(BaseTool): # Validate URL format self._validate_url(website_url) - # Initialize the client - sgai_client = Client(api_key=self.api_key) - try: # Make the SmartScraper request - response = sgai_client.smartscraper( + response = self.client.smartscraper( website_url=website_url, user_prompt=user_prompt, ) @@ -144,4 +181,4 @@ class ScrapegraphScrapeTool(BaseTool): raise RuntimeError(f"Scraping failed: {str(e)}") finally: # Always close the client - sgai_client.close() + self.client.close() diff --git a/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py b/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py index b47ce8e5b..dd071a61b 100644 --- a/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py +++ b/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py @@ -34,9 +34,18 @@ class ScrapflyScrapeWebsiteTool(BaseTool): try: from scrapfly import ScrapflyClient except ImportError: - raise ImportError( - "`scrapfly` package not found, please run `pip install scrapfly-sdk`" - ) + import click + + if click.confirm( + "You are missing the 'scrapfly-sdk' package. Would you like to install it? 
(y/N)" + ): + import subprocess + + subprocess.run(["uv", "add", "scrapfly-sdk"], check=True) + else: + raise ImportError( + "`scrapfly-sdk` package not found, please run `uv add scrapfly-sdk`" + ) self.scrapfly = ScrapflyClient(key=api_key) def _run( diff --git a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py index d7a55428d..e43a63828 100644 --- a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py +++ b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py @@ -5,9 +5,6 @@ from urllib.parse import urlparse from crewai.tools import BaseTool from pydantic import BaseModel, Field, validator -from selenium import webdriver -from selenium.webdriver.chrome.options import Options -from selenium.webdriver.common.by import By class FixedSeleniumScrapingToolSchema(BaseModel): @@ -17,33 +14,36 @@ class FixedSeleniumScrapingToolSchema(BaseModel): class SeleniumScrapingToolSchema(FixedSeleniumScrapingToolSchema): """Input for SeleniumScrapingTool.""" - website_url: str = Field(..., description="Mandatory website url to read the file. Must start with http:// or https://") + website_url: str = Field( + ..., + description="Mandatory website url to read the file. Must start with http:// or https://", + ) css_element: str = Field( ..., description="Mandatory css reference for element to scrape from the website", ) - @validator('website_url') + @validator("website_url") def validate_website_url(cls, v): if not v: raise ValueError("Website URL cannot be empty") - + if len(v) > 2048: # Common maximum URL length raise ValueError("URL is too long (max 2048 characters)") - - if not re.match(r'^https?://', v): + + if not re.match(r"^https?://", v): raise ValueError("URL must start with http:// or https://") - + try: result = urlparse(v) if not all([result.scheme, result.netloc]): raise ValueError("Invalid URL format") except Exception as e: raise ValueError(f"Invalid URL: {str(e)}") - - if re.search(r'\s', v): + + if re.search(r"\s", v): raise ValueError("URL cannot contain whitespace") - + return v @@ -52,7 +52,7 @@ class SeleniumScrapingTool(BaseTool): description: str = "A tool that can be used to read a website content." args_schema: Type[BaseModel] = SeleniumScrapingToolSchema website_url: Optional[str] = None - driver: Optional[Any] = webdriver.Chrome + driver: Optional[Any] = None cookie: Optional[dict] = None wait_time: Optional[int] = 3 css_element: Optional[str] = None @@ -66,6 +66,30 @@ class SeleniumScrapingTool(BaseTool): **kwargs, ): super().__init__(**kwargs) + try: + from selenium import webdriver + from selenium.webdriver.chrome.options import Options + from selenium.webdriver.common.by import By + except ImportError: + import click + + if click.confirm( + "You are missing the 'selenium' and 'webdriver-manager' packages. Would you like to install it? 
(y/N)" + ): + import subprocess + + subprocess.run( + ["uv", "pip", "install", "selenium", "webdriver-manager"], + check=True, + ) + from selenium import webdriver + from selenium.webdriver.chrome.options import Options + from selenium.webdriver.common.by import By + else: + raise ImportError( + "`selenium` and `webdriver-manager` package not found, please run `uv add selenium webdriver-manager`" + ) + self.driver = webdriver.Chrome() if cookie is not None: self.cookie = cookie @@ -130,11 +154,11 @@ class SeleniumScrapingTool(BaseTool): def _create_driver(self, url, cookie, wait_time): if not url: raise ValueError("URL cannot be empty") - + # Validate URL format - if not re.match(r'^https?://', url): + if not re.match(r"^https?://", url): raise ValueError("URL must start with http:// or https://") - + options = Options() options.add_argument("--headless") driver = self.driver(options=options) diff --git a/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py b/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py index 98491190c..f41f0a596 100644 --- a/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py +++ b/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py @@ -4,6 +4,7 @@ from typing import Optional, Any, Union from crewai.tools import BaseTool + class SerpApiBaseTool(BaseTool): """Base class for SerpApi functionality with shared capabilities.""" @@ -15,9 +16,18 @@ class SerpApiBaseTool(BaseTool): try: from serpapi import Client except ImportError: - raise ImportError( - "`serpapi` package not found, please install with `pip install serpapi`" - ) + import click + + if click.confirm( + "You are missing the 'serpapi' package. Would you like to install it? (y/N)" + ): + import subprocess + + subprocess.run(["uv", "add", "serpapi"], check=True) + else: + raise ImportError( + "`serpapi` package not found, please install with `uv add serpapi`" + ) api_key = os.getenv("SERPAPI_API_KEY") if not api_key: raise ValueError( diff --git a/src/crewai_tools/tools/spider_tool/spider_tool.py b/src/crewai_tools/tools/spider_tool/spider_tool.py index 87726f0bc..170e691f9 100644 --- a/src/crewai_tools/tools/spider_tool/spider_tool.py +++ b/src/crewai_tools/tools/spider_tool/spider_tool.py @@ -87,13 +87,21 @@ class SpiderTool(BaseTool): try: from spider import Spider # type: ignore - self.spider = Spider(api_key=api_key) except ImportError: - raise ImportError( - "`spider-client` package not found, please run `uv add spider-client`" - ) - except Exception as e: - raise RuntimeError(f"Failed to initialize Spider client: {str(e)}") + import click + + if click.confirm( + "You are missing the 'spider-client' package. Would you like to install it? (y/N)" + ): + import subprocess + + subprocess.run(["uv", "pip", "install", "spider-client"], check=True) + from spider import Spider + else: + raise ImportError( + "`spider-client` package not found, please run `uv add spider-client`" + ) + self.spider = Spider(api_key=api_key) def _validate_url(self, url: str) -> bool: """Validate URL format and security constraints. 
diff --git a/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py b/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py index 07c76c8c3..0aac44e86 100644 --- a/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py +++ b/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py @@ -25,6 +25,7 @@ logger = logging.getLogger(__name__) STAGEHAND_AVAILABLE = False try: import stagehand + STAGEHAND_AVAILABLE = True except ImportError: pass # Keep STAGEHAND_AVAILABLE as False @@ -32,33 +33,45 @@ except ImportError: class StagehandResult(BaseModel): """Result from a Stagehand operation. - + Attributes: success: Whether the operation completed successfully data: The result data from the operation error: Optional error message if the operation failed """ - success: bool = Field(..., description="Whether the operation completed successfully") - data: Union[str, Dict, List] = Field(..., description="The result data from the operation") - error: Optional[str] = Field(None, description="Optional error message if the operation failed") + + success: bool = Field( + ..., description="Whether the operation completed successfully" + ) + data: Union[str, Dict, List] = Field( + ..., description="The result data from the operation" + ) + error: Optional[str] = Field( + None, description="Optional error message if the operation failed" + ) class StagehandToolConfig(BaseModel): """Configuration for the StagehandTool. - + Attributes: api_key: OpenAI API key for Stagehand authentication timeout: Maximum time in seconds to wait for operations (default: 30) retry_attempts: Number of times to retry failed operations (default: 3) """ + api_key: str = Field(..., description="OpenAI API key for Stagehand authentication") - timeout: int = Field(30, description="Maximum time in seconds to wait for operations") - retry_attempts: int = Field(3, description="Number of times to retry failed operations") + timeout: int = Field( + 30, description="Maximum time in seconds to wait for operations" + ) + retry_attempts: int = Field( + 3, description="Number of times to retry failed operations" + ) class StagehandToolSchema(BaseModel): """Schema for the StagehandTool input parameters. - + Examples: ```python # Using the 'act' API to click a button @@ -66,13 +79,13 @@ class StagehandToolSchema(BaseModel): api_method="act", instruction="Click the 'Sign In' button" ) - + # Using the 'extract' API to get text tool.run( api_method="extract", instruction="Get the text content of the main article" ) - + # Using the 'observe' API to monitor changes tool.run( api_method="observe", @@ -80,48 +93,49 @@ class StagehandToolSchema(BaseModel): ) ``` """ + api_method: str = Field( ..., description="The Stagehand API to use: 'act' for interactions, 'extract' for getting content, or 'observe' for monitoring changes", - pattern="^(act|extract|observe)$" + pattern="^(act|extract|observe)$", ) instruction: str = Field( ..., description="An atomic instruction for Stagehand to execute. Instructions should be simple and specific to increase reliability.", min_length=1, - max_length=500 + max_length=500, ) class StagehandTool(BaseTool): """A tool for using Stagehand's AI-powered web automation capabilities. - + This tool provides access to Stagehand's three core APIs: - act: Perform web interactions (e.g., clicking buttons, filling forms) - extract: Extract information from web pages (e.g., getting text content) - observe: Monitor web page changes (e.g., watching for updates) - + Each function takes atomic instructions to increase reliability. 
- + Required Environment Variables: OPENAI_API_KEY: API key for OpenAI (required by Stagehand) - + Examples: ```python tool = StagehandTool() - + # Perform a web interaction result = tool.run( api_method="act", instruction="Click the 'Sign In' button" ) - + # Extract content from a page content = tool.run( api_method="extract", instruction="Get the text content of the main article" ) - + # Monitor for changes changes = tool.run( api_method="observe", @@ -129,7 +143,7 @@ class StagehandTool(BaseTool): ) ``` """ - + name: str = "StagehandTool" description: str = ( "A tool that uses Stagehand's AI-powered web automation to interact with websites. " @@ -137,27 +151,33 @@ class StagehandTool(BaseTool): "Each instruction should be atomic (simple and specific) to increase reliability." ) args_schema: Type[BaseModel] = StagehandToolSchema - - def __init__(self, config: StagehandToolConfig | None = None, **kwargs: Any) -> None: + + def __init__( + self, config: StagehandToolConfig | None = None, **kwargs: Any + ) -> None: """Initialize the StagehandTool. - + Args: config: Optional configuration for the tool. If not provided, will attempt to use OPENAI_API_KEY from environment. **kwargs: Additional keyword arguments passed to the base class. - + Raises: ImportError: If the stagehand package is not installed ValueError: If no API key is provided via config or environment """ super().__init__(**kwargs) - + if not STAGEHAND_AVAILABLE: - raise ImportError( - "The 'stagehand' package is required to use this tool. " - "Please install it with: pip install stagehand" - ) - + import click + + if click.confirm( + "You are missing the 'stagehand-sdk' package. Would you like to install it? (y/N)" + ): + import subprocess + + subprocess.run(["uv", "add", "stagehand-sdk"], check=True) + # Use config if provided, otherwise try environment variable if config is not None: self.config = config @@ -168,24 +188,22 @@ class StagehandTool(BaseTool): "Either provide config with api_key or set OPENAI_API_KEY environment variable" ) self.config = StagehandToolConfig( - api_key=api_key, - timeout=30, - retry_attempts=3 + api_key=api_key, timeout=30, retry_attempts=3 ) - + @lru_cache(maxsize=100) def _cached_run(self, api_method: str, instruction: str) -> Any: """Execute a cached Stagehand command. - + This method is cached to improve performance for repeated operations. 
- + Args: api_method: The Stagehand API to use ('act', 'extract', or 'observe') instruction: An atomic instruction for Stagehand to execute - + Returns: The raw result from the Stagehand API call - + Raises: ValueError: If an invalid api_method is provided Exception: If the Stagehand API call fails @@ -193,23 +211,25 @@ class StagehandTool(BaseTool): logger.debug( "Cache operation - Method: %s, Instruction length: %d", api_method, - len(instruction) + len(instruction), ) - + # Initialize Stagehand with configuration logger.info( "Initializing Stagehand (timeout=%ds, retries=%d)", self.config.timeout, - self.config.retry_attempts + self.config.retry_attempts, ) st = stagehand.Stagehand( api_key=self.config.api_key, timeout=self.config.timeout, - retry_attempts=self.config.retry_attempts + retry_attempts=self.config.retry_attempts, ) - + # Call the appropriate Stagehand API based on the method - logger.info("Executing %s operation with instruction: %s", api_method, instruction[:100]) + logger.info( + "Executing %s operation with instruction: %s", api_method, instruction[:100] + ) try: if api_method == "act": result = st.act(instruction) @@ -219,28 +239,27 @@ class StagehandTool(BaseTool): result = st.observe(instruction) else: raise ValueError(f"Unknown api_method: {api_method}") - - + logger.info("Successfully executed %s operation", api_method) return result - + except Exception as e: logger.warning( "Operation failed (method=%s, error=%s), will be retried on next attempt", api_method, - str(e) + str(e), ) raise def _run(self, api_method: str, instruction: str, **kwargs: Any) -> StagehandResult: """Execute a Stagehand command using the specified API method. - + Args: api_method: The Stagehand API to use ('act', 'extract', or 'observe') instruction: An atomic instruction for Stagehand to execute **kwargs: Additional keyword arguments passed to the Stagehand API - - Returns: + + Returns: StagehandResult containing the operation result and status """ try: @@ -249,56 +268,36 @@ class StagehandTool(BaseTool): "Starting operation - Method: %s, Instruction length: %d, Args: %s", api_method, len(instruction), - kwargs + kwargs, ) - + # Use cached execution result = self._cached_run(api_method, instruction) logger.info("Operation completed successfully") return StagehandResult(success=True, data=result) - + except stagehand.AuthenticationError as e: logger.error( - "Authentication failed - Method: %s, Error: %s", - api_method, - str(e) + "Authentication failed - Method: %s, Error: %s", api_method, str(e) ) return StagehandResult( - success=False, - data={}, - error=f"Authentication failed: {str(e)}" + success=False, data={}, error=f"Authentication failed: {str(e)}" ) except stagehand.APIError as e: - logger.error( - "API error - Method: %s, Error: %s", - api_method, - str(e) - ) - return StagehandResult( - success=False, - data={}, - error=f"API error: {str(e)}" - ) + logger.error("API error - Method: %s, Error: %s", api_method, str(e)) + return StagehandResult(success=False, data={}, error=f"API error: {str(e)}") except stagehand.BrowserError as e: - logger.error( - "Browser error - Method: %s, Error: %s", - api_method, - str(e) - ) + logger.error("Browser error - Method: %s, Error: %s", api_method, str(e)) return StagehandResult( - success=False, - data={}, - error=f"Browser error: {str(e)}" + success=False, data={}, error=f"Browser error: {str(e)}" ) except Exception as e: logger.error( "Unexpected error - Method: %s, Error type: %s, Message: %s", api_method, type(e).__name__, - str(e) + str(e), 
) return StagehandResult( - success=False, - data={}, - error=f"Unexpected error: {str(e)}" + success=False, data={}, error=f"Unexpected error: {str(e)}" ) diff --git a/src/crewai_tools/tools/weaviate_tool/vector_search.py b/src/crewai_tools/tools/weaviate_tool/vector_search.py index 14e10d7c5..879a950f6 100644 --- a/src/crewai_tools/tools/weaviate_tool/vector_search.py +++ b/src/crewai_tools/tools/weaviate_tool/vector_search.py @@ -68,12 +68,25 @@ class WeaviateVectorSearchTool(BaseTool): model="gpt-4o", ) ) + else: + import click + + if click.confirm( + "You are missing the 'weaviate-client' package. Would you like to install it? (y/N)" + ): + import subprocess + + subprocess.run(["uv", "pip", "install", "weaviate-client"], check=True) + + else: + raise ImportError( + "You are missing the 'weaviate-client' package. Would you like to install it? (y/N)" + ) def _run(self, query: str) -> str: if not WEAVIATE_AVAILABLE: raise ImportError( - "The 'weaviate-client' package is required to use the WeaviateVectorSearchTool. " - "Please install it with: uv add weaviate-client" + "You are missing the 'weaviate-client' package. Would you like to install it? (y/N)" ) if not self.weaviate_cluster_url or not self.weaviate_api_key: From 41cec25ad9885b69047a713871373b7738a589f4 Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Fri, 10 Jan 2025 14:26:03 -0800 Subject: [PATCH 245/391] fix client init on scrapegraph tool --- .../tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py index 92623e3e0..56fe0360a 100644 --- a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py +++ b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py @@ -167,7 +167,7 @@ class ScrapegraphScrapeTool(BaseTool): try: # Make the SmartScraper request - response = self.client.smartscraper( + response = self._client.smartscraper( website_url=website_url, user_prompt=user_prompt, ) From d3d3cc4c28f269bcdac4b893469e1a0d20d60acd Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Fri, 10 Jan 2025 15:56:54 -0800 Subject: [PATCH 246/391] remove y/n as it comes with it within click.confirm and fixed firecrawl tools --- .../firecrawl_crawl_website_tool.py | 43 ++++++++------- .../firecrawl_scrape_website_tool.py | 13 +++-- .../firecrawl_search_tool.py | 54 ++++++++++++++----- .../tools/linkup/linkup_search_tool.py | 2 +- .../tools/multion_tool/multion_tool.py | 2 +- .../patronus_local_evaluator_tool.py | 2 +- .../scrapegraph_scrape_tool.py | 2 +- .../scrapfly_scrape_website_tool.py | 2 +- .../selenium_scraping_tool.py | 12 +++-- .../tools/serpapi_tool/serpapi_base_tool.py | 2 +- .../tools/spider_tool/spider_tool.py | 2 +- .../tools/stagehand_tool/stagehand_tool.py | 2 +- .../tools/weaviate_tool/vector_search.py | 6 +-- 13 files changed, 90 insertions(+), 54 deletions(-) diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py index cc44e4b39..e46b22f0e 100644 --- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py @@ -1,12 +1,13 @@ -import os -from typing import TYPE_CHECKING, Any, Dict, Optional, Type +from typing import Any, Dict, Optional, 
Type from crewai.tools import BaseTool from pydantic import BaseModel, ConfigDict, Field -# Type checking import -if TYPE_CHECKING: + +try: from firecrawl import FirecrawlApp +except ImportError: + FirecrawlApp = Any class FirecrawlCrawlWebsiteToolSchema(BaseModel): @@ -32,34 +33,34 @@ class FirecrawlCrawlWebsiteTool(BaseTool): def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) + self.api_key = api_key + self._initialize_firecrawl() + + def _initialize_firecrawl(self) -> None: try: from firecrawl import FirecrawlApp # type: ignore + + self.firecrawl = FirecrawlApp(api_key=self.api_key) except ImportError: import click if click.confirm( - "You are missing the 'firecrawl-py' package. Would you like to install it? (y/N)" + "You are missing the 'firecrawl-py' package. Would you like to install it?" ): import subprocess - subprocess.run(["uv", "add", "firecrawl-py"], check=True) - from firecrawl import ( - FirecrawlApp, - ) + try: + subprocess.run(["uv", "add", "firecrawl-py"], check=True) + from firecrawl import FirecrawlApp + + self.firecrawl = FirecrawlApp(api_key=self.api_key) + except subprocess.CalledProcessError: + raise ImportError("Failed to install firecrawl-py package") else: raise ImportError( "`firecrawl-py` package not found, please run `uv add firecrawl-py`" ) - if not self.firecrawl: - client_api_key = api_key or os.getenv("FIRECRAWL_API_KEY") - if not client_api_key: - raise ValueError( - "FIRECRAWL_API_KEY is not set. Please provide it either via the constructor " - "with the `api_key` argument or by setting the FIRECRAWL_API_KEY environment variable." - ) - self.firecrawl = FirecrawlApp(api_key=client_api_key) - def _run( self, url: str, @@ -79,8 +80,10 @@ class FirecrawlCrawlWebsiteTool(BaseTool): try: from firecrawl import FirecrawlApp - # Must rebuild model after class is defined - FirecrawlCrawlWebsiteTool.model_rebuild() + # Only rebuild if the class hasn't been initialized yet + if not hasattr(FirecrawlCrawlWebsiteTool, "_model_rebuilt"): + FirecrawlCrawlWebsiteTool.model_rebuild() + FirecrawlCrawlWebsiteTool._model_rebuilt = True except ImportError: """ When this tool is not used, then exception can be ignored. diff --git a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py index 7076ad263..5c9a0b759 100644 --- a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py @@ -1,11 +1,12 @@ -from typing import TYPE_CHECKING, Optional, Type +from typing import Any, Optional, Type from crewai.tools import BaseTool from pydantic import BaseModel, ConfigDict, Field -# Type checking import -if TYPE_CHECKING: +try: from firecrawl import FirecrawlApp +except ImportError: + FirecrawlApp = Any class FirecrawlScrapeWebsiteToolSchema(BaseModel): @@ -34,7 +35,7 @@ class FirecrawlScrapeWebsiteTool(BaseTool): import click if click.confirm( - "You are missing the 'firecrawl-py' package. Would you like to install it? (y/N)" + "You are missing the 'firecrawl-py' package. Would you like to install it?" 
): import subprocess @@ -70,7 +71,9 @@ try: from firecrawl import FirecrawlApp # Must rebuild model after class is defined - FirecrawlScrapeWebsiteTool.model_rebuild() + if not hasattr(FirecrawlScrapeWebsiteTool, "_model_rebuilt"): + FirecrawlScrapeWebsiteTool.model_rebuild() + FirecrawlScrapeWebsiteTool._model_rebuilt = True except ImportError: """ When this tool is not used, then exception can be ignored. diff --git a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py index c10f98c83..2998a5025 100644 --- a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py +++ b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py @@ -1,11 +1,13 @@ -from typing import TYPE_CHECKING, Any, Dict, Optional, Type +from typing import Any, Dict, Optional, Type from crewai.tools import BaseTool -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, ConfigDict # Type checking import -if TYPE_CHECKING: +try: from firecrawl import FirecrawlApp +except ImportError: + FirecrawlApp = Any class FirecrawlSearchToolSchema(BaseModel): @@ -30,6 +32,9 @@ class FirecrawlSearchToolSchema(BaseModel): class FirecrawlSearchTool(BaseTool): + model_config = ConfigDict( + arbitrary_types_allowed=True, validate_assignment=True, frozen=False + ) name: str = "Firecrawl web search tool" description: str = "Search webpages using Firecrawl and return the results" args_schema: Type[BaseModel] = FirecrawlSearchToolSchema @@ -38,27 +43,34 @@ class FirecrawlSearchTool(BaseTool): def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) + self.api_key = api_key + self._initialize_firecrawl() + + def _initialize_firecrawl(self) -> None: try: from firecrawl import FirecrawlApp # type: ignore + + self.firecrawl = FirecrawlApp(api_key=self.api_key) except ImportError: import click if click.confirm( - "You are missing the 'firecrawl-py' package. Would you like to install it? (y/N)" + "You are missing the 'firecrawl-py' package. Would you like to install it?" 
): import subprocess - subprocess.run(["uv", "add", "firecrawl-py"], check=True) - from firecrawl import ( - FirecrawlApp, - ) + try: + subprocess.run(["uv", "add", "firecrawl-py"], check=True) + from firecrawl import FirecrawlApp + + self.firecrawl = FirecrawlApp(api_key=self.api_key) + except subprocess.CalledProcessError: + raise ImportError("Failed to install firecrawl-py package") else: raise ImportError( "`firecrawl-py` package not found, please run `uv add firecrawl-py`" ) - self.firecrawl = FirecrawlApp(api_key=api_key) - def _run( self, query: str, @@ -69,9 +81,9 @@ class FirecrawlSearchTool(BaseTool): location: Optional[str] = None, timeout: Optional[int] = 60000, scrape_options: Optional[Dict[str, Any]] = None, - ): - if scrape_options is None: - scrape_options = {} + ) -> Any: + if not self.firecrawl: + raise RuntimeError("FirecrawlApp not properly initialized") options = { "query": query, @@ -81,6 +93,20 @@ class FirecrawlSearchTool(BaseTool): "country": country, "location": location, "timeout": timeout, - "scrapeOptions": scrape_options, + "scrapeOptions": scrape_options or {}, } return self.firecrawl.search(**options) + + +try: + from firecrawl import FirecrawlApp # type: ignore + + # Only rebuild if the class hasn't been initialized yet + if not hasattr(FirecrawlSearchTool, "_model_rebuilt"): + FirecrawlSearchTool.model_rebuild() + FirecrawlSearchTool._model_rebuilt = True +except ImportError: + """ + When this tool is not used, then exception can be ignored. + """ + pass diff --git a/src/crewai_tools/tools/linkup/linkup_search_tool.py b/src/crewai_tools/tools/linkup/linkup_search_tool.py index be03750fa..5836c1851 100644 --- a/src/crewai_tools/tools/linkup/linkup_search_tool.py +++ b/src/crewai_tools/tools/linkup/linkup_search_tool.py @@ -31,7 +31,7 @@ class LinkupSearchTool(BaseTool): import click if click.confirm( - "You are missing the 'linkup-sdk' package. Would you like to install it? (y/N)" + "You are missing the 'linkup-sdk' package. Would you like to install it?" ): import subprocess diff --git a/src/crewai_tools/tools/multion_tool/multion_tool.py b/src/crewai_tools/tools/multion_tool/multion_tool.py index b525c4693..299d66bd1 100644 --- a/src/crewai_tools/tools/multion_tool/multion_tool.py +++ b/src/crewai_tools/tools/multion_tool/multion_tool.py @@ -31,7 +31,7 @@ class MultiOnTool(BaseTool): import click if click.confirm( - "You are missing the 'multion' package. Would you like to install it? (y/N)" + "You are missing the 'multion' package. Would you like to install it?" ): import subprocess diff --git a/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py b/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py index a1b63c790..7a879db1c 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py +++ b/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py @@ -60,7 +60,7 @@ class PatronusLocalEvaluatorTool(BaseTool): import click if click.confirm( - "You are missing the 'patronus' package. Would you like to install it? (y/N)" + "You are missing the 'patronus' package. Would you like to install it?" 
): import subprocess diff --git a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py index 56fe0360a..57c81aabe 100644 --- a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py +++ b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py @@ -89,7 +89,7 @@ class ScrapegraphScrapeTool(BaseTool): import click if click.confirm( - "You are missing the 'scrapegraph-py' package. Would you like to install it? (y/N)" + "You are missing the 'scrapegraph-py' package. Would you like to install it?" ): import subprocess diff --git a/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py b/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py index dd071a61b..4d6b72b61 100644 --- a/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py +++ b/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py @@ -37,7 +37,7 @@ class ScrapflyScrapeWebsiteTool(BaseTool): import click if click.confirm( - "You are missing the 'scrapfly-sdk' package. Would you like to install it? (y/N)" + "You are missing the 'scrapfly-sdk' package. Would you like to install it?" ): import subprocess diff --git a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py index e43a63828..36188fc0e 100644 --- a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py +++ b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py @@ -57,6 +57,8 @@ class SeleniumScrapingTool(BaseTool): wait_time: Optional[int] = 3 css_element: Optional[str] = None return_html: Optional[bool] = False + _options: Optional[dict] = None + _by: Optional[Any] = None def __init__( self, @@ -74,7 +76,7 @@ class SeleniumScrapingTool(BaseTool): import click if click.confirm( - "You are missing the 'selenium' and 'webdriver-manager' packages. Would you like to install it? (y/N)" + "You are missing the 'selenium' and 'webdriver-manager' packages. Would you like to install it?" 
): import subprocess @@ -90,6 +92,8 @@ class SeleniumScrapingTool(BaseTool): "`selenium` and `webdriver-manager` package not found, please run `uv add selenium webdriver-manager`" ) self.driver = webdriver.Chrome() + self._options = Options() + self._by = By if cookie is not None: self.cookie = cookie @@ -133,7 +137,7 @@ class SeleniumScrapingTool(BaseTool): return css_element is None or css_element.strip() == "" def _get_body_content(self, driver, return_html): - body_element = driver.find_element(By.TAG_NAME, "body") + body_element = driver.find_element(self._by.TAG_NAME, "body") return ( body_element.get_attribute("outerHTML") @@ -144,7 +148,7 @@ class SeleniumScrapingTool(BaseTool): def _get_elements_content(self, driver, css_element, return_html): elements_content = [] - for element in driver.find_elements(By.CSS_SELECTOR, css_element): + for element in driver.find_elements(self._by.CSS_SELECTOR, css_element): elements_content.append( element.get_attribute("outerHTML") if return_html else element.text ) @@ -159,7 +163,7 @@ class SeleniumScrapingTool(BaseTool): if not re.match(r"^https?://", url): raise ValueError("URL must start with http:// or https://") - options = Options() + options = self._options() options.add_argument("--headless") driver = self.driver(options=options) driver.get(url) diff --git a/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py b/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py index f41f0a596..5dbc52214 100644 --- a/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py +++ b/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py @@ -19,7 +19,7 @@ class SerpApiBaseTool(BaseTool): import click if click.confirm( - "You are missing the 'serpapi' package. Would you like to install it? (y/N)" + "You are missing the 'serpapi' package. Would you like to install it?" ): import subprocess diff --git a/src/crewai_tools/tools/spider_tool/spider_tool.py b/src/crewai_tools/tools/spider_tool/spider_tool.py index 170e691f9..ff52a35dc 100644 --- a/src/crewai_tools/tools/spider_tool/spider_tool.py +++ b/src/crewai_tools/tools/spider_tool/spider_tool.py @@ -91,7 +91,7 @@ class SpiderTool(BaseTool): import click if click.confirm( - "You are missing the 'spider-client' package. Would you like to install it? (y/N)" + "You are missing the 'spider-client' package. Would you like to install it?" ): import subprocess diff --git a/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py b/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py index 0aac44e86..3b19c514f 100644 --- a/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py +++ b/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py @@ -172,7 +172,7 @@ class StagehandTool(BaseTool): import click if click.confirm( - "You are missing the 'stagehand-sdk' package. Would you like to install it? (y/N)" + "You are missing the 'stagehand-sdk' package. Would you like to install it?" ): import subprocess diff --git a/src/crewai_tools/tools/weaviate_tool/vector_search.py b/src/crewai_tools/tools/weaviate_tool/vector_search.py index 879a950f6..d03d444e1 100644 --- a/src/crewai_tools/tools/weaviate_tool/vector_search.py +++ b/src/crewai_tools/tools/weaviate_tool/vector_search.py @@ -72,7 +72,7 @@ class WeaviateVectorSearchTool(BaseTool): import click if click.confirm( - "You are missing the 'weaviate-client' package. Would you like to install it? (y/N)" + "You are missing the 'weaviate-client' package. Would you like to install it?" 
): import subprocess @@ -80,13 +80,13 @@ class WeaviateVectorSearchTool(BaseTool): else: raise ImportError( - "You are missing the 'weaviate-client' package. Would you like to install it? (y/N)" + "You are missing the 'weaviate-client' package. Would you like to install it?" ) def _run(self, query: str) -> str: if not WEAVIATE_AVAILABLE: raise ImportError( - "You are missing the 'weaviate-client' package. Would you like to install it? (y/N)" + "You are missing the 'weaviate-client' package. Would you like to install it?" ) if not self.weaviate_cluster_url or not self.weaviate_api_key: From e26667ea40416f714fceb61c40efc26d37725836 Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Fri, 10 Jan 2025 18:17:27 -0800 Subject: [PATCH 247/391] removing serpapi from dependenices --- src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py b/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py index 5dbc52214..4174092ac 100644 --- a/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py +++ b/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py @@ -14,7 +14,7 @@ class SerpApiBaseTool(BaseTool): super().__init__(**kwargs) try: - from serpapi import Client + from serpapi import Client # type: ignore except ImportError: import click @@ -24,6 +24,7 @@ class SerpApiBaseTool(BaseTool): import subprocess subprocess.run(["uv", "add", "serpapi"], check=True) + from serpapi import Client else: raise ImportError( "`serpapi` package not found, please install with `uv add serpapi`" From d882818d6c1ba282cd5e1241e48c5b382a3c3c0e Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Fri, 10 Jan 2025 18:24:07 -0800 Subject: [PATCH 248/391] fixed serpapi --- .../serpapi_google_search_tool.py | 46 +++++++++++++------ .../serpapi_google_shopping_tool.py | 45 ++++++++++++------ 2 files changed, 65 insertions(+), 26 deletions(-) diff --git a/src/crewai_tools/tools/serpapi_tool/serpapi_google_search_tool.py b/src/crewai_tools/tools/serpapi_tool/serpapi_google_search_tool.py index 199b7f5a2..afadbe5e2 100644 --- a/src/crewai_tools/tools/serpapi_tool/serpapi_google_search_tool.py +++ b/src/crewai_tools/tools/serpapi_tool/serpapi_google_search_tool.py @@ -1,16 +1,30 @@ from typing import Any, Type, Optional import re -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, ConfigDict from .serpapi_base_tool import SerpApiBaseTool -from serpapi import HTTPError + +try: + from serpapi import HTTPError +except ImportError: + HTTPError = Any + class SerpApiGoogleSearchToolSchema(BaseModel): """Input for Google Search.""" - search_query: str = Field(..., description="Mandatory search query you want to use to Google search.") - location: Optional[str] = Field(None, description="Location you want the search to be performed in.") + + search_query: str = Field( + ..., description="Mandatory search query you want to use to Google search." + ) + location: Optional[str] = Field( + None, description="Location you want the search to be performed in." + ) + class SerpApiGoogleSearchTool(SerpApiBaseTool): + model_config = ConfigDict( + arbitrary_types_allowed=True, validate_assignment=True, frozen=False + ) name: str = "Google Search" description: str = ( "A tool to perform to perform a Google search with a search_query." 
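Note: the patches above all converge on one optional-dependency pattern — try the import, offer an interactive install via `click.confirm` plus `uv add`, re-import on success, and otherwise raise an `ImportError` with the install command. The sketch below distills that pattern; `require_package` is an illustrative helper, not part of the crewai-tools API, and the re-import assumes the running interpreter can see the freshly installed package.

```python
# Sketch of the deferred-import pattern used across these tools (illustrative;
# `require_package` is not a crewai-tools API).
import importlib
import subprocess

import click


def require_package(module: str, package: str):
    """Import `module`, offering to install `package` with uv when missing."""
    try:
        return importlib.import_module(module)
    except ImportError:
        if click.confirm(
            f"You are missing the '{package}' package. Would you like to install it?"
        ):
            subprocess.run(["uv", "add", package], check=True)
            # Assumes the current environment picks up the newly installed package.
            return importlib.import_module(module)
        raise ImportError(
            f"`{package}` package not found, please run `uv add {package}`"
        )


# Hypothetical usage, mirroring the tools above:
# firecrawl = require_package("firecrawl", "firecrawl-py").FirecrawlApp(api_key=key)
```

`click.confirm` already renders its own `[y/N]` suffix, which is why PATCH 246 strips the literal `(y/N)` from these prompts.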
@@ -22,19 +36,25 @@ class SerpApiGoogleSearchTool(SerpApiBaseTool): **kwargs: Any, ) -> Any: try: - results = self.client.search({ - "q": kwargs.get("search_query"), - "location": kwargs.get("location"), - }).as_dict() + results = self.client.search( + { + "q": kwargs.get("search_query"), + "location": kwargs.get("location"), + } + ).as_dict() self._omit_fields( - results, - [r"search_metadata", r"search_parameters", r"serpapi_.+", r".+_token", r"displayed_link", r"pagination"] + results, + [ + r"search_metadata", + r"search_parameters", + r"serpapi_.+", + r".+_token", + r"displayed_link", + r"pagination", + ], ) return results except HTTPError as e: return f"An error occurred: {str(e)}. Some parameters may be invalid." - - - \ No newline at end of file diff --git a/src/crewai_tools/tools/serpapi_tool/serpapi_google_shopping_tool.py b/src/crewai_tools/tools/serpapi_tool/serpapi_google_shopping_tool.py index b44b3a809..1cf865985 100644 --- a/src/crewai_tools/tools/serpapi_tool/serpapi_google_shopping_tool.py +++ b/src/crewai_tools/tools/serpapi_tool/serpapi_google_shopping_tool.py @@ -3,15 +3,29 @@ from typing import Any, Type, Optional import re from pydantic import BaseModel, Field from .serpapi_base_tool import SerpApiBaseTool -from serpapi import HTTPError +from pydantic import ConfigDict + +try: + from serpapi import HTTPError +except ImportError: + HTTPError = Any + class SerpApiGoogleShoppingToolSchema(BaseModel): """Input for Google Shopping.""" - search_query: str = Field(..., description="Mandatory search query you want to use to Google shopping.") - location: Optional[str] = Field(None, description="Location you want the search to be performed in.") + + search_query: str = Field( + ..., description="Mandatory search query you want to use to Google shopping." + ) + location: Optional[str] = Field( + None, description="Location you want the search to be performed in." + ) class SerpApiGoogleShoppingTool(SerpApiBaseTool): + model_config = ConfigDict( + arbitrary_types_allowed=True, validate_assignment=True, frozen=False + ) name: str = "Google Shopping" description: str = ( "A tool to perform search on Google shopping with a search_query." @@ -23,20 +37,25 @@ class SerpApiGoogleShoppingTool(SerpApiBaseTool): **kwargs: Any, ) -> Any: try: - results = self.client.search({ - "engine": "google_shopping", - "q": kwargs.get("search_query"), - "location": kwargs.get("location") - }).as_dict() + results = self.client.search( + { + "engine": "google_shopping", + "q": kwargs.get("search_query"), + "location": kwargs.get("location"), + } + ).as_dict() self._omit_fields( - results, - [r"search_metadata", r"search_parameters", r"serpapi_.+", r"filters", r"pagination"] + results, + [ + r"search_metadata", + r"search_parameters", + r"serpapi_.+", + r"filters", + r"pagination", + ], ) return results except HTTPError as e: return f"An error occurred: {str(e)}. Some parameters may be invalid." 
- - - \ No newline at end of file From 31192bcdda6dda86bbffa96c5a96ac3a00b6947d Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Fri, 10 Jan 2025 18:31:03 -0800 Subject: [PATCH 249/391] fixed multion --- src/crewai_tools/tools/multion_tool/multion_tool.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/crewai_tools/tools/multion_tool/multion_tool.py b/src/crewai_tools/tools/multion_tool/multion_tool.py index 299d66bd1..d49321dc0 100644 --- a/src/crewai_tools/tools/multion_tool/multion_tool.py +++ b/src/crewai_tools/tools/multion_tool/multion_tool.py @@ -36,6 +36,7 @@ class MultiOnTool(BaseTool): import subprocess subprocess.run(["uv", "add", "multion"], check=True) + from multion.client import MultiOn else: raise ImportError( "`multion` package not found, please run `uv add multion`" From 78aff9dbdcfa92334377b6894d9982ef29d54adb Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Mon, 13 Jan 2025 10:45:12 -0800 Subject: [PATCH 250/391] fix no client err --- .../tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py index 57c81aabe..86ea546d4 100644 --- a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py +++ b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py @@ -181,4 +181,4 @@ class ScrapegraphScrapeTool(BaseTool): raise RuntimeError(f"Scraping failed: {str(e)}") finally: # Always close the client - self.client.close() + self._client.close() From 1a824cf432bbb9feab15ee12b27abeaaa8915e3e Mon Sep 17 00:00:00 2001 From: Nikhil Shahi Date: Mon, 13 Jan 2025 15:48:45 -0600 Subject: [PATCH 251/391] added HyperbrowserLoadTool --- src/crewai_tools/__init__.py | 1 + src/crewai_tools/tools/__init__.py | 1 + .../tools/hyperbrowser_load_tool/README.md | 42 +++++++++ .../hyperbrowser_load_tool.py | 94 +++++++++++++++++++ 4 files changed, 138 insertions(+) create mode 100644 src/crewai_tools/tools/hyperbrowser_load_tool/README.md create mode 100644 src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 2db0fa05f..ca46c34d2 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -16,6 +16,7 @@ from .tools import ( FirecrawlScrapeWebsiteTool, FirecrawlSearchTool, GithubSearchTool, + HyperbrowserLoadTool, JSONSearchTool, LinkupSearchTool, LlamaIndexTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index e4288a310..ac42857bc 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -19,6 +19,7 @@ from .firecrawl_scrape_website_tool.firecrawl_scrape_website_tool import ( ) from .firecrawl_search_tool.firecrawl_search_tool import FirecrawlSearchTool from .github_search_tool.github_search_tool import GithubSearchTool +from .hyperbrowser_load_tool.hyperbrowser_load_tool import HyperbrowserLoadTool from .json_search_tool.json_search_tool import JSONSearchTool from .linkup.linkup_search_tool import LinkupSearchTool from .llamaindex_tool.llamaindex_tool import LlamaIndexTool diff --git a/src/crewai_tools/tools/hyperbrowser_load_tool/README.md b/src/crewai_tools/tools/hyperbrowser_load_tool/README.md new file mode 100644 index 000000000..e95864f5a --- /dev/null +++ b/src/crewai_tools/tools/hyperbrowser_load_tool/README.md @@ -0,0 +1,42 @@ +# 
HyperbrowserLoadTool + +## Description + +[Hyperbrowser](https://hyperbrowser.ai) is a platform for running and scaling headless browsers. It lets you launch and manage browser sessions at scale and provides easy to use solutions for any webscraping needs, such as scraping a single page or crawling an entire site. + +Key Features: +- Instant Scalability - Spin up hundreds of browser sessions in seconds without infrastructure headaches +- Simple Integration - Works seamlessly with popular tools like Puppeteer and Playwright +- Powerful APIs - Easy to use APIs for scraping/crawling any site, and much more +- Bypass Anti-Bot Measures - Built-in stealth mode, ad blocking, automatic CAPTCHA solving, and rotating proxies + +For more information about Hyperbrowser, please visit the [Hyperbrowser website](https://hyperbrowser.ai) or if you want to check out the docs, you can visit the [Hyperbrowser docs](https://docs.hyperbrowser.ai). + +## Installation + +- Head to [Hyperbrowser](https://app.hyperbrowser.ai/) to sign up and generate an API key. Once you've done this set the `HYPERBROWSER_API_KEY` environment variable or you can pass it to the `HyperbrowserLoadTool` constructor. +- Install the [Hyperbrowser SDK](https://github.com/hyperbrowserai/python-sdk): + +``` +pip install hyperbrowser 'crewai[tools]' +``` + +## Example + +Utilize the HyperbrowserLoadTool as follows to allow your agent to load websites: + +```python +from crewai_tools import HyperbrowserLoadTool + +tool = HyperbrowserLoadTool() +``` + +## Arguments + +`__init__` arguments: +- `api_key`: Optional. Specifies Hyperbrowser API key. Defaults to the `HYPERBROWSER_API_KEY` environment variable. + +`run` arguments: +- `url`: The base URL to start scraping or crawling from. +- `operation`: Optional. Specifies the operation to perform on the website. Either 'scrape' or 'crawl'. Defaults is 'scrape'. +- `params`: Optional. Specifies the params for the operation. For more information on the supported params, visit https://docs.hyperbrowser.ai/reference/sdks/python/scrape#start-scrape-job-and-wait or https://docs.hyperbrowser.ai/reference/sdks/python/crawl#start-crawl-job-and-wait. diff --git a/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py b/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py new file mode 100644 index 000000000..eb52b151c --- /dev/null +++ b/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py @@ -0,0 +1,94 @@ +import os +from typing import Any, Optional, Type, Dict, Literal, Union + +from crewai.tools import BaseTool +from pydantic import BaseModel, Field + + +class HyperbrowserLoadToolSchema(BaseModel): + url: str = Field(description="Website URL") + operation: Literal['scrape', 'crawl'] = Field(description="Operation to perform on the website. Either 'scrape' or 'crawl'") + params: Optional[Dict] = Field(description="Optional params for scrape or crawl. 
For more information on the supported params, visit https://docs.hyperbrowser.ai/reference/sdks/python/scrape#start-scrape-job-and-wait or https://docs.hyperbrowser.ai/reference/sdks/python/crawl#start-crawl-job-and-wait") + +class HyperbrowserLoadTool(BaseTool): + name: str = "Hyperbrowser web load tool" + description: str = "Scrape or crawl a website using Hyperbrowser and return the contents in properly formatted markdown or html" + args_schema: Type[BaseModel] = HyperbrowserLoadToolSchema + api_key: Optional[str] = None + hyperbrowser: Optional[Any] = None + + def __init__(self, api_key: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + self.api_key = api_key or os.getenv('HYPERBROWSER_API_KEY') + if not api_key: + raise ValueError( + "`api_key` is required, please set the `HYPERBROWSER_API_KEY` environment variable or pass it directly" + ) + + try: + from hyperbrowser import Hyperbrowser + except ImportError: + raise ImportError("`hyperbrowser` package not found, please run `pip install hyperbrowser`") + + if not self.api_key: + raise ValueError("HYPERBROWSER_API_KEY is not set. Please provide it either via the constructor with the `api_key` argument or by setting the HYPERBROWSER_API_KEY environment variable.") + + self.hyperbrowser = Hyperbrowser(api_key=self.api_key) + + def _prepare_params(self, params: Dict) -> Dict: + """Prepare session and scrape options parameters.""" + try: + from hyperbrowser.models.session import CreateSessionParams + from hyperbrowser.models.scrape import ScrapeOptions + except ImportError: + raise ImportError( + "`hyperbrowser` package not found, please run `pip install hyperbrowser`" + ) + + if "scrape_options" in params: + if "formats" in params["scrape_options"]: + formats = params["scrape_options"]["formats"] + if not all(fmt in ["markdown", "html"] for fmt in formats): + raise ValueError("formats can only contain 'markdown' or 'html'") + + if "session_options" in params: + params["session_options"] = CreateSessionParams(**params["session_options"]) + if "scrape_options" in params: + params["scrape_options"] = ScrapeOptions(**params["scrape_options"]) + return params + + def _extract_content(self, data: Union[Any, None]): + """Extract content from response data.""" + content = "" + if data: + content = data.markdown or data.html or "" + return content + + def _run(self, url: str, operation: Literal['scrape', 'crawl'] = 'scrape', params: Optional[Dict] = {}): + try: + from hyperbrowser.models.scrape import StartScrapeJobParams + from hyperbrowser.models.crawl import StartCrawlJobParams + except ImportError: + raise ImportError( + "`hyperbrowser` package not found, please run `pip install hyperbrowser`" + ) + + params = self._prepare_params(params) + + if operation == 'scrape': + scrape_params = StartScrapeJobParams(url=url, **params) + scrape_resp = self.hyperbrowser.scrape.start_and_wait(scrape_params) + content = self._extract_content(scrape_resp.data) + return content + else: + crawl_params = StartCrawlJobParams(url=url, **params) + crawl_resp = self.hyperbrowser.crawl.start_and_wait(crawl_params) + content = "" + if crawl_resp.data: + for page in crawl_resp.data: + page_content = self._extract_content(page) + if page_content: + content += ( + f"\n{'-'*50}\nUrl: {page.url}\nContent:\n{page_content}\n" + ) + return content From e343f26c037f5557c0f31654bf053802b1b534f6 Mon Sep 17 00:00:00 2001 From: Nikhil Shahi Date: Mon, 13 Jan 2025 16:08:11 -0600 Subject: [PATCH 252/391] add docstring --- 
.../hyperbrowser_load_tool/hyperbrowser_load_tool.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py b/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py index eb52b151c..b802d1859 100644 --- a/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py +++ b/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py @@ -11,6 +11,15 @@ class HyperbrowserLoadToolSchema(BaseModel): params: Optional[Dict] = Field(description="Optional params for scrape or crawl. For more information on the supported params, visit https://docs.hyperbrowser.ai/reference/sdks/python/scrape#start-scrape-job-and-wait or https://docs.hyperbrowser.ai/reference/sdks/python/crawl#start-crawl-job-and-wait") class HyperbrowserLoadTool(BaseTool): + """HyperbrowserLoadTool. + + Scrape or crawl web pages and load the contents with optional parameters for configuring content extraction. + Requires the `hyperbrowser` package. + Get your API Key from https://app.hyperbrowser.ai/ + + Args: + api_key: The Hyperbrowser API key, can be set as an environment variable `HYPERBROWSER_API_KEY` or passed directly + """ name: str = "Hyperbrowser web load tool" description: str = "Scrape or crawl a website using Hyperbrowser and return the contents in properly formatted markdown or html" args_schema: Type[BaseModel] = HyperbrowserLoadToolSchema From 14bc8de7746aeb69d0d5d774da0a8064489207e8 Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Tue, 14 Jan 2025 08:48:09 -0800 Subject: [PATCH 253/391] fix browserbase tool here --- .../tools/browserbase_load_tool/browserbase_load_tool.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py index 6ac798df9..b4020f9c8 100644 --- a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py +++ b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py @@ -45,6 +45,7 @@ class BrowserbaseLoadTool(BaseTool): import subprocess subprocess.run(["uv", "add", "browserbase"], check=True) + from browserbase import Browserbase # type: ignore else: raise ImportError( "`browserbase` package not found, please run `uv add browserbase`" From 5cb5f4f1a673b02ddc71620a9291c70e0c5ccd82 Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Tue, 14 Jan 2025 08:48:28 -0800 Subject: [PATCH 254/391] use private attr --- .../firecrawl_crawl_website_tool.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py index 54ae8d3f6..aa4f236ef 100644 --- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py @@ -40,7 +40,7 @@ class FirecrawlCrawlWebsiteTool(BaseTool): try: from firecrawl import FirecrawlApp # type: ignore - self.firecrawl = FirecrawlApp(api_key=self.api_key) + self._firecrawl = FirecrawlApp(api_key=self.api_key) except ImportError: import click @@ -53,7 +53,7 @@ class FirecrawlCrawlWebsiteTool(BaseTool): subprocess.run(["uv", "add", "firecrawl-py"], check=True) from firecrawl import FirecrawlApp - self.firecrawl = FirecrawlApp(api_key=self.api_key) + self._firecrawl = FirecrawlApp(api_key=self.api_key) except 
subprocess.CalledProcessError: raise ImportError("Failed to install firecrawl-py package") else: From 334beda1810201fcadab8dde8c50c04d9f968549 Mon Sep 17 00:00:00 2001 From: Tom Mahler Date: Tue, 14 Jan 2025 21:06:42 +0200 Subject: [PATCH 255/391] added missing import --- .../tools/code_interpreter_tool/code_interpreter_tool.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py index fd0d39932..8924d52c0 100644 --- a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py +++ b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py @@ -3,6 +3,7 @@ import os from typing import List, Optional, Type from docker import from_env as docker_from_env +from docker import DockerClient from docker.models.containers import Container from docker.errors import ImageNotFound, NotFound from crewai.tools import BaseTool @@ -43,7 +44,7 @@ class CodeInterpreterTool(BaseTool): Verify if the Docker image is available. Optionally use a user-provided Dockerfile. """ - client = docker_from_env() if self.user_docker_base_url == None else docker.DockerClient(base_url=self.user_docker_base_url) + client = docker_from_env() if self.user_docker_base_url == None else DockerClient(base_url=self.user_docker_base_url) try: client.images.get(self.default_image_tag) From 1bd87f514e8984e19b1c6e09cb8cee09a30a5601 Mon Sep 17 00:00:00 2001 From: Tom Mahler Date: Tue, 14 Jan 2025 21:07:08 +0200 Subject: [PATCH 256/391] changed == None to is None --- .../tools/code_interpreter_tool/code_interpreter_tool.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py index 8924d52c0..5d23c580a 100644 --- a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py +++ b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py @@ -31,7 +31,7 @@ class CodeInterpreterTool(BaseTool): default_image_tag: str = "code-interpreter:latest" code: Optional[str] = None user_dockerfile_path: Optional[str] = None - user_docker_base_url: Optional[str] = None + user_docker_base_url: Optional[str] = None unsafe_mode: bool = False @staticmethod @@ -44,7 +44,7 @@ class CodeInterpreterTool(BaseTool): Verify if the Docker image is available. Optionally use a user-provided Dockerfile. 
""" - client = docker_from_env() if self.user_docker_base_url == None else DockerClient(base_url=self.user_docker_base_url) + client = docker_from_env() if self.user_docker_base_url is None else DockerClient(base_url=self.user_docker_base_url) try: client.images.get(self.default_image_tag) @@ -136,4 +136,4 @@ class CodeInterpreterTool(BaseTool): exec(code, {}, exec_locals) return exec_locals.get("result", "No result variable found.") except Exception as e: - return f"An error occurred: {str(e)}" + return f"An error occurred: {str(e)}" \ No newline at end of file From 1568008db61d63eadb4b6f03657b785e3aec00f1 Mon Sep 17 00:00:00 2001 From: Carter Chen Date: Mon, 13 Jan 2025 21:33:40 -0500 Subject: [PATCH 257/391] remove _generate_description on file_read_tool --- .../tools/file_read_tool/file_read_tool.py | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/src/crewai_tools/tools/file_read_tool/file_read_tool.py b/src/crewai_tools/tools/file_read_tool/file_read_tool.py index 323a26d51..9106533fa 100644 --- a/src/crewai_tools/tools/file_read_tool/file_read_tool.py +++ b/src/crewai_tools/tools/file_read_tool/file_read_tool.py @@ -49,6 +49,7 @@ class FileReadTool(BaseTool): if file_path is not None: self.file_path = file_path self.description = f"A tool that reads file content. The default file is {file_path}, but you can provide a different 'file_path' parameter to read another file." + self._generate_description() def _run( self, @@ -68,14 +69,3 @@ class FileReadTool(BaseTool): except Exception as e: return f"Error: Failed to read file {file_path}. {str(e)}" - def _generate_description(self) -> None: - """Generate the tool description based on file path. - - This method updates the tool's description to include information about - the default file path while maintaining the ability to specify a different - file at runtime. - - Returns: - None - """ - self.description = f"A tool that can be used to read {self.file_path}'s content." From fe2a5abf8d19b72bedd2c7d0c099976e7abf7051 Mon Sep 17 00:00:00 2001 From: Carter Chen Date: Tue, 14 Jan 2025 21:16:11 -0500 Subject: [PATCH 258/391] restructure init statement to remove duplicate call to _generate_description --- src/crewai_tools/tools/file_read_tool/file_read_tool.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/crewai_tools/tools/file_read_tool/file_read_tool.py b/src/crewai_tools/tools/file_read_tool/file_read_tool.py index 9106533fa..22a1204f6 100644 --- a/src/crewai_tools/tools/file_read_tool/file_read_tool.py +++ b/src/crewai_tools/tools/file_read_tool/file_read_tool.py @@ -45,11 +45,11 @@ class FileReadTool(BaseTool): this becomes the default file path for the tool. **kwargs: Additional keyword arguments passed to BaseTool. """ - super().__init__(**kwargs) if file_path is not None: - self.file_path = file_path - self.description = f"A tool that reads file content. The default file is {file_path}, but you can provide a different 'file_path' parameter to read another file." - self._generate_description() + kwargs['description'] = f"A tool that reads file content. The default file is {file_path}, but you can provide a different 'file_path' parameter to read another file." 
+ + super().__init__(**kwargs) + self.file_path = file_path def _run( self, From 9c4c4219cd18b75f56fce8279a7cca1eb7672829 Mon Sep 17 00:00:00 2001 From: ChethanUK Date: Fri, 17 Jan 2025 02:23:06 +0530 Subject: [PATCH 259/391] Adding Snowflake search tool --- src/crewai_tools/__init__.py | 2 + src/crewai_tools/tools/__init__.py | 5 + .../browserbase_load_tool.py | 14 +- .../code_interpreter_tool.py | 18 +- .../directory_read_tool.py | 2 - .../tools/file_read_tool/file_read_tool.py | 7 +- .../file_writer_tool/file_writer_tool.py | 4 +- .../firecrawl_crawl_website_tool.py | 1 - .../firecrawl_scrape_website_tool.py | 1 - .../github_search_tool/github_search_tool.py | 4 +- .../jina_scrape_website_tool.py | 4 +- .../tools/linkup/linkup_search_tool.py | 16 +- .../mysql_search_tool/mysql_search_tool.py | 4 +- .../tools/patronus_eval_tool/example.py | 26 +-- .../patronus_eval_tool/patronus_eval_tool.py | 14 +- .../patronus_local_evaluator_tool.py | 17 +- .../patronus_predefined_criteria_eval_tool.py | 12 +- .../pdf_text_writing_tool.py | 8 +- .../tools/pg_seach_tool/pg_search_tool.py | 4 +- .../scrape_element_from_website.py | 2 - .../scrape_website_tool.py | 2 - .../scrapegraph_scrape_tool.py | 34 +-- .../selenium_scraping_tool.py | 27 ++- .../tools/serpapi_tool/serpapi_base_tool.py | 3 +- .../serpapi_google_search_tool.py | 41 ++-- .../serpapi_google_shopping_tool.py | 41 ++-- .../tools/serper_dev_tool/serper_dev_tool.py | 4 +- .../serply_webpage_to_markdown_tool.py | 4 +- .../tools/snowflake_search_tool/README.md | 155 +++++++++++++ .../tools/snowflake_search_tool/__init__.py | 11 + .../snowflake_search_tool.py | 201 ++++++++++++++++ .../tools/stagehand_tool/stagehand_tool.py | 154 ++++++------ .../tools/vision_tool/vision_tool.py | 24 +- .../tools/weaviate_tool/vector_search.py | 3 +- .../website_search/website_search_tool.py | 4 +- .../youtube_channel_search_tool.py | 4 +- .../youtube_video_search_tool.py | 4 +- tests/base_tool_test.py | 133 +++++++---- tests/file_read_tool_test.py | 6 +- tests/it/tools/__init__.py | 0 tests/it/tools/conftest.py | 21 ++ tests/it/tools/snowflake_search_tool_test.py | 219 ++++++++++++++++++ tests/spider_tool_test.py | 21 +- tests/tools/snowflake_search_tool_test.py | 103 ++++++++ tests/tools/test_code_interpreter_tool.py | 16 +- 45 files changed, 1089 insertions(+), 311 deletions(-) create mode 100644 src/crewai_tools/tools/snowflake_search_tool/README.md create mode 100644 src/crewai_tools/tools/snowflake_search_tool/__init__.py create mode 100644 src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py create mode 100644 tests/it/tools/__init__.py create mode 100644 tests/it/tools/conftest.py create mode 100644 tests/it/tools/snowflake_search_tool_test.py create mode 100644 tests/tools/snowflake_search_tool_test.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 2db0fa05f..9c7e9d9a9 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -43,6 +43,8 @@ from .tools import ( SerplyScholarSearchTool, SerplyWebpageToMarkdownTool, SerplyWebSearchTool, + SnowflakeConfig, + SnowflakeSearchTool, SpiderTool, TXTSearchTool, VisionTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index e4288a310..ea5a87ce1 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -54,6 +54,11 @@ from .serply_api_tool.serply_news_search_tool import SerplyNewsSearchTool from .serply_api_tool.serply_scholar_search_tool import SerplyScholarSearchTool 
from .serply_api_tool.serply_web_search_tool import SerplyWebSearchTool from .serply_api_tool.serply_webpage_to_markdown_tool import SerplyWebpageToMarkdownTool +from .snowflake_search_tool import ( + SnowflakeConfig, + SnowflakeSearchTool, + SnowflakeSearchToolInput, +) from .spider_tool.spider_tool import SpiderTool from .txt_search_tool.txt_search_tool import TXTSearchTool from .vision_tool.vision_tool import VisionTool diff --git a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py index 2ca1b95fc..d3f76e0a6 100644 --- a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py +++ b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py @@ -1,8 +1,8 @@ import os from typing import Any, Optional, Type -from pydantic import BaseModel, Field from crewai.tools import BaseTool +from pydantic import BaseModel, Field class BrowserbaseLoadToolSchema(BaseModel): @@ -11,12 +11,10 @@ class BrowserbaseLoadToolSchema(BaseModel): class BrowserbaseLoadTool(BaseTool): name: str = "Browserbase web load tool" - description: str = ( - "Load webpages url in a headless browser using Browserbase and return the contents" - ) + description: str = "Load webpages url in a headless browser using Browserbase and return the contents" args_schema: Type[BaseModel] = BrowserbaseLoadToolSchema - api_key: Optional[str] = os.getenv('BROWSERBASE_API_KEY') - project_id: Optional[str] = os.getenv('BROWSERBASE_PROJECT_ID') + api_key: Optional[str] = os.getenv("BROWSERBASE_API_KEY") + project_id: Optional[str] = os.getenv("BROWSERBASE_PROJECT_ID") text_content: Optional[bool] = False session_id: Optional[str] = None proxy: Optional[bool] = None @@ -33,7 +31,9 @@ class BrowserbaseLoadTool(BaseTool): ): super().__init__(**kwargs) if not self.api_key: - raise EnvironmentError("BROWSERBASE_API_KEY environment variable is required for initialization") + raise EnvironmentError( + "BROWSERBASE_API_KEY environment variable is required for initialization" + ) try: from browserbase import Browserbase # type: ignore except ImportError: diff --git a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py index fd0d39932..b508e4b6a 100644 --- a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py +++ b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py @@ -2,10 +2,10 @@ import importlib.util import os from typing import List, Optional, Type -from docker import from_env as docker_from_env -from docker.models.containers import Container -from docker.errors import ImageNotFound, NotFound from crewai.tools import BaseTool +from docker import from_env as docker_from_env +from docker.errors import ImageNotFound, NotFound +from docker.models.containers import Container from pydantic import BaseModel, Field @@ -30,7 +30,7 @@ class CodeInterpreterTool(BaseTool): default_image_tag: str = "code-interpreter:latest" code: Optional[str] = None user_dockerfile_path: Optional[str] = None - user_docker_base_url: Optional[str] = None + user_docker_base_url: Optional[str] = None unsafe_mode: bool = False @staticmethod @@ -43,7 +43,11 @@ class CodeInterpreterTool(BaseTool): Verify if the Docker image is available. Optionally use a user-provided Dockerfile. 
""" - client = docker_from_env() if self.user_docker_base_url == None else docker.DockerClient(base_url=self.user_docker_base_url) + client = ( + docker_from_env() + if self.user_docker_base_url == None + else docker.DockerClient(base_url=self.user_docker_base_url) + ) try: client.images.get(self.default_image_tag) @@ -76,9 +80,7 @@ class CodeInterpreterTool(BaseTool): else: return self.run_code_in_docker(code, libraries_used) - def _install_libraries( - self, container: Container, libraries: List[str] - ) -> None: + def _install_libraries(self, container: Container, libraries: List[str]) -> None: """ Install missing libraries in the Docker container """ diff --git a/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py b/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py index 6033202be..8488f391e 100644 --- a/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py +++ b/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py @@ -8,8 +8,6 @@ from pydantic import BaseModel, Field class FixedDirectoryReadToolSchema(BaseModel): """Input for DirectoryReadTool.""" - pass - class DirectoryReadToolSchema(FixedDirectoryReadToolSchema): """Input for DirectoryReadTool.""" diff --git a/src/crewai_tools/tools/file_read_tool/file_read_tool.py b/src/crewai_tools/tools/file_read_tool/file_read_tool.py index 323a26d51..384b97f40 100644 --- a/src/crewai_tools/tools/file_read_tool/file_read_tool.py +++ b/src/crewai_tools/tools/file_read_tool/file_read_tool.py @@ -32,6 +32,7 @@ class FileReadTool(BaseTool): >>> content = tool.run() # Reads /path/to/file.txt >>> content = tool.run(file_path="/path/to/other.txt") # Reads other.txt """ + name: str = "Read a file's content" description: str = "A tool that reads the content of a file. To use this tool, provide a 'file_path' parameter with the path to the file you want to read." args_schema: Type[BaseModel] = FileReadToolSchema @@ -57,7 +58,7 @@ class FileReadTool(BaseTool): file_path = kwargs.get("file_path", self.file_path) if file_path is None: return "Error: No file path provided. Please provide a file path either in the constructor or as an argument." - + try: with open(file_path, "r") as file: return file.read() @@ -78,4 +79,6 @@ class FileReadTool(BaseTool): Returns: None """ - self.description = f"A tool that can be used to read {self.file_path}'s content." + self.description = ( + f"A tool that can be used to read {self.file_path}'s content." + ) diff --git a/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py b/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py index ed454a1bd..f975d3301 100644 --- a/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py +++ b/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py @@ -15,9 +15,7 @@ class FileWriterToolInput(BaseModel): class FileWriterTool(BaseTool): name: str = "File Writer Tool" - description: str = ( - "A tool to write content to a specified file. Accepts filename, content, and optionally a directory path and overwrite flag as input." - ) + description: str = "A tool to write content to a specified file. Accepts filename, content, and optionally a directory path and overwrite flag as input." 
args_schema: Type[BaseModel] = FileWriterToolInput def _run(self, **kwargs: Any) -> str: diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py index 6c7c4ffd9..dcb70e291 100644 --- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py @@ -72,4 +72,3 @@ except ImportError: """ When this tool is not used, then exception can be ignored. """ - pass diff --git a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py index 9458e7a4f..3f5f8c4c4 100644 --- a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py @@ -63,4 +63,3 @@ except ImportError: """ When this tool is not used, then exception can be ignored. """ - pass diff --git a/src/crewai_tools/tools/github_search_tool/github_search_tool.py b/src/crewai_tools/tools/github_search_tool/github_search_tool.py index 4bf8b9e05..6ba7b919c 100644 --- a/src/crewai_tools/tools/github_search_tool/github_search_tool.py +++ b/src/crewai_tools/tools/github_search_tool/github_search_tool.py @@ -27,9 +27,7 @@ class GithubSearchToolSchema(FixedGithubSearchToolSchema): class GithubSearchTool(RagTool): name: str = "Search a github repo's content" - description: str = ( - "A tool that can be used to semantic search a query from a github repo's content. This is not the GitHub API, but instead a tool that can provide semantic search capabilities." - ) + description: str = "A tool that can be used to semantic search a query from a github repo's content. This is not the GitHub API, but instead a tool that can provide semantic search capabilities." summarize: bool = False gh_token: str args_schema: Type[BaseModel] = GithubSearchToolSchema diff --git a/src/crewai_tools/tools/jina_scrape_website_tool/jina_scrape_website_tool.py b/src/crewai_tools/tools/jina_scrape_website_tool/jina_scrape_website_tool.py index a10a4ffdb..86f771cd0 100644 --- a/src/crewai_tools/tools/jina_scrape_website_tool/jina_scrape_website_tool.py +++ b/src/crewai_tools/tools/jina_scrape_website_tool/jina_scrape_website_tool.py @@ -13,9 +13,7 @@ class JinaScrapeWebsiteToolInput(BaseModel): class JinaScrapeWebsiteTool(BaseTool): name: str = "JinaScrapeWebsiteTool" - description: str = ( - "A tool that can be used to read a website content using Jina.ai reader and return markdown content." - ) + description: str = "A tool that can be used to read a website content using Jina.ai reader and return markdown content." 
args_schema: Type[BaseModel] = JinaScrapeWebsiteToolInput website_url: Optional[str] = None api_key: Optional[str] = None diff --git a/src/crewai_tools/tools/linkup/linkup_search_tool.py b/src/crewai_tools/tools/linkup/linkup_search_tool.py index b172ad029..486663d3e 100644 --- a/src/crewai_tools/tools/linkup/linkup_search_tool.py +++ b/src/crewai_tools/tools/linkup/linkup_search_tool.py @@ -2,6 +2,7 @@ from typing import Any try: from linkup import LinkupClient + LINKUP_AVAILABLE = True except ImportError: LINKUP_AVAILABLE = False @@ -9,10 +10,13 @@ except ImportError: from pydantic import PrivateAttr + class LinkupSearchTool: name: str = "Linkup Search Tool" - description: str = "Performs an API call to Linkup to retrieve contextual information." - _client: LinkupClient = PrivateAttr() # type: ignore + description: str = ( + "Performs an API call to Linkup to retrieve contextual information." + ) + _client: LinkupClient = PrivateAttr() # type: ignore def __init__(self, api_key: str): """ @@ -25,7 +29,9 @@ class LinkupSearchTool: ) self._client = LinkupClient(api_key=api_key) - def _run(self, query: str, depth: str = "standard", output_type: str = "searchResults") -> dict: + def _run( + self, query: str, depth: str = "standard", output_type: str = "searchResults" + ) -> dict: """ Executes a search using the Linkup API. @@ -36,9 +42,7 @@ class LinkupSearchTool: """ try: response = self._client.search( - query=query, - depth=depth, - output_type=output_type + query=query, depth=depth, output_type=output_type ) results = [ {"name": result.name, "url": result.url, "content": result.content} diff --git a/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py b/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py index f931a006b..a472e1761 100644 --- a/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py +++ b/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py @@ -17,9 +17,7 @@ class MySQLSearchToolSchema(BaseModel): class MySQLSearchTool(RagTool): name: str = "Search a database's table content" - description: str = ( - "A tool that can be used to semantic search a query from a database table's content." - ) + description: str = "A tool that can be used to semantic search a query from a database table's content." 
args_schema: Type[BaseModel] = MySQLSearchToolSchema db_uri: str = Field(..., description="Mandatory database URI") diff --git a/src/crewai_tools/tools/patronus_eval_tool/example.py b/src/crewai_tools/tools/patronus_eval_tool/example.py index b9e1bad5e..185e9f485 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/example.py +++ b/src/crewai_tools/tools/patronus_eval_tool/example.py @@ -1,30 +1,24 @@ -from crewai import Agent, Crew, Task -from patronus_eval_tool import ( - PatronusEvalTool, -) -from patronus_local_evaluator_tool import ( - PatronusLocalEvaluatorTool, -) -from patronus_predefined_criteria_eval_tool import ( - PatronusPredefinedCriteriaEvalTool, -) -from patronus import Client, EvaluationResult import random +from crewai import Agent, Crew, Task +from patronus import Client, EvaluationResult +from patronus_local_evaluator_tool import PatronusLocalEvaluatorTool # Test the PatronusLocalEvaluatorTool where agent uses the local evaluator client = Client() + # Example of an evaluator that returns a random pass/fail result @client.register_local_evaluator("random_evaluator") def random_evaluator(**kwargs): score = random.random() return EvaluationResult( - score_raw=score, - pass_=score >= 0.5, - explanation="example explanation" # Optional justification for LLM judges + score_raw=score, + pass_=score >= 0.5, + explanation="example explanation", # Optional justification for LLM judges ) + # 1. Uses PatronusEvalTool: agent can pick the best evaluator and criteria # patronus_eval_tool = PatronusEvalTool() @@ -35,7 +29,9 @@ def random_evaluator(**kwargs): # 3. Uses PatronusLocalEvaluatorTool: agent uses user defined evaluator patronus_eval_tool = PatronusLocalEvaluatorTool( - patronus_client=client, evaluator="random_evaluator", evaluated_model_gold_answer="example label" + patronus_client=client, + evaluator="random_evaluator", + evaluated_model_gold_answer="example label", ) # Create a new agent diff --git a/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py b/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py index 23ffe2fd4..be1f410e2 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py +++ b/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py @@ -1,8 +1,9 @@ -import os import json -import requests +import os import warnings -from typing import Any, List, Dict, Optional +from typing import Any, Dict, List, Optional + +import requests from crewai.tools import BaseTool @@ -19,7 +20,9 @@ class PatronusEvalTool(BaseTool): self.evaluators = temp_evaluators self.criteria = temp_criteria self.description = self._generate_description() - warnings.warn("You are allowing the agent to select the best evaluator and criteria when you use the `PatronusEvalTool`. If this is not intended then please use `PatronusPredefinedCriteriaEvalTool` instead.") + warnings.warn( + "You are allowing the agent to select the best evaluator and criteria when you use the `PatronusEvalTool`. If this is not intended then please use `PatronusPredefinedCriteriaEvalTool` instead." + ) def _init_run(self): evaluators_set = json.loads( @@ -104,7 +107,6 @@ class PatronusEvalTool(BaseTool): evaluated_model_retrieved_context: Optional[str], evaluators: List[Dict[str, str]], ) -> Any: - # Assert correct format of evaluators evals = [] for ev in evaluators: @@ -136,4 +138,4 @@ class PatronusEvalTool(BaseTool): f"Failed to evaluate model input and output. Response status code: {response.status_code}. 
Reason: {response.text}" ) - return response.json() \ No newline at end of file + return response.json() diff --git a/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py b/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py index e65cb342d..66781c593 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py +++ b/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py @@ -1,7 +1,8 @@ from typing import Any, Type + from crewai.tools import BaseTool -from pydantic import BaseModel, Field from patronus import Client +from pydantic import BaseModel, Field class FixedLocalEvaluatorToolSchema(BaseModel): @@ -24,16 +25,20 @@ class PatronusLocalEvaluatorTool(BaseTool): name: str = "Patronus Local Evaluator Tool" evaluator: str = "The registered local evaluator" evaluated_model_gold_answer: str = "The agent's gold answer" - description: str = ( - "This tool is used to evaluate the model input and output using custom function evaluators." - ) + description: str = "This tool is used to evaluate the model input and output using custom function evaluators." client: Any = None args_schema: Type[BaseModel] = FixedLocalEvaluatorToolSchema class Config: arbitrary_types_allowed = True - def __init__(self, patronus_client: Client, evaluator: str, evaluated_model_gold_answer: str, **kwargs: Any): + def __init__( + self, + patronus_client: Client, + evaluator: str, + evaluated_model_gold_answer: str, + **kwargs: Any, + ): super().__init__(**kwargs) self.client = patronus_client if evaluator: @@ -79,7 +84,7 @@ class PatronusLocalEvaluatorTool(BaseTool): if isinstance(evaluated_model_gold_answer, str) else evaluated_model_gold_answer.get("description") ), - tags={}, # Optional metadata, supports arbitrary kv pairs + tags={}, # Optional metadata, supports arbitrary kv pairs ) output = f"Evaluation result: {result.pass_}, Explanation: {result.explanation}" return output diff --git a/src/crewai_tools/tools/patronus_eval_tool/patronus_predefined_criteria_eval_tool.py b/src/crewai_tools/tools/patronus_eval_tool/patronus_predefined_criteria_eval_tool.py index 28ffc2912..cf906586d 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/patronus_predefined_criteria_eval_tool.py +++ b/src/crewai_tools/tools/patronus_eval_tool/patronus_predefined_criteria_eval_tool.py @@ -1,7 +1,8 @@ -import os import json +import os +from typing import Any, Dict, List, Type + import requests -from typing import Any, List, Dict, Type from crewai.tools import BaseTool from pydantic import BaseModel, Field @@ -33,9 +34,7 @@ class PatronusPredefinedCriteriaEvalTool(BaseTool): """ name: str = "Call Patronus API tool for evaluation of model inputs and outputs" - description: str = ( - """This tool calls the Patronus Evaluation API that takes the following arguments:""" - ) + description: str = """This tool calls the Patronus Evaluation API that takes the following arguments:""" evaluate_url: str = "https://api.patronus.ai/v1/evaluate" args_schema: Type[BaseModel] = FixedBaseToolSchema evaluators: List[Dict[str, str]] = [] @@ -52,7 +51,6 @@ class PatronusPredefinedCriteriaEvalTool(BaseTool): self, **kwargs: Any, ) -> Any: - evaluated_model_input = kwargs.get("evaluated_model_input") evaluated_model_output = kwargs.get("evaluated_model_output") evaluated_model_retrieved_context = kwargs.get( @@ -103,4 +101,4 @@ class PatronusPredefinedCriteriaEvalTool(BaseTool): f"Failed to evaluate model input and output. Status code: {response.status_code}. 
Reason: {response.text}" ) - return response.json() \ No newline at end of file + return response.json() diff --git a/src/crewai_tools/tools/pdf_text_writing_tool/pdf_text_writing_tool.py b/src/crewai_tools/tools/pdf_text_writing_tool/pdf_text_writing_tool.py index 851593167..ad4d847b6 100644 --- a/src/crewai_tools/tools/pdf_text_writing_tool/pdf_text_writing_tool.py +++ b/src/crewai_tools/tools/pdf_text_writing_tool/pdf_text_writing_tool.py @@ -1,7 +1,9 @@ -from typing import Optional, Type -from pydantic import BaseModel, Field -from pypdf import PdfReader, PdfWriter, PageObject, ContentStream, NameObject, Font from pathlib import Path +from typing import Optional, Type + +from pydantic import BaseModel, Field +from pypdf import ContentStream, Font, NameObject, PageObject, PdfReader, PdfWriter + from crewai_tools.tools.rag.rag_tool import RagTool diff --git a/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py b/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py index dc75470a2..ec0207aa7 100644 --- a/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py +++ b/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py @@ -17,9 +17,7 @@ class PGSearchToolSchema(BaseModel): class PGSearchTool(RagTool): name: str = "Search a database's table content" - description: str = ( - "A tool that can be used to semantic search a query from a database table's content." - ) + description: str = "A tool that can be used to semantic search a query from a database table's content." args_schema: Type[BaseModel] = PGSearchToolSchema db_uri: str = Field(..., description="Mandatory database URI") diff --git a/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py b/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py index 14757d247..f1e215bf3 100644 --- a/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py +++ b/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py @@ -10,8 +10,6 @@ from pydantic import BaseModel, Field class FixedScrapeElementFromWebsiteToolSchema(BaseModel): """Input for ScrapeElementFromWebsiteTool.""" - pass - class ScrapeElementFromWebsiteToolSchema(FixedScrapeElementFromWebsiteToolSchema): """Input for ScrapeElementFromWebsiteTool.""" diff --git a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py index 8cfc5d136..0e7e25ca6 100644 --- a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py +++ b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py @@ -11,8 +11,6 @@ from pydantic import BaseModel, Field class FixedScrapeWebsiteToolSchema(BaseModel): """Input for ScrapeWebsiteTool.""" - pass - class ScrapeWebsiteToolSchema(FixedScrapeWebsiteToolSchema): """Input for ScrapeWebsiteTool.""" diff --git a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py index 906bf6376..29c132ea9 100644 --- a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py +++ b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py @@ -10,17 +10,14 @@ from scrapegraph_py.logger import sgai_logger class ScrapegraphError(Exception): """Base exception for Scrapegraph-related errors""" - pass class RateLimitError(ScrapegraphError): """Raised when API rate limits are exceeded""" - pass class FixedScrapegraphScrapeToolSchema(BaseModel): """Input for 
ScrapegraphScrapeTool when website_url is fixed.""" - pass class ScrapegraphScrapeToolSchema(FixedScrapegraphScrapeToolSchema): @@ -32,7 +29,7 @@ class ScrapegraphScrapeToolSchema(FixedScrapegraphScrapeToolSchema): description="Prompt to guide the extraction of content", ) - @validator('website_url') + @validator("website_url") def validate_url(cls, v): """Validate URL format""" try: @@ -41,13 +38,15 @@ class ScrapegraphScrapeToolSchema(FixedScrapegraphScrapeToolSchema): raise ValueError return v except Exception: - raise ValueError("Invalid URL format. URL must include scheme (http/https) and domain") + raise ValueError( + "Invalid URL format. URL must include scheme (http/https) and domain" + ) class ScrapegraphScrapeTool(BaseTool): """ A tool that uses Scrapegraph AI to intelligently scrape website content. - + Raises: ValueError: If API key is missing or URL format is invalid RateLimitError: If API rate limits are exceeded @@ -55,7 +54,9 @@ class ScrapegraphScrapeTool(BaseTool): """ name: str = "Scrapegraph website scraper" - description: str = "A tool that uses Scrapegraph AI to intelligently scrape website content." + description: str = ( + "A tool that uses Scrapegraph AI to intelligently scrape website content." + ) args_schema: Type[BaseModel] = ScrapegraphScrapeToolSchema website_url: Optional[str] = None user_prompt: Optional[str] = None @@ -70,7 +71,7 @@ class ScrapegraphScrapeTool(BaseTool): ): super().__init__(**kwargs) self.api_key = api_key or os.getenv("SCRAPEGRAPH_API_KEY") - + if not self.api_key: raise ValueError("Scrapegraph API key is required") @@ -79,7 +80,7 @@ class ScrapegraphScrapeTool(BaseTool): self.website_url = website_url self.description = f"A tool that uses Scrapegraph AI to intelligently scrape {website_url}'s content." self.args_schema = FixedScrapegraphScrapeToolSchema - + if user_prompt is not None: self.user_prompt = user_prompt @@ -94,22 +95,24 @@ class ScrapegraphScrapeTool(BaseTool): if not all([result.scheme, result.netloc]): raise ValueError except Exception: - raise ValueError("Invalid URL format. URL must include scheme (http/https) and domain") + raise ValueError( + "Invalid URL format. 
URL must include scheme (http/https) and domain" + ) def _handle_api_response(self, response: dict) -> str: """Handle and validate API response""" if not response: raise RuntimeError("Empty response from Scrapegraph API") - + if "error" in response: error_msg = response.get("error", {}).get("message", "Unknown error") if "rate limit" in error_msg.lower(): raise RateLimitError(f"Rate limit exceeded: {error_msg}") raise RuntimeError(f"API error: {error_msg}") - + if "result" not in response: raise RuntimeError("Invalid response format from Scrapegraph API") - + return response["result"] def _run( @@ -117,7 +120,10 @@ class ScrapegraphScrapeTool(BaseTool): **kwargs: Any, ) -> Any: website_url = kwargs.get("website_url", self.website_url) - user_prompt = kwargs.get("user_prompt", self.user_prompt) or "Extract the main content of the webpage" + user_prompt = ( + kwargs.get("user_prompt", self.user_prompt) + or "Extract the main content of the webpage" + ) if not website_url: raise ValueError("website_url is required") diff --git a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py index d7a55428d..8099a06ab 100644 --- a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py +++ b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py @@ -17,33 +17,36 @@ class FixedSeleniumScrapingToolSchema(BaseModel): class SeleniumScrapingToolSchema(FixedSeleniumScrapingToolSchema): """Input for SeleniumScrapingTool.""" - website_url: str = Field(..., description="Mandatory website url to read the file. Must start with http:// or https://") + website_url: str = Field( + ..., + description="Mandatory website url to read the file. Must start with http:// or https://", + ) css_element: str = Field( ..., description="Mandatory css reference for element to scrape from the website", ) - @validator('website_url') + @validator("website_url") def validate_website_url(cls, v): if not v: raise ValueError("Website URL cannot be empty") - + if len(v) > 2048: # Common maximum URL length raise ValueError("URL is too long (max 2048 characters)") - - if not re.match(r'^https?://', v): + + if not re.match(r"^https?://", v): raise ValueError("URL must start with http:// or https://") - + try: result = urlparse(v) if not all([result.scheme, result.netloc]): raise ValueError("Invalid URL format") except Exception as e: raise ValueError(f"Invalid URL: {str(e)}") - - if re.search(r'\s', v): + + if re.search(r"\s", v): raise ValueError("URL cannot contain whitespace") - + return v @@ -130,11 +133,11 @@ class SeleniumScrapingTool(BaseTool): def _create_driver(self, url, cookie, wait_time): if not url: raise ValueError("URL cannot be empty") - + # Validate URL format - if not re.match(r'^https?://', url): + if not re.match(r"^https?://", url): raise ValueError("URL must start with http:// or https://") - + options = Options() options.add_argument("--headless") driver = self.driver(options=options) diff --git a/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py b/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py index 98491190c..895f3aadc 100644 --- a/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py +++ b/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py @@ -1,9 +1,10 @@ import os import re -from typing import Optional, Any, Union +from typing import Any, Optional, Union from crewai.tools import BaseTool + class SerpApiBaseTool(BaseTool): """Base class for SerpApi functionality 
with shared capabilities.""" diff --git a/src/crewai_tools/tools/serpapi_tool/serpapi_google_search_tool.py b/src/crewai_tools/tools/serpapi_tool/serpapi_google_search_tool.py index 199b7f5a2..c1a877f23 100644 --- a/src/crewai_tools/tools/serpapi_tool/serpapi_google_search_tool.py +++ b/src/crewai_tools/tools/serpapi_tool/serpapi_google_search_tool.py @@ -1,14 +1,21 @@ -from typing import Any, Type, Optional +from typing import Any, Optional, Type -import re from pydantic import BaseModel, Field -from .serpapi_base_tool import SerpApiBaseTool from serpapi import HTTPError +from .serpapi_base_tool import SerpApiBaseTool + + class SerpApiGoogleSearchToolSchema(BaseModel): """Input for Google Search.""" - search_query: str = Field(..., description="Mandatory search query you want to use to Google search.") - location: Optional[str] = Field(None, description="Location you want the search to be performed in.") + + search_query: str = Field( + ..., description="Mandatory search query you want to use to Google search." + ) + location: Optional[str] = Field( + None, description="Location you want the search to be performed in." + ) + class SerpApiGoogleSearchTool(SerpApiBaseTool): name: str = "Google Search" @@ -22,19 +29,25 @@ class SerpApiGoogleSearchTool(SerpApiBaseTool): **kwargs: Any, ) -> Any: try: - results = self.client.search({ - "q": kwargs.get("search_query"), - "location": kwargs.get("location"), - }).as_dict() + results = self.client.search( + { + "q": kwargs.get("search_query"), + "location": kwargs.get("location"), + } + ).as_dict() self._omit_fields( - results, - [r"search_metadata", r"search_parameters", r"serpapi_.+", r".+_token", r"displayed_link", r"pagination"] + results, + [ + r"search_metadata", + r"search_parameters", + r"serpapi_.+", + r".+_token", + r"displayed_link", + r"pagination", + ], ) return results except HTTPError as e: return f"An error occurred: {str(e)}. Some parameters may be invalid." - - - \ No newline at end of file diff --git a/src/crewai_tools/tools/serpapi_tool/serpapi_google_shopping_tool.py b/src/crewai_tools/tools/serpapi_tool/serpapi_google_shopping_tool.py index b44b3a809..ec9477351 100644 --- a/src/crewai_tools/tools/serpapi_tool/serpapi_google_shopping_tool.py +++ b/src/crewai_tools/tools/serpapi_tool/serpapi_google_shopping_tool.py @@ -1,14 +1,20 @@ -from typing import Any, Type, Optional +from typing import Any, Optional, Type -import re from pydantic import BaseModel, Field -from .serpapi_base_tool import SerpApiBaseTool from serpapi import HTTPError +from .serpapi_base_tool import SerpApiBaseTool + + class SerpApiGoogleShoppingToolSchema(BaseModel): """Input for Google Shopping.""" - search_query: str = Field(..., description="Mandatory search query you want to use to Google shopping.") - location: Optional[str] = Field(None, description="Location you want the search to be performed in.") + + search_query: str = Field( + ..., description="Mandatory search query you want to use to Google shopping." + ) + location: Optional[str] = Field( + None, description="Location you want the search to be performed in." 
+ ) class SerpApiGoogleShoppingTool(SerpApiBaseTool): @@ -23,20 +29,25 @@ class SerpApiGoogleShoppingTool(SerpApiBaseTool): **kwargs: Any, ) -> Any: try: - results = self.client.search({ - "engine": "google_shopping", - "q": kwargs.get("search_query"), - "location": kwargs.get("location") - }).as_dict() + results = self.client.search( + { + "engine": "google_shopping", + "q": kwargs.get("search_query"), + "location": kwargs.get("location"), + } + ).as_dict() self._omit_fields( - results, - [r"search_metadata", r"search_parameters", r"serpapi_.+", r"filters", r"pagination"] + results, + [ + r"search_metadata", + r"search_parameters", + r"serpapi_.+", + r"filters", + r"pagination", + ], ) return results except HTTPError as e: return f"An error occurred: {str(e)}. Some parameters may be invalid." - - - \ No newline at end of file diff --git a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py index e9eab56a2..2db347190 100644 --- a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py +++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py @@ -1,19 +1,19 @@ import datetime import json -import os import logging +import os from typing import Any, Type import requests from crewai.tools import BaseTool from pydantic import BaseModel, Field - logging.basicConfig( level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s" ) logger = logging.getLogger(__name__) + def _save_results_to_file(content: str) -> None: """Saves the search results to a file.""" try: diff --git a/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py index e09a36fd9..4010236cc 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py @@ -18,9 +18,7 @@ class SerplyWebpageToMarkdownToolSchema(BaseModel): class SerplyWebpageToMarkdownTool(RagTool): name: str = "Webpage to Markdown" - description: str = ( - "A tool to perform convert a webpage to markdown to make it easier for LLMs to understand" - ) + description: str = "A tool to perform convert a webpage to markdown to make it easier for LLMs to understand" args_schema: Type[BaseModel] = SerplyWebpageToMarkdownToolSchema request_url: str = "https://api.serply.io/v1/request" proxy_location: Optional[str] = "US" diff --git a/src/crewai_tools/tools/snowflake_search_tool/README.md b/src/crewai_tools/tools/snowflake_search_tool/README.md new file mode 100644 index 000000000..fc0b845c3 --- /dev/null +++ b/src/crewai_tools/tools/snowflake_search_tool/README.md @@ -0,0 +1,155 @@ +# Snowflake Search Tool + +A tool for executing queries on Snowflake data warehouse with built-in connection pooling, retry logic, and async execution support. 
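+
+Beyond standalone queries, the tool is typically handed to an agent. A minimal sketch, assuming the `tool` instance constructed in the Quick Start below:
+
+```python
+from crewai import Agent
+
+# `tool` is the SnowflakeSearchTool built in the Quick Start section below.
+analyst = Agent(
+    role="Data Analyst",
+    goal="Answer questions about warehouse data with SQL",
+    backstory="An analyst comfortable writing Snowflake SQL.",
+    tools=[tool],
+)
+```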
+
+## Installation
+
+```bash
+uv sync --extra snowflake
+
+# or
+uv pip install "snowflake-connector-python>=3.5.0" "snowflake-sqlalchemy>=1.5.0" "cryptography>=41.0.0"
+
+# or
+pip install "snowflake-connector-python>=3.5.0" "snowflake-sqlalchemy>=1.5.0" "cryptography>=41.0.0"
+```
+
+## Quick Start
+
+```python
+import asyncio
+from crewai_tools import SnowflakeSearchTool, SnowflakeConfig
+
+# Create configuration
+config = SnowflakeConfig(
+    account="your_account",
+    user="your_username",
+    password="your_password",
+    warehouse="COMPUTE_WH",
+    database="your_database",
+    snowflake_schema="your_schema"  # Note: Uses snowflake_schema instead of schema
+)
+
+# Initialize tool
+tool = SnowflakeSearchTool(
+    config=config,
+    pool_size=5,
+    max_retries=3,
+    enable_caching=True
+)
+
+# Execute query
+async def main():
+    results = await tool._run(
+        query="SELECT * FROM your_table LIMIT 10",
+        timeout=300
+    )
+    print(f"Retrieved {len(results)} rows")
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+## Features
+
+- ✨ Asynchronous query execution
+- 🚀 Connection pooling for better performance
+- 🔄 Automatic retries for transient failures
+- 💾 Query result caching (optional)
+- 🔒 Support for both password and key-pair authentication
+- 📠 Comprehensive error handling and logging
+
+## Configuration Options
+
+### SnowflakeConfig Parameters
+
+| Parameter | Required | Description |
+|-----------|----------|-------------|
+| account | Yes | Snowflake account identifier |
+| user | Yes | Snowflake username |
+| password | Yes* | Snowflake password |
+| private_key_path | No* | Path to private key file (alternative to password) |
+| warehouse | Yes | Snowflake warehouse name |
+| database | Yes | Default database |
+| snowflake_schema | Yes | Default schema |
+| role | No | Snowflake role |
+| session_parameters | No | Custom session parameters dict |
+
+\* Either password or private_key_path must be provided
+
+### Tool Parameters
+
+| Parameter | Default | Description |
+|-----------|---------|-------------|
+| pool_size | 5 | Number of connections in the pool |
+| max_retries | 3 | Maximum retry attempts for failed queries |
+| retry_delay | 1.0 | Delay between retries in seconds |
+| enable_caching | True | Enable/disable query result caching |
+
+## Advanced Usage
+
+### Using Key-Pair Authentication
+
+```python
+config = SnowflakeConfig(
+    account="your_account",
+    user="your_username",
+    private_key_path="/path/to/private_key.p8",
+    warehouse="your_warehouse",
+    database="your_database",
+    snowflake_schema="your_schema"
+)
+```
+
+### Custom Session Parameters
+
+```python
+config = SnowflakeConfig(
+    # ... other config parameters ...
+    session_parameters={
+        "QUERY_TAG": "my_app",
+        "TIMEZONE": "America/Los_Angeles"
+    }
+)
+```
+
+## Best Practices
+
+1. **Error Handling**: Always wrap query execution in try-except blocks
+2. **Logging**: Enable logging to track query execution and errors
+3. **Connection Management**: Use appropriate pool sizes for your workload
+4. **Timeouts**: Set reasonable query timeouts to prevent hanging
+5. **Security**: Use key-pair auth in production and never hardcode credentials
+
+## Example with Logging
+
+```python
+import logging
+
+# Configure logging
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+logger = logging.getLogger(__name__)
+
+async def main():
+    try:
+        # ... tool initialization ... 
+ results = await tool._run(query="SELECT * FROM table LIMIT 10") + logger.info(f"Query completed successfully. Retrieved {len(results)} rows") + except Exception as e: + logger.error(f"Query failed: {str(e)}") + raise +``` + +## Error Handling + +The tool automatically handles common Snowflake errors: +- DatabaseError +- OperationalError +- ProgrammingError +- Network timeouts +- Connection issues + +Errors are logged and retried based on your retry configuration. \ No newline at end of file diff --git a/src/crewai_tools/tools/snowflake_search_tool/__init__.py b/src/crewai_tools/tools/snowflake_search_tool/__init__.py new file mode 100644 index 000000000..abc1a45f5 --- /dev/null +++ b/src/crewai_tools/tools/snowflake_search_tool/__init__.py @@ -0,0 +1,11 @@ +from .snowflake_search_tool import ( + SnowflakeConfig, + SnowflakeSearchTool, + SnowflakeSearchToolInput, +) + +__all__ = [ + "SnowflakeSearchTool", + "SnowflakeSearchToolInput", + "SnowflakeConfig", +] diff --git a/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py b/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py new file mode 100644 index 000000000..75c671d21 --- /dev/null +++ b/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py @@ -0,0 +1,201 @@ +import asyncio +import logging +from concurrent.futures import ThreadPoolExecutor +from typing import Any, Dict, List, Optional, Type + +import snowflake.connector +from crewai.tools.base_tool import BaseTool +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives import serialization +from pydantic import BaseModel, ConfigDict, Field, SecretStr +from snowflake.connector.connection import SnowflakeConnection +from snowflake.connector.errors import DatabaseError, OperationalError + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Cache for query results +_query_cache = {} + + +class SnowflakeConfig(BaseModel): + """Configuration for Snowflake connection.""" + + model_config = ConfigDict(protected_namespaces=()) + + account: str = Field( + ..., description="Snowflake account identifier", pattern=r"^[a-zA-Z0-9\-_]+$" + ) + user: str = Field(..., description="Snowflake username") + password: Optional[SecretStr] = Field(None, description="Snowflake password") + private_key_path: Optional[str] = Field( + None, description="Path to private key file" + ) + warehouse: Optional[str] = Field(None, description="Snowflake warehouse") + database: Optional[str] = Field(None, description="Default database") + snowflake_schema: Optional[str] = Field(None, description="Default schema") + role: Optional[str] = Field(None, description="Snowflake role") + session_parameters: Optional[Dict[str, Any]] = Field( + default_factory=dict, description="Session parameters" + ) + + @property + def has_auth(self) -> bool: + return bool(self.password or self.private_key_path) + + def model_post_init(self, *args, **kwargs): + if not self.has_auth: + raise ValueError("Either password or private_key_path must be provided") + + +class SnowflakeSearchToolInput(BaseModel): + """Input schema for SnowflakeSearchTool.""" + + model_config = ConfigDict(protected_namespaces=()) + + query: str = Field(..., description="SQL query or semantic search query to execute") + database: Optional[str] = Field(None, description="Override default database") + snowflake_schema: Optional[str] = Field(None, description="Override default schema") + timeout: Optional[int] = Field(300, 
description="Query timeout in seconds") + + +class SnowflakeSearchTool(BaseTool): + """Tool for executing queries and semantic search on Snowflake.""" + + name: str = "Snowflake Database Search" + description: str = ( + "Execute SQL queries or semantic search on Snowflake data warehouse. " + "Supports both raw SQL and natural language queries." + ) + args_schema: Type[BaseModel] = SnowflakeSearchToolInput + + # Define Pydantic fields + config: SnowflakeConfig = Field( + ..., description="Snowflake connection configuration" + ) + pool_size: int = Field(default=5, description="Size of connection pool") + max_retries: int = Field(default=3, description="Maximum retry attempts") + retry_delay: float = Field( + default=1.0, description="Delay between retries in seconds" + ) + enable_caching: bool = Field( + default=True, description="Enable query result caching" + ) + + model_config = ConfigDict(arbitrary_types_allowed=True) + + def __init__(self, **data): + """Initialize SnowflakeSearchTool.""" + super().__init__(**data) + self._connection_pool: List[SnowflakeConnection] = [] + self._pool_lock = asyncio.Lock() + self._thread_pool = ThreadPoolExecutor(max_workers=self.pool_size) + + async def _get_connection(self) -> SnowflakeConnection: + """Get a connection from the pool or create a new one.""" + async with self._pool_lock: + if not self._connection_pool: + conn = self._create_connection() + self._connection_pool.append(conn) + return self._connection_pool.pop() + + def _create_connection(self) -> SnowflakeConnection: + """Create a new Snowflake connection.""" + conn_params = { + "account": self.config.account, + "user": self.config.user, + "warehouse": self.config.warehouse, + "database": self.config.database, + "schema": self.config.snowflake_schema, + "role": self.config.role, + "session_parameters": self.config.session_parameters, + } + + if self.config.password: + conn_params["password"] = self.config.password.get_secret_value() + elif self.config.private_key_path: + with open(self.config.private_key_path, "rb") as key_file: + p_key = serialization.load_pem_private_key( + key_file.read(), password=None, backend=default_backend() + ) + conn_params["private_key"] = p_key + + return snowflake.connector.connect(**conn_params) + + def _get_cache_key(self, query: str, timeout: int) -> str: + """Generate a cache key for the query.""" + return f"{self.config.account}:{self.config.database}:{self.config.snowflake_schema}:{query}:{timeout}" + + async def _execute_query( + self, query: str, timeout: int = 300 + ) -> List[Dict[str, Any]]: + """Execute a query with retries and return results.""" + if self.enable_caching: + cache_key = self._get_cache_key(query, timeout) + if cache_key in _query_cache: + logger.info("Returning cached result") + return _query_cache[cache_key] + + for attempt in range(self.max_retries): + try: + conn = await self._get_connection() + try: + cursor = conn.cursor() + cursor.execute(query, timeout=timeout) + + if not cursor.description: + return [] + + columns = [col[0] for col in cursor.description] + results = [dict(zip(columns, row)) for row in cursor.fetchall()] + + if self.enable_caching: + _query_cache[self._get_cache_key(query, timeout)] = results + + return results + finally: + cursor.close() + async with self._pool_lock: + self._connection_pool.append(conn) + except (DatabaseError, OperationalError) as e: + if attempt == self.max_retries - 1: + raise + await asyncio.sleep(self.retry_delay * (2**attempt)) + logger.warning(f"Query failed, attempt {attempt + 1}: 
{str(e)}") + continue + + async def _run( + self, + query: str, + database: Optional[str] = None, + snowflake_schema: Optional[str] = None, + timeout: int = 300, + **kwargs: Any, + ) -> Any: + """Execute the search query.""" + try: + # Override database/schema if provided + if database: + await self._execute_query(f"USE DATABASE {database}") + if snowflake_schema: + await self._execute_query(f"USE SCHEMA {snowflake_schema}") + + results = await self._execute_query(query, timeout) + return results + except Exception as e: + logger.error(f"Error executing query: {str(e)}") + raise + + def __del__(self): + """Cleanup connections on deletion.""" + try: + for conn in getattr(self, "_connection_pool", []): + try: + conn.close() + except: + pass + if hasattr(self, "_thread_pool"): + self._thread_pool.shutdown() + except: + pass diff --git a/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py b/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py index 07c76c8c3..37b414509 100644 --- a/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py +++ b/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py @@ -14,9 +14,8 @@ import os from functools import lru_cache from typing import Any, Dict, List, Optional, Type, Union -from pydantic import BaseModel, Field - from crewai.tools.base_tool import BaseTool +from pydantic import BaseModel, Field # Set up logging logger = logging.getLogger(__name__) @@ -25,6 +24,7 @@ logger = logging.getLogger(__name__) STAGEHAND_AVAILABLE = False try: import stagehand + STAGEHAND_AVAILABLE = True except ImportError: pass # Keep STAGEHAND_AVAILABLE as False @@ -32,33 +32,45 @@ except ImportError: class StagehandResult(BaseModel): """Result from a Stagehand operation. - + Attributes: success: Whether the operation completed successfully data: The result data from the operation error: Optional error message if the operation failed """ - success: bool = Field(..., description="Whether the operation completed successfully") - data: Union[str, Dict, List] = Field(..., description="The result data from the operation") - error: Optional[str] = Field(None, description="Optional error message if the operation failed") + + success: bool = Field( + ..., description="Whether the operation completed successfully" + ) + data: Union[str, Dict, List] = Field( + ..., description="The result data from the operation" + ) + error: Optional[str] = Field( + None, description="Optional error message if the operation failed" + ) class StagehandToolConfig(BaseModel): """Configuration for the StagehandTool. - + Attributes: api_key: OpenAI API key for Stagehand authentication timeout: Maximum time in seconds to wait for operations (default: 30) retry_attempts: Number of times to retry failed operations (default: 3) """ + api_key: str = Field(..., description="OpenAI API key for Stagehand authentication") - timeout: int = Field(30, description="Maximum time in seconds to wait for operations") - retry_attempts: int = Field(3, description="Number of times to retry failed operations") + timeout: int = Field( + 30, description="Maximum time in seconds to wait for operations" + ) + retry_attempts: int = Field( + 3, description="Number of times to retry failed operations" + ) class StagehandToolSchema(BaseModel): """Schema for the StagehandTool input parameters. 
- + Examples: ```python # Using the 'act' API to click a button @@ -66,13 +78,13 @@ class StagehandToolSchema(BaseModel): api_method="act", instruction="Click the 'Sign In' button" ) - + # Using the 'extract' API to get text tool.run( api_method="extract", instruction="Get the text content of the main article" ) - + # Using the 'observe' API to monitor changes tool.run( api_method="observe", @@ -80,48 +92,49 @@ class StagehandToolSchema(BaseModel): ) ``` """ + api_method: str = Field( ..., description="The Stagehand API to use: 'act' for interactions, 'extract' for getting content, or 'observe' for monitoring changes", - pattern="^(act|extract|observe)$" + pattern="^(act|extract|observe)$", ) instruction: str = Field( ..., description="An atomic instruction for Stagehand to execute. Instructions should be simple and specific to increase reliability.", min_length=1, - max_length=500 + max_length=500, ) class StagehandTool(BaseTool): """A tool for using Stagehand's AI-powered web automation capabilities. - + This tool provides access to Stagehand's three core APIs: - act: Perform web interactions (e.g., clicking buttons, filling forms) - extract: Extract information from web pages (e.g., getting text content) - observe: Monitor web page changes (e.g., watching for updates) - + Each function takes atomic instructions to increase reliability. - + Required Environment Variables: OPENAI_API_KEY: API key for OpenAI (required by Stagehand) - + Examples: ```python tool = StagehandTool() - + # Perform a web interaction result = tool.run( api_method="act", instruction="Click the 'Sign In' button" ) - + # Extract content from a page content = tool.run( api_method="extract", instruction="Get the text content of the main article" ) - + # Monitor for changes changes = tool.run( api_method="observe", @@ -129,7 +142,7 @@ class StagehandTool(BaseTool): ) ``` """ - + name: str = "StagehandTool" description: str = ( "A tool that uses Stagehand's AI-powered web automation to interact with websites. " @@ -137,27 +150,29 @@ class StagehandTool(BaseTool): "Each instruction should be atomic (simple and specific) to increase reliability." ) args_schema: Type[BaseModel] = StagehandToolSchema - - def __init__(self, config: StagehandToolConfig | None = None, **kwargs: Any) -> None: + + def __init__( + self, config: StagehandToolConfig | None = None, **kwargs: Any + ) -> None: """Initialize the StagehandTool. - + Args: config: Optional configuration for the tool. If not provided, will attempt to use OPENAI_API_KEY from environment. **kwargs: Additional keyword arguments passed to the base class. - + Raises: ImportError: If the stagehand package is not installed ValueError: If no API key is provided via config or environment """ super().__init__(**kwargs) - + if not STAGEHAND_AVAILABLE: raise ImportError( "The 'stagehand' package is required to use this tool. " "Please install it with: pip install stagehand" ) - + # Use config if provided, otherwise try environment variable if config is not None: self.config = config @@ -168,24 +183,22 @@ class StagehandTool(BaseTool): "Either provide config with api_key or set OPENAI_API_KEY environment variable" ) self.config = StagehandToolConfig( - api_key=api_key, - timeout=30, - retry_attempts=3 + api_key=api_key, timeout=30, retry_attempts=3 ) - + @lru_cache(maxsize=100) def _cached_run(self, api_method: str, instruction: str) -> Any: """Execute a cached Stagehand command. - + This method is cached to improve performance for repeated operations. 
- + Args: api_method: The Stagehand API to use ('act', 'extract', or 'observe') instruction: An atomic instruction for Stagehand to execute - + Returns: The raw result from the Stagehand API call - + Raises: ValueError: If an invalid api_method is provided Exception: If the Stagehand API call fails @@ -193,23 +206,25 @@ class StagehandTool(BaseTool): logger.debug( "Cache operation - Method: %s, Instruction length: %d", api_method, - len(instruction) + len(instruction), ) - + # Initialize Stagehand with configuration logger.info( "Initializing Stagehand (timeout=%ds, retries=%d)", self.config.timeout, - self.config.retry_attempts + self.config.retry_attempts, ) st = stagehand.Stagehand( api_key=self.config.api_key, timeout=self.config.timeout, - retry_attempts=self.config.retry_attempts + retry_attempts=self.config.retry_attempts, ) - + # Call the appropriate Stagehand API based on the method - logger.info("Executing %s operation with instruction: %s", api_method, instruction[:100]) + logger.info( + "Executing %s operation with instruction: %s", api_method, instruction[:100] + ) try: if api_method == "act": result = st.act(instruction) @@ -219,28 +234,27 @@ class StagehandTool(BaseTool): result = st.observe(instruction) else: raise ValueError(f"Unknown api_method: {api_method}") - - + logger.info("Successfully executed %s operation", api_method) return result - + except Exception as e: logger.warning( "Operation failed (method=%s, error=%s), will be retried on next attempt", api_method, - str(e) + str(e), ) raise def _run(self, api_method: str, instruction: str, **kwargs: Any) -> StagehandResult: """Execute a Stagehand command using the specified API method. - + Args: api_method: The Stagehand API to use ('act', 'extract', or 'observe') instruction: An atomic instruction for Stagehand to execute **kwargs: Additional keyword arguments passed to the Stagehand API - - Returns: + + Returns: StagehandResult containing the operation result and status """ try: @@ -249,56 +263,36 @@ class StagehandTool(BaseTool): "Starting operation - Method: %s, Instruction length: %d, Args: %s", api_method, len(instruction), - kwargs + kwargs, ) - + # Use cached execution result = self._cached_run(api_method, instruction) logger.info("Operation completed successfully") return StagehandResult(success=True, data=result) - + except stagehand.AuthenticationError as e: logger.error( - "Authentication failed - Method: %s, Error: %s", - api_method, - str(e) + "Authentication failed - Method: %s, Error: %s", api_method, str(e) ) return StagehandResult( - success=False, - data={}, - error=f"Authentication failed: {str(e)}" + success=False, data={}, error=f"Authentication failed: {str(e)}" ) except stagehand.APIError as e: - logger.error( - "API error - Method: %s, Error: %s", - api_method, - str(e) - ) - return StagehandResult( - success=False, - data={}, - error=f"API error: {str(e)}" - ) + logger.error("API error - Method: %s, Error: %s", api_method, str(e)) + return StagehandResult(success=False, data={}, error=f"API error: {str(e)}") except stagehand.BrowserError as e: - logger.error( - "Browser error - Method: %s, Error: %s", - api_method, - str(e) - ) + logger.error("Browser error - Method: %s, Error: %s", api_method, str(e)) return StagehandResult( - success=False, - data={}, - error=f"Browser error: {str(e)}" + success=False, data={}, error=f"Browser error: {str(e)}" ) except Exception as e: logger.error( "Unexpected error - Method: %s, Error type: %s, Message: %s", api_method, type(e).__name__, - str(e) + str(e), 
) return StagehandResult( - success=False, - data={}, - error=f"Unexpected error: {str(e)}" + success=False, data={}, error=f"Unexpected error: {str(e)}" ) diff --git a/src/crewai_tools/tools/vision_tool/vision_tool.py b/src/crewai_tools/tools/vision_tool/vision_tool.py index 4fbc1df0e..594be0b22 100644 --- a/src/crewai_tools/tools/vision_tool/vision_tool.py +++ b/src/crewai_tools/tools/vision_tool/vision_tool.py @@ -1,30 +1,36 @@ import base64 -from typing import Type, Optional from pathlib import Path +from typing import Optional, Type + from crewai.tools import BaseTool from openai import OpenAI from pydantic import BaseModel, validator + class ImagePromptSchema(BaseModel): """Input for Vision Tool.""" + image_path_url: str = "The image path or URL." @validator("image_path_url") def validate_image_path_url(cls, v: str) -> str: if v.startswith("http"): return v - + path = Path(v) if not path.exists(): raise ValueError(f"Image file does not exist: {v}") - + # Validate supported formats valid_extensions = {".jpg", ".jpeg", ".png", ".gif", ".webp"} if path.suffix.lower() not in valid_extensions: - raise ValueError(f"Unsupported image format. Supported formats: {valid_extensions}") - + raise ValueError( + f"Unsupported image format. Supported formats: {valid_extensions}" + ) + return v + class VisionTool(BaseTool): name: str = "Vision Tool" description: str = ( @@ -45,10 +51,10 @@ class VisionTool(BaseTool): image_path_url = kwargs.get("image_path_url") if not image_path_url: return "Image Path or URL is required." - + # Validate input using Pydantic ImagePromptSchema(image_path_url=image_path_url) - + if image_path_url.startswith("http"): image_data = image_path_url else: @@ -68,12 +74,12 @@ class VisionTool(BaseTool): { "type": "image_url", "image_url": {"url": image_data}, - } + }, ], } ], max_tokens=300, - ) + ) return response.choices[0].message.content diff --git a/src/crewai_tools/tools/weaviate_tool/vector_search.py b/src/crewai_tools/tools/weaviate_tool/vector_search.py index 14e10d7c5..53f641272 100644 --- a/src/crewai_tools/tools/weaviate_tool/vector_search.py +++ b/src/crewai_tools/tools/weaviate_tool/vector_search.py @@ -15,9 +15,8 @@ except ImportError: Vectorizers = Any Auth = Any -from pydantic import BaseModel, Field - from crewai.tools import BaseTool +from pydantic import BaseModel, Field class WeaviateToolSchema(BaseModel): diff --git a/src/crewai_tools/tools/website_search/website_search_tool.py b/src/crewai_tools/tools/website_search/website_search_tool.py index faa1a02e8..842462546 100644 --- a/src/crewai_tools/tools/website_search/website_search_tool.py +++ b/src/crewai_tools/tools/website_search/website_search_tool.py @@ -25,9 +25,7 @@ class WebsiteSearchToolSchema(FixedWebsiteSearchToolSchema): class WebsiteSearchTool(RagTool): name: str = "Search in a specific website" - description: str = ( - "A tool that can be used to semantic search a query from a specific URL content." - ) + description: str = "A tool that can be used to semantic search a query from a specific URL content." 
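    # A minimal usage sketch (an assumption, not part of this patch; it relies
    # on the optional `website` constructor argument shown just below): the
    # tool can be pinned to one site up front, or left open so the agent
    # supplies the URL per call.
    #
    #   from crewai_tools import WebsiteSearchTool
    #   pinned = WebsiteSearchTool(website="https://docs.crewai.com")  # fixed site
    #   open_ended = WebsiteSearchTool()  # `website` is then required at run time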
args_schema: Type[BaseModel] = WebsiteSearchToolSchema def __init__(self, website: Optional[str] = None, **kwargs): diff --git a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py index b0c6209f1..81ecc30c3 100644 --- a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py +++ b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py @@ -25,9 +25,7 @@ class YoutubeChannelSearchToolSchema(FixedYoutubeChannelSearchToolSchema): class YoutubeChannelSearchTool(RagTool): name: str = "Search a Youtube Channels content" - description: str = ( - "A tool that can be used to semantic search a query from a Youtube Channels content." - ) + description: str = "A tool that can be used to semantic search a query from a Youtube Channels content." args_schema: Type[BaseModel] = YoutubeChannelSearchToolSchema def __init__(self, youtube_channel_handle: Optional[str] = None, **kwargs): diff --git a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py index 6852fafb4..1ad8434c8 100644 --- a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py +++ b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py @@ -25,9 +25,7 @@ class YoutubeVideoSearchToolSchema(FixedYoutubeVideoSearchToolSchema): class YoutubeVideoSearchTool(RagTool): name: str = "Search a Youtube Video content" - description: str = ( - "A tool that can be used to semantic search a query from a Youtube Video content." - ) + description: str = "A tool that can be used to semantic search a query from a Youtube Video content." args_schema: Type[BaseModel] = YoutubeVideoSearchToolSchema def __init__(self, youtube_video_url: Optional[str] = None, **kwargs): diff --git a/tests/base_tool_test.py b/tests/base_tool_test.py index 4a4e40783..e6f4f127d 100644 --- a/tests/base_tool_test.py +++ b/tests/base_tool_test.py @@ -1,69 +1,104 @@ from typing import Callable + from crewai.tools import BaseTool, tool from crewai.tools.base_tool import to_langchain + def test_creating_a_tool_using_annotation(): - @tool("Name of my tool") - def my_tool(question: str) -> str: - """Clear description for what this tool is useful for, you agent will need this information to use it.""" - return question + @tool("Name of my tool") + def my_tool(question: str) -> str: + """Clear description for what this tool is useful for, you agent will need this information to use it.""" + return question - # Assert all the right attributes were defined - assert my_tool.name == "Name of my tool" - assert my_tool.description == "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, you agent will need this information to use it." - assert my_tool.args_schema.schema()["properties"] == {'question': {'title': 'Question', 'type': 'string'}} - assert my_tool.func("What is the meaning of life?") == "What is the meaning of life?" + # Assert all the right attributes were defined + assert my_tool.name == "Name of my tool" + assert ( + my_tool.description + == "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, you agent will need this information to use it." 
+ ) + assert my_tool.args_schema.schema()["properties"] == { + "question": {"title": "Question", "type": "string"} + } + assert ( + my_tool.func("What is the meaning of life?") == "What is the meaning of life?" + ) + + # Assert the langchain tool conversion worked as expected + converted_tool = to_langchain([my_tool])[0] + assert converted_tool.name == "Name of my tool" + assert ( + converted_tool.description + == "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, you agent will need this information to use it." + ) + assert converted_tool.args_schema.schema()["properties"] == { + "question": {"title": "Question", "type": "string"} + } + assert ( + converted_tool.func("What is the meaning of life?") + == "What is the meaning of life?" + ) - # Assert the langchain tool conversion worked as expected - converted_tool = to_langchain([my_tool])[0] - assert converted_tool.name == "Name of my tool" - assert converted_tool.description == "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, you agent will need this information to use it." - assert converted_tool.args_schema.schema()["properties"] == {'question': {'title': 'Question', 'type': 'string'}} - assert converted_tool.func("What is the meaning of life?") == "What is the meaning of life?" def test_creating_a_tool_using_baseclass(): - class MyCustomTool(BaseTool): - name: str = "Name of my tool" - description: str = "Clear description for what this tool is useful for, you agent will need this information to use it." + class MyCustomTool(BaseTool): + name: str = "Name of my tool" + description: str = "Clear description for what this tool is useful for, you agent will need this information to use it." - def _run(self, question: str) -> str: - return question + def _run(self, question: str) -> str: + return question - my_tool = MyCustomTool() - # Assert all the right attributes were defined - assert my_tool.name == "Name of my tool" - assert my_tool.description == "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, you agent will need this information to use it." - assert my_tool.args_schema.schema()["properties"] == {'question': {'title': 'Question', 'type': 'string'}} - assert my_tool._run("What is the meaning of life?") == "What is the meaning of life?" + my_tool = MyCustomTool() + # Assert all the right attributes were defined + assert my_tool.name == "Name of my tool" + assert ( + my_tool.description + == "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, you agent will need this information to use it." + ) + assert my_tool.args_schema.schema()["properties"] == { + "question": {"title": "Question", "type": "string"} + } + assert ( + my_tool._run("What is the meaning of life?") == "What is the meaning of life?" 
+ ) + + # Assert the langchain tool conversion worked as expected + converted_tool = to_langchain([my_tool])[0] + assert converted_tool.name == "Name of my tool" + assert ( + converted_tool.description + == "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, you agent will need this information to use it." + ) + assert converted_tool.args_schema.schema()["properties"] == { + "question": {"title": "Question", "type": "string"} + } + assert ( + converted_tool.invoke({"question": "What is the meaning of life?"}) + == "What is the meaning of life?" + ) - # Assert the langchain tool conversion worked as expected - converted_tool = to_langchain([my_tool])[0] - assert converted_tool.name == "Name of my tool" - assert converted_tool.description == "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, you agent will need this information to use it." - assert converted_tool.args_schema.schema()["properties"] == {'question': {'title': 'Question', 'type': 'string'}} - assert converted_tool.invoke({"question": "What is the meaning of life?"}) == "What is the meaning of life?" def test_setting_cache_function(): - class MyCustomTool(BaseTool): - name: str = "Name of my tool" - description: str = "Clear description for what this tool is useful for, you agent will need this information to use it." - cache_function: Callable = lambda: False + class MyCustomTool(BaseTool): + name: str = "Name of my tool" + description: str = "Clear description for what this tool is useful for, you agent will need this information to use it." + cache_function: Callable = lambda: False - def _run(self, question: str) -> str: - return question + def _run(self, question: str) -> str: + return question + + my_tool = MyCustomTool() + # Assert all the right attributes were defined + assert my_tool.cache_function() == False - my_tool = MyCustomTool() - # Assert all the right attributes were defined - assert my_tool.cache_function() == False def test_default_cache_function_is_true(): - class MyCustomTool(BaseTool): - name: str = "Name of my tool" - description: str = "Clear description for what this tool is useful for, you agent will need this information to use it." + class MyCustomTool(BaseTool): + name: str = "Name of my tool" + description: str = "Clear description for what this tool is useful for, you agent will need this information to use it." 
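        # `cache_function` is deliberately left at its default here; the
        # assertion at the end of this test expects the default to return
        # True, i.e. repeated identical calls may be served from cache unless
        # a tool opts out (as the lambda override in the previous test does).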
- def _run(self, question: str) -> str: - return question + def _run(self, question: str) -> str: + return question - my_tool = MyCustomTool() - # Assert all the right attributes were defined - assert my_tool.cache_function() == True \ No newline at end of file + my_tool = MyCustomTool() + # Assert all the right attributes were defined + assert my_tool.cache_function() == True diff --git a/tests/file_read_tool_test.py b/tests/file_read_tool_test.py index 4646df24c..5957f863b 100644 --- a/tests/file_read_tool_test.py +++ b/tests/file_read_tool_test.py @@ -1,7 +1,8 @@ import os -import pytest + from crewai_tools import FileReadTool + def test_file_read_tool_constructor(): """Test FileReadTool initialization with file_path.""" # Create a temporary test file @@ -18,6 +19,7 @@ def test_file_read_tool_constructor(): # Clean up os.remove(test_file) + def test_file_read_tool_run(): """Test FileReadTool _run method with file_path at runtime.""" # Create a temporary test file @@ -34,6 +36,7 @@ def test_file_read_tool_run(): # Clean up os.remove(test_file) + def test_file_read_tool_error_handling(): """Test FileReadTool error handling.""" # Test missing file path @@ -58,6 +61,7 @@ def test_file_read_tool_error_handling(): os.chmod(test_file, 0o666) # Restore permissions to delete os.remove(test_file) + def test_file_read_tool_constructor_and_run(): """Test FileReadTool using both constructor and runtime file paths.""" # Create two test files diff --git a/tests/it/tools/__init__.py b/tests/it/tools/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/it/tools/conftest.py b/tests/it/tools/conftest.py new file mode 100644 index 000000000..a633c22c7 --- /dev/null +++ b/tests/it/tools/conftest.py @@ -0,0 +1,21 @@ +import pytest + + +def pytest_configure(config): + """Register custom markers.""" + config.addinivalue_line("markers", "integration: mark test as an integration test") + config.addinivalue_line("markers", "asyncio: mark test as an async test") + + # Set the asyncio loop scope through ini configuration + config.inicfg["asyncio_mode"] = "auto" + + +@pytest.fixture(scope="function") +def event_loop(): + """Create an instance of the default event loop for each test case.""" + import asyncio + + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + yield loop + loop.close() diff --git a/tests/it/tools/snowflake_search_tool_test.py b/tests/it/tools/snowflake_search_tool_test.py new file mode 100644 index 000000000..70dc07953 --- /dev/null +++ b/tests/it/tools/snowflake_search_tool_test.py @@ -0,0 +1,219 @@ +import asyncio +import json +from decimal import Decimal + +import pytest +from snowflake.connector.errors import DatabaseError, OperationalError + +from crewai_tools import SnowflakeConfig, SnowflakeSearchTool + +# Test Data +MENU_ITEMS = [ + (10001, "Ice Cream", "Freezing Point", "Lemonade", "Beverage", "Cold Option", 1, 4), + ( + 10002, + "Ice Cream", + "Freezing Point", + "Vanilla Ice Cream", + "Dessert", + "Ice Cream", + 2, + 6, + ), +] + +INVALID_QUERIES = [ + ("SELECT * FROM nonexistent_table", "relation 'nonexistent_table' does not exist"), + ("SELECT invalid_column FROM menu", "invalid identifier 'invalid_column'"), + ("INVALID SQL QUERY", "SQL compilation error"), +] + + +# Integration Test Fixtures +@pytest.fixture +def config(): + """Create a Snowflake configuration with test credentials.""" + return SnowflakeConfig( + account="lwyhjun-wx11931", + user="crewgitci", + password="crewaiT00ls_publicCIpass123", + warehouse="COMPUTE_WH", + 
database="tasty_bytes_sample_data", + snowflake_schema="raw_pos", + ) + + +@pytest.fixture +def snowflake_tool(config): + """Create a SnowflakeSearchTool instance.""" + return SnowflakeSearchTool(config=config) + + +# Integration Tests with Real Snowflake Connection +@pytest.mark.integration +@pytest.mark.asyncio +@pytest.mark.parametrize( + "menu_id,expected_type,brand,item_name,category,subcategory,cost,price", MENU_ITEMS +) +async def test_menu_items( + snowflake_tool, + menu_id, + expected_type, + brand, + item_name, + category, + subcategory, + cost, + price, +): + """Test menu items with parameterized data for multiple test cases.""" + results = await snowflake_tool._run( + query=f"SELECT * FROM menu WHERE menu_id = {menu_id}" + ) + assert len(results) == 1 + menu_item = results[0] + + # Validate all fields + assert menu_item["MENU_ID"] == menu_id + assert menu_item["MENU_TYPE"] == expected_type + assert menu_item["TRUCK_BRAND_NAME"] == brand + assert menu_item["MENU_ITEM_NAME"] == item_name + assert menu_item["ITEM_CATEGORY"] == category + assert menu_item["ITEM_SUBCATEGORY"] == subcategory + assert menu_item["COST_OF_GOODS_USD"] == cost + assert menu_item["SALE_PRICE_USD"] == price + + # Validate health metrics JSON structure + health_metrics = json.loads(menu_item["MENU_ITEM_HEALTH_METRICS_OBJ"]) + assert "menu_item_health_metrics" in health_metrics + metrics = health_metrics["menu_item_health_metrics"][0] + assert "ingredients" in metrics + assert isinstance(metrics["ingredients"], list) + assert all(isinstance(ingredient, str) for ingredient in metrics["ingredients"]) + assert metrics["is_dairy_free_flag"] in ["Y", "N"] + + +@pytest.mark.integration +@pytest.mark.asyncio +async def test_menu_categories_aggregation(snowflake_tool): + """Test complex aggregation query on menu categories with detailed validations.""" + results = await snowflake_tool._run( + query=""" + SELECT + item_category, + COUNT(*) as item_count, + AVG(sale_price_usd) as avg_price, + SUM(sale_price_usd - cost_of_goods_usd) as total_margin, + COUNT(DISTINCT menu_type) as menu_type_count, + MIN(sale_price_usd) as min_price, + MAX(sale_price_usd) as max_price + FROM menu + GROUP BY item_category + HAVING COUNT(*) > 1 + ORDER BY item_count DESC + """ + ) + + assert len(results) > 0 + for category in results: + # Basic presence checks + assert all( + key in category + for key in [ + "ITEM_CATEGORY", + "ITEM_COUNT", + "AVG_PRICE", + "TOTAL_MARGIN", + "MENU_TYPE_COUNT", + "MIN_PRICE", + "MAX_PRICE", + ] + ) + + # Value validations + assert category["ITEM_COUNT"] > 1 # Due to HAVING clause + assert category["MIN_PRICE"] <= category["MAX_PRICE"] + assert category["AVG_PRICE"] >= category["MIN_PRICE"] + assert category["AVG_PRICE"] <= category["MAX_PRICE"] + assert category["MENU_TYPE_COUNT"] >= 1 + assert isinstance(category["TOTAL_MARGIN"], (float, Decimal)) + + +@pytest.mark.integration +@pytest.mark.asyncio +@pytest.mark.parametrize("invalid_query,expected_error", INVALID_QUERIES) +async def test_invalid_queries(snowflake_tool, invalid_query, expected_error): + """Test error handling for invalid queries.""" + with pytest.raises((DatabaseError, OperationalError)) as exc_info: + await snowflake_tool._run(query=invalid_query) + assert expected_error.lower() in str(exc_info.value).lower() + + +@pytest.mark.integration +@pytest.mark.asyncio +async def test_concurrent_queries(snowflake_tool): + """Test handling of concurrent queries.""" + queries = [ + "SELECT COUNT(*) FROM menu", + "SELECT COUNT(DISTINCT menu_type) FROM 
menu", + "SELECT COUNT(DISTINCT item_category) FROM menu", + ] + + tasks = [snowflake_tool._run(query=query) for query in queries] + results = await asyncio.gather(*tasks) + + assert len(results) == 3 + assert all(isinstance(result, list) for result in results) + assert all(len(result) == 1 for result in results) + assert all(isinstance(result[0], dict) for result in results) + + +@pytest.mark.integration +@pytest.mark.asyncio +async def test_query_timeout(snowflake_tool): + """Test query timeout handling with a complex query.""" + with pytest.raises((DatabaseError, OperationalError)) as exc_info: + await snowflake_tool._run( + query=""" + WITH RECURSIVE numbers AS ( + SELECT 1 as n + UNION ALL + SELECT n + 1 + FROM numbers + WHERE n < 1000000 + ) + SELECT COUNT(*) FROM numbers + """ + ) + assert ( + "timeout" in str(exc_info.value).lower() + or "execution time" in str(exc_info.value).lower() + ) + + +@pytest.mark.integration +@pytest.mark.asyncio +async def test_caching_behavior(snowflake_tool): + """Test query caching behavior and performance.""" + query = "SELECT * FROM menu LIMIT 5" + + # First execution + start_time = asyncio.get_event_loop().time() + results1 = await snowflake_tool._run(query=query) + first_duration = asyncio.get_event_loop().time() - start_time + + # Second execution (should be cached) + start_time = asyncio.get_event_loop().time() + results2 = await snowflake_tool._run(query=query) + second_duration = asyncio.get_event_loop().time() - start_time + + # Verify results + assert results1 == results2 + assert len(results1) == 5 + assert second_duration < first_duration + + # Verify cache invalidation with different query + different_query = "SELECT * FROM menu LIMIT 10" + different_results = await snowflake_tool._run(query=different_query) + assert len(different_results) == 10 + assert different_results != results1 diff --git a/tests/spider_tool_test.py b/tests/spider_tool_test.py index 264394777..7f5613fe6 100644 --- a/tests/spider_tool_test.py +++ b/tests/spider_tool_test.py @@ -1,5 +1,7 @@ +from crewai import Agent, Crew, Task + from crewai_tools.tools.spider_tool.spider_tool import SpiderTool -from crewai import Agent, Task, Crew + def test_spider_tool(): spider_tool = SpiderTool() @@ -10,38 +12,35 @@ def test_spider_tool(): backstory="An expert web researcher that uses the web extremely well", tools=[spider_tool], verbose=True, - cache=False + cache=False, ) choose_between_scrape_crawl = Task( description="Scrape the page of spider.cloud and return a summary of how fast it is", expected_output="spider.cloud is a fast scraping and crawling tool", - agent=searcher + agent=searcher, ) return_metadata = Task( description="Scrape https://spider.cloud with a limit of 1 and enable metadata", expected_output="Metadata and 10 word summary of spider.cloud", - agent=searcher + agent=searcher, ) css_selector = Task( description="Scrape one page of spider.cloud with the `body > div > main > section.grid.md\:grid-cols-2.gap-10.place-items-center.md\:max-w-screen-xl.mx-auto.pb-8.pt-20 > div:nth-child(1) > h1` CSS selector", expected_output="The content of the element with the css selector body > div > main > section.grid.md\:grid-cols-2.gap-10.place-items-center.md\:max-w-screen-xl.mx-auto.pb-8.pt-20 > div:nth-child(1) > h1", - agent=searcher + agent=searcher, ) crew = Crew( agents=[searcher], - tasks=[ - choose_between_scrape_crawl, - return_metadata, - css_selector - ], - verbose=True + tasks=[choose_between_scrape_crawl, return_metadata, css_selector], + verbose=True, ) 
crew.kickoff() + if __name__ == "__main__": test_spider_tool() diff --git a/tests/tools/snowflake_search_tool_test.py b/tests/tools/snowflake_search_tool_test.py new file mode 100644 index 000000000..d4851b8ab --- /dev/null +++ b/tests/tools/snowflake_search_tool_test.py @@ -0,0 +1,103 @@ +import asyncio +from unittest.mock import MagicMock, patch + +import pytest + +from crewai_tools import SnowflakeConfig, SnowflakeSearchTool + + +# Unit Test Fixtures +@pytest.fixture +def mock_snowflake_connection(): + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_cursor.description = [("col1",), ("col2",)] + mock_cursor.fetchall.return_value = [(1, "value1"), (2, "value2")] + mock_cursor.execute.return_value = None + mock_conn.cursor.return_value = mock_cursor + return mock_conn + + +@pytest.fixture +def mock_config(): + return SnowflakeConfig( + account="test_account", + user="test_user", + password="test_password", + warehouse="test_warehouse", + database="test_db", + snowflake_schema="test_schema", + ) + + +@pytest.fixture +def snowflake_tool(mock_config): + with patch("snowflake.connector.connect") as mock_connect: + tool = SnowflakeSearchTool(config=mock_config) + yield tool + + +# Unit Tests +@pytest.mark.asyncio +async def test_successful_query_execution(snowflake_tool, mock_snowflake_connection): + with patch.object(snowflake_tool, "_create_connection") as mock_create_conn: + mock_create_conn.return_value = mock_snowflake_connection + + results = await snowflake_tool._run( + query="SELECT * FROM test_table", timeout=300 + ) + + assert len(results) == 2 + assert results[0]["col1"] == 1 + assert results[0]["col2"] == "value1" + mock_snowflake_connection.cursor.assert_called_once() + + +@pytest.mark.asyncio +async def test_connection_pooling(snowflake_tool, mock_snowflake_connection): + with patch.object(snowflake_tool, "_create_connection") as mock_create_conn: + mock_create_conn.return_value = mock_snowflake_connection + + # Execute multiple queries + await asyncio.gather( + snowflake_tool._run("SELECT 1"), + snowflake_tool._run("SELECT 2"), + snowflake_tool._run("SELECT 3"), + ) + + # Should reuse connections from pool + assert mock_create_conn.call_count <= snowflake_tool.pool_size + + +@pytest.mark.asyncio +async def test_cleanup_on_deletion(snowflake_tool, mock_snowflake_connection): + with patch.object(snowflake_tool, "_create_connection") as mock_create_conn: + mock_create_conn.return_value = mock_snowflake_connection + + # Add connection to pool + await snowflake_tool._get_connection() + + # Return connection to pool + async with snowflake_tool._pool_lock: + snowflake_tool._connection_pool.append(mock_snowflake_connection) + + # Trigger cleanup + snowflake_tool.__del__() + + mock_snowflake_connection.close.assert_called_once() + + +def test_config_validation(): + # Test missing required fields + with pytest.raises(ValueError): + SnowflakeConfig() + + # Test invalid account format + with pytest.raises(ValueError): + SnowflakeConfig( + account="invalid//account", user="test_user", password="test_pass" + ) + + # Test missing authentication + with pytest.raises(ValueError): + SnowflakeConfig(account="test_account", user="test_user") diff --git a/tests/tools/test_code_interpreter_tool.py b/tests/tools/test_code_interpreter_tool.py index 6470c9dc1..e281fffaf 100644 --- a/tests/tools/test_code_interpreter_tool.py +++ b/tests/tools/test_code_interpreter_tool.py @@ -7,7 +7,9 @@ from crewai_tools.tools.code_interpreter_tool.code_interpreter_tool import ( class 
TestCodeInterpreterTool(unittest.TestCase): - @patch("crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker_from_env") + @patch( + "crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker_from_env" + ) def test_run_code_in_docker(self, docker_mock): tool = CodeInterpreterTool() code = "print('Hello, World!')" @@ -15,14 +17,14 @@ class TestCodeInterpreterTool(unittest.TestCase): expected_output = "Hello, World!\n" docker_mock().containers.run().exec_run().exit_code = 0 - docker_mock().containers.run().exec_run().output = ( - expected_output.encode() - ) + docker_mock().containers.run().exec_run().output = expected_output.encode() result = tool.run_code_in_docker(code, libraries_used) self.assertEqual(result, expected_output) - @patch("crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker_from_env") + @patch( + "crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker_from_env" + ) def test_run_code_in_docker_with_error(self, docker_mock): tool = CodeInterpreterTool() code = "print(1/0)" @@ -37,7 +39,9 @@ class TestCodeInterpreterTool(unittest.TestCase): self.assertEqual(result, expected_output) - @patch("crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker_from_env") + @patch( + "crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker_from_env" + ) def test_run_code_in_docker_with_script(self, docker_mock): tool = CodeInterpreterTool() code = """print("This is line 1") From a606f48b70b346e70bf3bbfdad78290367f4469b Mon Sep 17 00:00:00 2001 From: ArchiusVuong-sudo Date: Sat, 18 Jan 2025 21:58:50 +0700 Subject: [PATCH 260/391] FIX: Fix HTTPError cannot be found in serperai --- .../tools/serpapi_tool/serpapi_google_search_tool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/serpapi_tool/serpapi_google_search_tool.py b/src/crewai_tools/tools/serpapi_tool/serpapi_google_search_tool.py index 199b7f5a2..f8edd6458 100644 --- a/src/crewai_tools/tools/serpapi_tool/serpapi_google_search_tool.py +++ b/src/crewai_tools/tools/serpapi_tool/serpapi_google_search_tool.py @@ -3,7 +3,7 @@ from typing import Any, Type, Optional import re from pydantic import BaseModel, Field from .serpapi_base_tool import SerpApiBaseTool -from serpapi import HTTPError +from urllib.error import HTTPError class SerpApiGoogleSearchToolSchema(BaseModel): """Input for Google Search.""" From 659cb6279e2b2833fea0d4c8da4946160100befd Mon Sep 17 00:00:00 2001 From: ArchiusVuong-sudo Date: Sat, 18 Jan 2025 23:01:01 +0700 Subject: [PATCH 261/391] fix: Fixed all from urllib.error import HTTPError --- .../tools/serpapi_tool/serpapi_google_shopping_tool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/serpapi_tool/serpapi_google_shopping_tool.py b/src/crewai_tools/tools/serpapi_tool/serpapi_google_shopping_tool.py index b44b3a809..5863239c5 100644 --- a/src/crewai_tools/tools/serpapi_tool/serpapi_google_shopping_tool.py +++ b/src/crewai_tools/tools/serpapi_tool/serpapi_google_shopping_tool.py @@ -3,7 +3,7 @@ from typing import Any, Type, Optional import re from pydantic import BaseModel, Field from .serpapi_base_tool import SerpApiBaseTool -from serpapi import HTTPError +from urllib.error import HTTPError class SerpApiGoogleShoppingToolSchema(BaseModel): """Input for Google Shopping.""" From 4af3724ec203e88a9578ce4456f24fa026926e7f Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Tue, 21 Jan 2025 15:55:05 -0800 Subject: [PATCH 262/391] fix selenium tool --- 
.../tools/selenium_scraping_tool/selenium_scraping_tool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py index b21d259f5..240269756 100644 --- a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py +++ b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py @@ -163,7 +163,7 @@ class SeleniumScrapingTool(BaseTool): if not re.match(r"^https?://", url): raise ValueError("URL must start with http:// or https://") - options = Options() + options = self._options options.add_argument("--headless") driver = self.driver(options=options) driver.get(url) From b4d98bbb86097f068581d9949facf72bad5d081c Mon Sep 17 00:00:00 2001 From: Mike Plachta Date: Wed, 22 Jan 2025 20:22:57 -0800 Subject: [PATCH 263/391] OCR Tool v1 --- src/crewai_tools/tools/ocr_tool/README.md | 42 +++++++ src/crewai_tools/tools/ocr_tool/ocr_tool.py | 126 ++++++++++++++++++++ 2 files changed, 168 insertions(+) create mode 100644 src/crewai_tools/tools/ocr_tool/README.md create mode 100644 src/crewai_tools/tools/ocr_tool/ocr_tool.py diff --git a/src/crewai_tools/tools/ocr_tool/README.md b/src/crewai_tools/tools/ocr_tool/README.md new file mode 100644 index 000000000..f5375ca18 --- /dev/null +++ b/src/crewai_tools/tools/ocr_tool/README.md @@ -0,0 +1,42 @@ +# OCR Tool + +## Description + +This tool performs Optical Character Recognition (OCR) on images using supported LLMs. It can extract text from both local image files and images available via URLs. The tool leverages the LLM's vision capabilities to provide accurate text extraction from images. + +## Installation +Install the crewai_tools package +```shell +pip install 'crewai[tools]' +``` + +## Supported LLMs + +Any LLM that supports the `vision` feature should work. It must accept image_url as a user message. +The tool has been tested with: +- OpenAI's `gpt-4o` +- Gemini's `gemini/gemini-1.5-pro` + +## Usage + +In order to use the OCRTool, make sure your LLM supports the `vision` feature and the appropriate API key is set in the environment (e.g., `OPENAI_API_KEY` for OpenAI). + +```python +from crewai_tools import OCRTool + +selected_llm = LLM(model="gpt-4o") # select your LLM, the tool has been tested with gpt-4o and gemini/gemini-1.5-pro + +ocr_tool = OCRTool(llm=selected_llm) + +@agent +def researcher(self) -> Agent: + return Agent( + config=self.agents_config["researcher"], + allow_delegation=False, + tools=[ocr_tool] + ) +``` + +The tool accepts either a local file path or a URL to the image: +- For local files, provide the absolute or relative path +- For remote images, provide the complete URL starting with 'http' or 'https' diff --git a/src/crewai_tools/tools/ocr_tool/ocr_tool.py b/src/crewai_tools/tools/ocr_tool/ocr_tool.py new file mode 100644 index 000000000..aabe0ffbd --- /dev/null +++ b/src/crewai_tools/tools/ocr_tool/ocr_tool.py @@ -0,0 +1,126 @@ +""" +Optical Character Recognition (OCR) Tool + +This tool provides functionality for extracting text from images using supported LLMs. Make sure your model supports the `vision` feature. +""" + +import base64 +from typing import Optional, Type + +from openai import OpenAI +from pydantic import BaseModel, PrivateAttr + +from crewai.tools.base_tool import BaseTool +from crewai import LLM + + +class OCRToolSchema(BaseModel): + """Input schema for Optical Character Recognition Tool. 
+ + Attributes: + image_path_url (str): Path to a local image file or URL of an image. + For local files, provide the absolute or relative path. + For remote images, provide the complete URL starting with 'http' or 'https'. + """ + + image_path_url: str = "The image path or URL." + + +class OCRTool(BaseTool): + """A tool for performing Optical Character Recognition on images. + + This tool leverages LLMs to extract text from images. It can process + both local image files and images available via URLs. + + Attributes: + name (str): Name of the tool. + description (str): Description of the tool's functionality. + args_schema (Type[BaseModel]): Pydantic schema for input validation. + + Private Attributes: + _llm (Optional[LLM]): Language model instance for making API calls. + """ + + name: str = "Optical Character Recognition Tool" + description: str = ( + "This tool uses an LLM's API to extract text from an image file." + ) + _llm: Optional[LLM] = PrivateAttr(default=None) + + args_schema: Type[BaseModel] = OCRToolSchema + + def __init__(self, llm: LLM = None, **kwargs): + """Initialize the OCR tool. + + Args: + llm (LLM, optional): Language model instance to use for API calls. + If not provided, a default LLM with gpt-4o model will be used. + **kwargs: Additional arguments passed to the parent class. + """ + super().__init__(**kwargs) + + if llm is None: + # Use the default LLM + llm = LLM( + model="gpt-4o", + temperature=0.7, + ) + + self._llm = llm + + def _run(self, **kwargs) -> str: + """Execute the OCR operation on the provided image. + + Args: + **kwargs: Keyword arguments containing the image_path_url. + + Returns: + str: Extracted text from the image. + If no image path/URL is provided, returns an error message. + + Note: + The method handles both local image files and remote URLs: + - For local files: The image is read and encoded to base64 + - For URLs: The URL is passed directly to the Vision API + """ + image_path_url = kwargs.get("image_path_url") + + if not image_path_url: + return "Image Path or URL is required." + + if image_path_url.startswith("http"): + image_data = image_path_url + else: + base64_image = self._encode_image(image_path_url) + image_data = f"data:image/jpeg;base64,{base64_image}" + + messages=[ + { + "role": "system", + "content": "You are an expert OCR specialist. Extract complete text from the provided image. Provide the result as a raw text." + }, + { + "role": "user", + "content": [ + { + "type": "image_url", + "image_url": {"url": image_data}, + } + ], + } + ] + + response = self._llm.call(messages=messages) + return response + + def _encode_image(self, image_path: str): + """Encode an image file to base64 format. + + Args: + image_path (str): Path to the local image file. + + Returns: + str: Base64-encoded image data as a UTF-8 string. 
+ """ + with open(image_path, "rb") as image_file: + return base64.b64encode(image_file.read()).decode("utf-8") From 43d045f542480ca5483e4178c410b71b5f9c27f2 Mon Sep 17 00:00:00 2001 From: Brandon Hancock Date: Thu, 23 Jan 2025 14:43:52 -0500 Subject: [PATCH 264/391] Fix for GUI --- .../patronus_local_evaluator_tool.py | 101 ++++++++---------- .../snowflake_search_tool.py | 100 +++++++++++++---- 2 files changed, 123 insertions(+), 78 deletions(-) diff --git a/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py b/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py index 053314b48..54dde463d 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py +++ b/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py @@ -1,16 +1,17 @@ -from typing import Any, Type +from typing import TYPE_CHECKING, Any, Type from crewai.tools import BaseTool -from patronus import Client from pydantic import BaseModel, Field +if TYPE_CHECKING: + from patronus import Client, EvaluationResult + try: - from patronus import Client + import patronus PYPATRONUS_AVAILABLE = True except ImportError: PYPATRONUS_AVAILABLE = False - Client = Any class FixedLocalEvaluatorToolSchema(BaseModel): @@ -31,59 +32,49 @@ class FixedLocalEvaluatorToolSchema(BaseModel): class PatronusLocalEvaluatorTool(BaseTool): name: str = "Patronus Local Evaluator Tool" - evaluator: str = "The registered local evaluator" - evaluated_model_gold_answer: str = "The agent's gold answer" - description: str = "This tool is used to evaluate the model input and output using custom function evaluators." - description: str = "This tool is used to evaluate the model input and output using custom function evaluators." - client: Any = None + description: str = ( + "This tool is used to evaluate the model input and output using custom function evaluators." + ) args_schema: Type[BaseModel] = FixedLocalEvaluatorToolSchema + client: "Client" = None + evaluator: str + evaluated_model_gold_answer: str class Config: arbitrary_types_allowed = True def __init__( self, - patronus_client: Client, - evaluator: str, - evaluated_model_gold_answer: str, - **kwargs: Any, - ): - def __init__( - self, - patronus_client: Client, - evaluator: str, - evaluated_model_gold_answer: str, + patronus_client: "Client" = None, + evaluator: str = "", + evaluated_model_gold_answer: str = "", **kwargs: Any, ): super().__init__(**kwargs) if PYPATRONUS_AVAILABLE: self.client = patronus_client - if evaluator: - self.evaluator = evaluator - self.evaluated_model_gold_answer = evaluated_model_gold_answer - self.description = f"This tool calls the Patronus Evaluation API that takes an additional argument in addition to the following new argument:\n evaluators={evaluator}, evaluated_model_gold_answer={evaluated_model_gold_answer}" + self.evaluator = evaluator + self.evaluated_model_gold_answer = evaluated_model_gold_answer self._generate_description() print( - f"Updating judge evaluator, gold_answer to: {self.evaluator}, {self.evaluated_model_gold_answer}" + f"Updating evaluator and gold_answer to: {self.evaluator}, {self.evaluated_model_gold_answer}" ) else: - import click - - if click.confirm( - "You are missing the 'patronus' package. Would you like to install it?" - ): - import subprocess - - subprocess.run(["uv", "add", "patronus"], check=True) - else: - raise ImportError( - "You are missing the patronus package. Would you like to install it?" 
- ) + raise ImportError( + "The 'patronus' package is not installed. " + "Please install it by running `uv add patronus` to use PatronusLocalEvaluatorTool." + ) def _run( self, **kwargs: Any, ) -> Any: + if not PYPATRONUS_AVAILABLE: + raise ImportError( + "The 'patronus' package is not installed. " + "Please install it by running `uv add patronus` to use PatronusLocalEvaluatorTool." + ) + evaluated_model_input = kwargs.get("evaluated_model_input") evaluated_model_output = kwargs.get("evaluated_model_output") evaluated_model_retrieved_context = kwargs.get( @@ -92,30 +83,22 @@ class PatronusLocalEvaluatorTool(BaseTool): evaluated_model_gold_answer = self.evaluated_model_gold_answer evaluator = self.evaluator - result = self.client.evaluate( + result: "EvaluationResult" = self.client.evaluate( evaluator=evaluator, - evaluated_model_input=( - evaluated_model_input - if isinstance(evaluated_model_input, str) - else evaluated_model_input.get("description") - ), - evaluated_model_output=( - evaluated_model_output - if isinstance(evaluated_model_output, str) - else evaluated_model_output.get("description") - ), - evaluated_model_retrieved_context=( - evaluated_model_retrieved_context - if isinstance(evaluated_model_retrieved_context, str) - else evaluated_model_retrieved_context.get("description") - ), - evaluated_model_gold_answer=( - evaluated_model_gold_answer - if isinstance(evaluated_model_gold_answer, str) - else evaluated_model_gold_answer.get("description") - ), - tags={}, # Optional metadata, supports arbitrary kv pairs - tags={}, # Optional metadata, supports arbitrary kv pairs + evaluated_model_input=evaluated_model_input, + evaluated_model_output=evaluated_model_output, + evaluated_model_retrieved_context=evaluated_model_retrieved_context, + evaluated_model_gold_answer=evaluated_model_gold_answer, + tags={}, # Optional metadata, supports arbitrary key-value pairs ) output = f"Evaluation result: {result.pass_}, Explanation: {result.explanation}" return output + + +try: + # Only rebuild if the class hasn't been initialized yet + if not hasattr(PatronusLocalEvaluatorTool, "_model_rebuilt"): + PatronusLocalEvaluatorTool.model_rebuild() + PatronusLocalEvaluatorTool._model_rebuilt = True +except Exception: + pass diff --git a/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py b/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py index 75c671d21..e49764795 100644 --- a/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py +++ b/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py @@ -1,15 +1,29 @@ import asyncio import logging from concurrent.futures import ThreadPoolExecutor -from typing import Any, Dict, List, Optional, Type +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type -import snowflake.connector from crewai.tools.base_tool import BaseTool -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives import serialization from pydantic import BaseModel, ConfigDict, Field, SecretStr -from snowflake.connector.connection import SnowflakeConnection -from snowflake.connector.errors import DatabaseError, OperationalError + +if TYPE_CHECKING: + # Import types for type checking only + from snowflake.connector.connection import SnowflakeConnection + from snowflake.connector.errors import DatabaseError, OperationalError + +try: + import snowflake.connector + from cryptography.hazmat.backends import default_backend + from cryptography.hazmat.primitives import serialization + + 
SNOWFLAKE_AVAILABLE = True +except ImportError: + # Set modules to None + snowflake = None # type: ignore + default_backend = None # type: ignore + serialization = None # type: ignore + + SNOWFLAKE_AVAILABLE = False # Configure logging logging.basicConfig(level=logging.INFO) @@ -83,24 +97,48 @@ class SnowflakeSearchTool(BaseTool): default=True, description="Enable query result caching" ) - model_config = ConfigDict(arbitrary_types_allowed=True) + model_config = ConfigDict( + arbitrary_types_allowed=True, validate_assignment=True, frozen=False + ) + + # Internal attributes + _connection_pool: Optional[List["SnowflakeConnection"]] = ( + None # Use forward reference + ) + _pool_lock: Optional[asyncio.Lock] = None + _thread_pool: Optional[ThreadPoolExecutor] = None + _model_rebuilt: bool = False def __init__(self, **data): """Initialize SnowflakeSearchTool.""" super().__init__(**data) - self._connection_pool: List[SnowflakeConnection] = [] + self._initialize() + + def _initialize(self): + if not SNOWFLAKE_AVAILABLE: + return # Snowflake is not installed + self._connection_pool = [] self._pool_lock = asyncio.Lock() self._thread_pool = ThreadPoolExecutor(max_workers=self.pool_size) - async def _get_connection(self) -> SnowflakeConnection: + async def _get_connection(self) -> "SnowflakeConnection": """Get a connection from the pool or create a new one.""" + if not SNOWFLAKE_AVAILABLE: + raise ImportError( + "The 'snowflake-connector-python' package is not installed. " + "Please install it by running `uv add cryptography snowflake-connector-python snowflake-sqlalchemy` " + "to use SnowflakeSearchTool." + ) + async with self._pool_lock: if not self._connection_pool: - conn = self._create_connection() + conn = await asyncio.get_event_loop().run_in_executor( + self._thread_pool, self._create_connection + ) self._connection_pool.append(conn) return self._connection_pool.pop() - def _create_connection(self) -> SnowflakeConnection: + def _create_connection(self) -> "SnowflakeConnection": """Create a new Snowflake connection.""" conn_params = { "account": self.config.account, @@ -114,7 +152,7 @@ class SnowflakeSearchTool(BaseTool): if self.config.password: conn_params["password"] = self.config.password.get_secret_value() - elif self.config.private_key_path: + elif self.config.private_key_path and serialization: with open(self.config.private_key_path, "rb") as key_file: p_key = serialization.load_pem_private_key( key_file.read(), password=None, backend=default_backend() @@ -131,6 +169,13 @@ class SnowflakeSearchTool(BaseTool): self, query: str, timeout: int = 300 ) -> List[Dict[str, Any]]: """Execute a query with retries and return results.""" + if not SNOWFLAKE_AVAILABLE: + raise ImportError( + "The 'snowflake-connector-python' package is not installed. " + "Please install it by running `uv add cryptography snowflake-connector-python snowflake-sqlalchemy` " + "to use SnowflakeSearchTool." + ) + if self.enable_caching: cache_key = self._get_cache_key(query, timeout) if cache_key in _query_cache: @@ -174,6 +219,13 @@ class SnowflakeSearchTool(BaseTool): **kwargs: Any, ) -> Any: """Execute the search query.""" + if not SNOWFLAKE_AVAILABLE: + raise ImportError( + "The 'snowflake-connector-python' package is not installed. " + "Please install it by running `uv add cryptography snowflake-connector-python snowflake-sqlalchemy` " + "to use SnowflakeSearchTool." 
+ ) + try: # Override database/schema if provided if database: @@ -190,12 +242,22 @@ class SnowflakeSearchTool(BaseTool): def __del__(self): """Cleanup connections on deletion.""" try: - for conn in getattr(self, "_connection_pool", []): - try: - conn.close() - except: - pass - if hasattr(self, "_thread_pool"): + if self._connection_pool: + for conn in self._connection_pool: + try: + conn.close() + except Exception: + pass + if self._thread_pool: self._thread_pool.shutdown() - except: + except Exception: pass + + +try: + # Only rebuild if the class hasn't been initialized yet + if not hasattr(SnowflakeSearchTool, "_model_rebuilt"): + SnowflakeSearchTool.model_rebuild() + SnowflakeSearchTool._model_rebuilt = True +except Exception: + pass From 141ff864f205e963e780bd446b3cdb5841912508 Mon Sep 17 00:00:00 2001 From: Brandon Hancock Date: Thu, 23 Jan 2025 15:11:45 -0500 Subject: [PATCH 265/391] clean up --- .../patronus_local_evaluator_tool.py | 53 ++++++++++++---- .../snowflake_search_tool.py | 62 ++++++++++++++----- 2 files changed, 86 insertions(+), 29 deletions(-) diff --git a/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py b/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py index 54dde463d..8e5f95168 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py +++ b/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py @@ -51,19 +51,46 @@ class PatronusLocalEvaluatorTool(BaseTool): **kwargs: Any, ): super().__init__(**kwargs) - if PYPATRONUS_AVAILABLE: - self.client = patronus_client - self.evaluator = evaluator - self.evaluated_model_gold_answer = evaluated_model_gold_answer - self._generate_description() - print( - f"Updating evaluator and gold_answer to: {self.evaluator}, {self.evaluated_model_gold_answer}" - ) - else: - raise ImportError( - "The 'patronus' package is not installed. " - "Please install it by running `uv add patronus` to use PatronusLocalEvaluatorTool." - ) + self.evaluator = evaluator + self.evaluated_model_gold_answer = evaluated_model_gold_answer + self._initialize_patronus(patronus_client) + + def _initialize_patronus(self, patronus_client: "Client") -> None: + try: + if PYPATRONUS_AVAILABLE: + self.client = patronus_client + self._generate_description() + print( + f"Updating evaluator and gold_answer to: {self.evaluator}, {self.evaluated_model_gold_answer}" + ) + else: + raise ImportError + except ImportError: + import click + + if click.confirm( + "You are missing the 'patronus' package. Would you like to install it?" 
+ ): + import subprocess + + try: + subprocess.run(["uv", "add", "patronus"], check=True) + global patronus # Needed to re-import patronus after installation + import patronus # noqa + + global PYPATRONUS_AVAILABLE + PYPATRONUS_AVAILABLE = True + self.client = patronus_client + self._generate_description() + print( + f"Updating evaluator and gold_answer to: {self.evaluator}, {self.evaluated_model_gold_answer}" + ) + except subprocess.CalledProcessError: + raise ImportError("Failed to install 'patronus' package") + else: + raise ImportError( + "`patronus` package not found, please run `uv add patronus`" + ) def _run( self, diff --git a/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py b/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py index e49764795..a1d731d98 100644 --- a/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py +++ b/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py @@ -18,11 +18,6 @@ try: SNOWFLAKE_AVAILABLE = True except ImportError: - # Set modules to None - snowflake = None # type: ignore - default_backend = None # type: ignore - serialization = None # type: ignore - SNOWFLAKE_AVAILABLE = False # Configure logging @@ -101,10 +96,7 @@ class SnowflakeSearchTool(BaseTool): arbitrary_types_allowed=True, validate_assignment=True, frozen=False ) - # Internal attributes - _connection_pool: Optional[List["SnowflakeConnection"]] = ( - None # Use forward reference - ) + _connection_pool: Optional[List["SnowflakeConnection"]] = None _pool_lock: Optional[asyncio.Lock] = None _thread_pool: Optional[ThreadPoolExecutor] = None _model_rebuilt: bool = False @@ -112,14 +104,52 @@ class SnowflakeSearchTool(BaseTool): def __init__(self, **data): """Initialize SnowflakeSearchTool.""" super().__init__(**data) - self._initialize() + self._initialize_snowflake() - def _initialize(self): - if not SNOWFLAKE_AVAILABLE: - return # Snowflake is not installed - self._connection_pool = [] - self._pool_lock = asyncio.Lock() - self._thread_pool = ThreadPoolExecutor(max_workers=self.pool_size) + def _initialize_snowflake(self) -> None: + try: + if SNOWFLAKE_AVAILABLE: + self._connection_pool = [] + self._pool_lock = asyncio.Lock() + self._thread_pool = ThreadPoolExecutor(max_workers=self.pool_size) + else: + raise ImportError + except ImportError: + import click + + if click.confirm( + "You are missing the 'snowflake-connector-python' package. Would you like to install it?" + ): + import subprocess + + try: + subprocess.run( + [ + "uv", + "add", + "cryptography", + "snowflake-connector-python", + "snowflake-sqlalchemy", + ], + check=True, + ) + global snowflake, default_backend, serialization # Needed to re-import after installation + import snowflake.connector # noqa + from cryptography.hazmat.backends import default_backend # noqa + from cryptography.hazmat.primitives import serialization # noqa + + global SNOWFLAKE_AVAILABLE + SNOWFLAKE_AVAILABLE = True + self._connection_pool = [] + self._pool_lock = asyncio.Lock() + self._thread_pool = ThreadPoolExecutor(max_workers=self.pool_size) + except subprocess.CalledProcessError: + raise ImportError("Failed to install Snowflake dependencies") + else: + raise ImportError( + "Snowflake dependencies not found. 
Please install them by running " + "`uv add cryptography snowflake-connector-python snowflake-sqlalchemy`" + ) async def _get_connection(self) -> "SnowflakeConnection": """Get a connection from the pool or create a new one.""" From bcb72a9305c0bd90365c1240cacca7e53249f88a Mon Sep 17 00:00:00 2001 From: Brandon Hancock Date: Thu, 23 Jan 2025 15:23:12 -0500 Subject: [PATCH 266/391] Clean up and follow auto import pattern --- .../firecrawl_search_tool.py | 18 ++++++++----- .../patronus_local_evaluator_tool.py | 11 -------- .../snowflake_search_tool.py | 25 ------------------- 3 files changed, 12 insertions(+), 42 deletions(-) diff --git a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py index b8e934f96..f7f4f3677 100644 --- a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py +++ b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py @@ -1,13 +1,18 @@ -from typing import Any, Dict, Optional, Type +from typing import TYPE_CHECKING, Any, Dict, Optional, Type from crewai.tools import BaseTool from pydantic import BaseModel, ConfigDict, Field, PrivateAttr -# Type checking import +if TYPE_CHECKING: + from firecrawl import FirecrawlApp + + try: from firecrawl import FirecrawlApp + + FIRECRAWL_AVAILABLE = True except ImportError: - FirecrawlApp = Any + FIRECRAWL_AVAILABLE = False class FirecrawlSearchToolSchema(BaseModel): @@ -51,9 +56,10 @@ class FirecrawlSearchTool(BaseTool): def _initialize_firecrawl(self) -> None: try: - from firecrawl import FirecrawlApp # type: ignore - - self.firecrawl = FirecrawlApp(api_key=self.api_key) + if FIRECRAWL_AVAILABLE: + self._firecrawl = FirecrawlApp(api_key=self.api_key) + else: + raise ImportError except ImportError: import click diff --git a/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py b/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py index 8e5f95168..dfc9e757f 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py +++ b/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py @@ -75,11 +75,6 @@ class PatronusLocalEvaluatorTool(BaseTool): try: subprocess.run(["uv", "add", "patronus"], check=True) - global patronus # Needed to re-import patronus after installation - import patronus # noqa - - global PYPATRONUS_AVAILABLE - PYPATRONUS_AVAILABLE = True self.client = patronus_client self._generate_description() print( @@ -96,12 +91,6 @@ class PatronusLocalEvaluatorTool(BaseTool): self, **kwargs: Any, ) -> Any: - if not PYPATRONUS_AVAILABLE: - raise ImportError( - "The 'patronus' package is not installed. " - "Please install it by running `uv add patronus` to use PatronusLocalEvaluatorTool." 
- ) - evaluated_model_input = kwargs.get("evaluated_model_input") evaluated_model_output = kwargs.get("evaluated_model_output") evaluated_model_retrieved_context = kwargs.get( diff --git a/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py b/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py index a1d731d98..3db816899 100644 --- a/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py +++ b/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py @@ -133,13 +133,7 @@ class SnowflakeSearchTool(BaseTool): ], check=True, ) - global snowflake, default_backend, serialization # Needed to re-import after installation - import snowflake.connector # noqa - from cryptography.hazmat.backends import default_backend # noqa - from cryptography.hazmat.primitives import serialization # noqa - global SNOWFLAKE_AVAILABLE - SNOWFLAKE_AVAILABLE = True self._connection_pool = [] self._pool_lock = asyncio.Lock() self._thread_pool = ThreadPoolExecutor(max_workers=self.pool_size) @@ -153,13 +147,6 @@ class SnowflakeSearchTool(BaseTool): async def _get_connection(self) -> "SnowflakeConnection": """Get a connection from the pool or create a new one.""" - if not SNOWFLAKE_AVAILABLE: - raise ImportError( - "The 'snowflake-connector-python' package is not installed. " - "Please install it by running `uv add cryptography snowflake-connector-python snowflake-sqlalchemy` " - "to use SnowflakeSearchTool." - ) - async with self._pool_lock: if not self._connection_pool: conn = await asyncio.get_event_loop().run_in_executor( @@ -199,12 +186,6 @@ class SnowflakeSearchTool(BaseTool): self, query: str, timeout: int = 300 ) -> List[Dict[str, Any]]: """Execute a query with retries and return results.""" - if not SNOWFLAKE_AVAILABLE: - raise ImportError( - "The 'snowflake-connector-python' package is not installed. " - "Please install it by running `uv add cryptography snowflake-connector-python snowflake-sqlalchemy` " - "to use SnowflakeSearchTool." - ) if self.enable_caching: cache_key = self._get_cache_key(query, timeout) @@ -249,12 +230,6 @@ class SnowflakeSearchTool(BaseTool): **kwargs: Any, ) -> Any: """Execute the search query.""" - if not SNOWFLAKE_AVAILABLE: - raise ImportError( - "The 'snowflake-connector-python' package is not installed. " - "Please install it by running `uv add cryptography snowflake-connector-python snowflake-sqlalchemy` " - "to use SnowflakeSearchTool." 
- ) try: # Override database/schema if provided From 3808f98c14738b86dae67748b23694965d22fcf6 Mon Sep 17 00:00:00 2001 From: Luis Cardoso Date: Tue, 28 Jan 2025 10:11:46 +0100 Subject: [PATCH 267/391] fix(serper-dev): restore search localization parameters - Re-add country (gl), location, and locale (hl) parameters to SerperDevTool class - Update payload construction in _make_api_request to include localization params - Add schema validation for localization parameters - Update documentation and examples to demonstrate parameter usage These parameters were accidentally removed in the previous enhancement PR and are crucial for: - Getting region-specific search results (via country/gl) - Targeting searches to specific cities (via location) - Getting results in specific languages (via locale/hl) BREAKING CHANGE: None - This restores previously available functionality --- .../tools/serper_dev_tool/README.md | 5 +- .../tools/serper_dev_tool/serper_dev_tool.py | 16 +- tests/tools/serper_dev_tool_test.py | 151 ++++++++++++++++++ 3 files changed, 169 insertions(+), 3 deletions(-) create mode 100644 tests/tools/serper_dev_tool_test.py diff --git a/src/crewai_tools/tools/serper_dev_tool/README.md b/src/crewai_tools/tools/serper_dev_tool/README.md index 0beb9f2ab..06f1abd56 100644 --- a/src/crewai_tools/tools/serper_dev_tool/README.md +++ b/src/crewai_tools/tools/serper_dev_tool/README.md @@ -26,7 +26,10 @@ from crewai_tools import SerperDevTool tool = SerperDevTool( n_results=10, # Optional: Number of results to return (default: 10) save_file=False, # Optional: Save results to file (default: False) - search_type="search" # Optional: Type of search - "search" or "news" (default: "search") + search_type="search", # Optional: Type of search - "search" or "news" (default: "search") + country="us", # Optional: Country for search (default: "") + location="New York", # Optional: Location for search (default: "") + locale="en-US" # Optional: Locale for search (default: "") ) # Execute a search diff --git a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py index 2db347190..629016189 100644 --- a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py +++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py @@ -2,7 +2,7 @@ import datetime import json import logging import os -from typing import Any, Type +from typing import Any, Type, Optional import requests from crewai.tools import BaseTool @@ -45,6 +45,9 @@ class SerperDevTool(BaseTool): n_results: int = 10 save_file: bool = False search_type: str = "search" + country: Optional[str] = "" + location: Optional[str] = "" + locale: Optional[str] = "" def _get_search_url(self, search_type: str) -> str: """Get the appropriate endpoint URL based on search type.""" @@ -146,11 +149,20 @@ class SerperDevTool(BaseTool): def _make_api_request(self, search_query: str, search_type: str) -> dict: """Make API request to Serper.""" search_url = self._get_search_url(search_type) - payload = json.dumps({"q": search_query, "num": self.n_results}) + payload = {"q": search_query, "num": self.n_results} + + if self.country != "": + payload["gl"] = self.country + if self.location != "": + payload["location"] = self.location + if self.locale != "": + payload["hl"] = self.locale + headers = { "X-API-KEY": os.environ["SERPER_API_KEY"], "content-type": "application/json", } + payload = json.dumps(payload) response = None try: diff --git a/tests/tools/serper_dev_tool_test.py 
b/tests/tools/serper_dev_tool_test.py new file mode 100644 index 000000000..d02f0606e --- /dev/null +++ b/tests/tools/serper_dev_tool_test.py @@ -0,0 +1,151 @@ +from unittest.mock import patch +import pytest +from crewai_tools.tools.serper_dev_tool.serper_dev_tool import SerperDevTool +import os + + +@pytest.fixture(autouse=True) +def mock_serper_api_key(): + with patch.dict(os.environ, {"SERPER_API_KEY": "test_key"}): + yield + + +@pytest.fixture +def serper_tool(): + return SerperDevTool(n_results=2) + + +def test_serper_tool_initialization(): + tool = SerperDevTool() + assert tool.n_results == 10 + assert tool.save_file is False + assert tool.search_type == "search" + assert tool.country == "" + assert tool.location == "" + assert tool.locale == "" + + +def test_serper_tool_custom_initialization(): + tool = SerperDevTool( + n_results=5, + save_file=True, + search_type="news", + country="US", + location="New York", + locale="en" + ) + assert tool.n_results == 5 + assert tool.save_file is True + assert tool.search_type == "news" + assert tool.country == "US" + assert tool.location == "New York" + assert tool.locale == "en" + + +@patch("requests.post") +def test_serper_tool_search(mock_post): + tool = SerperDevTool(n_results=2) + mock_response = { + "searchParameters": { + "q": "test query", + "type": "search" + }, + "organic": [ + { + "title": "Test Title 1", + "link": "http://test1.com", + "snippet": "Test Description 1", + "position": 1 + }, + { + "title": "Test Title 2", + "link": "http://test2.com", + "snippet": "Test Description 2", + "position": 2 + } + ], + "peopleAlsoAsk": [ + { + "question": "Test Question", + "snippet": "Test Answer", + "title": "Test Source", + "link": "http://test.com" + } + ] + } + mock_post.return_value.json.return_value = mock_response + mock_post.return_value.status_code = 200 + + result = tool.run(search_query="test query") + + assert "searchParameters" in result + assert result["searchParameters"]["q"] == "test query" + assert len(result["organic"]) == 2 + assert result["organic"][0]["title"] == "Test Title 1" + + +@patch("requests.post") +def test_serper_tool_news_search(mock_post): + tool = SerperDevTool(n_results=2, search_type="news") + mock_response = { + "searchParameters": { + "q": "test news", + "type": "news" + }, + "news": [ + { + "title": "News Title 1", + "link": "http://news1.com", + "snippet": "News Description 1", + "date": "2024-01-01", + "source": "News Source 1", + "imageUrl": "http://image1.com" + } + ] + } + mock_post.return_value.json.return_value = mock_response + mock_post.return_value.status_code = 200 + + result = tool.run(search_query="test news") + + assert "news" in result + assert len(result["news"]) == 1 + assert result["news"][0]["title"] == "News Title 1" + + +@patch("requests.post") +def test_serper_tool_with_location_params(mock_post): + tool = SerperDevTool( + n_results=2, + country="US", + location="New York", + locale="en" + ) + + tool.run(search_query="test") + + called_payload = mock_post.call_args.kwargs["json"] + assert called_payload["gl"] == "US" + assert called_payload["location"] == "New York" + assert called_payload["hl"] == "en" + + +def test_invalid_search_type(): + tool = SerperDevTool() + with pytest.raises(ValueError) as exc_info: + tool.run(search_query="test", search_type="invalid") + assert "Invalid search type" in str(exc_info.value) + + +@patch("requests.post") +def test_api_error_handling(mock_post): + tool = SerperDevTool() + mock_post.side_effect = Exception("API Error") + + with 
pytest.raises(Exception) as exc_info: + tool.run(search_query="test") + assert "API Error" in str(exc_info.value) + + +if __name__ == "__main__": + pytest.main([__file__]) From 199044f866b4fb6f56784b9bb97829c1e8a1d938 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Tue, 28 Jan 2025 10:11:37 -0300 Subject: [PATCH 268/391] fixing test --- .../stagehand_tool/stagehand_extract_tool.py | 207 ++++++++++++++++++ tests/__init__.py | 0 2 files changed, 207 insertions(+) create mode 100644 src/crewai_tools/tools/stagehand_tool/stagehand_extract_tool.py create mode 100644 tests/__init__.py diff --git a/src/crewai_tools/tools/stagehand_tool/stagehand_extract_tool.py b/src/crewai_tools/tools/stagehand_tool/stagehand_extract_tool.py new file mode 100644 index 000000000..03c14fd43 --- /dev/null +++ b/src/crewai_tools/tools/stagehand_tool/stagehand_extract_tool.py @@ -0,0 +1,207 @@ +"""Tool for using Stagehand's AI-powered extraction capabilities in CrewAI.""" + +import logging +import os +from typing import Any, Dict, Optional, Type +import subprocess +import json + +from pydantic import BaseModel, Field +from crewai.tools.base_tool import BaseTool + +# Set up logging +logger = logging.getLogger(__name__) + +class StagehandExtractSchema(BaseModel): + """Schema for data extraction using Stagehand. + + Examples: + ```python + # Extract a product price + tool.run( + url="https://example.com/product", + instruction="Extract the price of the item", + schema={ + "price": {"type": "number"} + } + ) + + # Extract article content + tool.run( + url="https://example.com/article", + instruction="Extract the article title and content", + schema={ + "title": {"type": "string"}, + "content": {"type": "string"}, + "date": {"type": "string", "optional": True} + } + ) + ``` + """ + url: str = Field( + ..., + description="The URL of the website to extract data from" + ) + instruction: str = Field( + ..., + description="Instructions for what data to extract", + min_length=1, + max_length=500 + ) + schema: Dict[str, Dict[str, Any]] = Field( + ..., + description="Zod-like schema defining the structure of data to extract" + ) + + +class StagehandExtractTool(BaseTool): + name: str = "StagehandExtractTool" + description: str = ( + "A tool that uses Stagehand's AI-powered extraction to get structured data from websites. " + "Requires a schema defining the structure of data to extract." + ) + args_schema: Type[BaseModel] = StagehandExtractSchema + config: Optional[Dict[str, Any]] = None + + def __init__(self, **kwargs: Any) -> None: + """Initialize the StagehandExtractTool. + + Args: + **kwargs: Additional keyword arguments passed to the base class. 
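+
+        Raises:
+            ValueError: If the OPENAI_API_KEY environment variable is not set.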
+ """ + super().__init__(**kwargs) + + # Use provided API key or try environment variable + if not os.getenv("OPENAI_API_KEY"): + raise ValueError( + "Set OPENAI_API_KEY environment variable, mandatory for Stagehand" + ) + + def _convert_to_zod_schema(self, schema: Dict[str, Dict[str, Any]]) -> str: + """Convert Python schema definition to Zod schema string.""" + zod_parts = [] + for field_name, field_def in schema.items(): + field_type = field_def["type"] + is_optional = field_def.get("optional", False) + + if field_type == "string": + zod_type = "z.string()" + elif field_type == "number": + zod_type = "z.number()" + elif field_type == "boolean": + zod_type = "z.boolean()" + elif field_type == "array": + item_type = field_def.get("items", {"type": "string"}) + zod_type = f"z.array({self._convert_to_zod_schema({'item': item_type})})" + else: + zod_type = "z.string()" # Default to string for unknown types + + if is_optional: + zod_type += ".optional()" + + zod_parts.append(f"{field_name}: {zod_type}") + + return f"z.object({{ {', '.join(zod_parts)} }})" + + def _run(self, url: str, instruction: str, schema: Dict[str, Dict[str, Any]]) -> Any: + """Execute a Stagehand extract command. + + Args: + url: The URL to extract data from + instruction: What data to extract + schema: Schema defining the structure of data to extract + + Returns: + The extracted data matching the provided schema + """ + logger.debug( + "Starting extraction - URL: %s, Instruction: %s, Schema: %s", + url, + instruction, + schema + ) + + # Convert Python schema to Zod schema + zod_schema = self._convert_to_zod_schema(schema) + + # Prepare the Node.js command + command = [ + "node", + "-e", + f""" + const {{ Stagehand }} = require('@browserbasehq/stagehand'); + const z = require('zod'); + + async function run() {{ + console.log('Initializing Stagehand...'); + const stagehand = new Stagehand({{ + apiKey: '{os.getenv("OPENAI_API_KEY")}', + env: 'LOCAL' + }}); + + try {{ + console.log('Initializing browser...'); + await stagehand.init(); + + console.log('Navigating to:', '{url}'); + await stagehand.page.goto('{url}'); + + console.log('Extracting data...'); + const result = await stagehand.page.extract({{ + instruction: '{instruction}', + schema: {zod_schema} + }}); + + process.stdout.write('RESULT_START'); + process.stdout.write(JSON.stringify({{ data: result, success: true }})); + process.stdout.write('RESULT_END'); + + await stagehand.close(); + }} catch (error) {{ + console.error('Extraction failed:', error); + process.stdout.write('RESULT_START'); + process.stdout.write(JSON.stringify({{ + error: error.message, + name: error.name, + success: false + }})); + process.stdout.write('RESULT_END'); + process.exit(1); + }} + }} + + run(); + """ + ] + + try: + # Execute Node.js script + result = subprocess.run( + command, + check=True, + capture_output=True, + text=True + ) + + # Extract the JSON result using markers + if 'RESULT_START' in result.stdout and 'RESULT_END' in result.stdout: + json_str = result.stdout.split('RESULT_START')[1].split('RESULT_END')[0] + try: + parsed_result = json.loads(json_str) + logger.info("Successfully parsed result: %s", parsed_result) + if parsed_result.get('success', False): + return parsed_result.get('data') + else: + raise Exception(f"Extraction failed: {parsed_result.get('error', 'Unknown error')}") + except json.JSONDecodeError as e: + logger.error("Failed to parse JSON output: %s", json_str) + raise Exception(f"Invalid JSON response: {e}") + else: + logger.error("No valid result markers 
found in output") + raise ValueError("No valid output from Stagehand command") + + except subprocess.CalledProcessError as e: + logger.error("Node.js script failed with exit code %d", e.returncode) + if e.stderr: + logger.error("Error output: %s", e.stderr) + raise Exception(f"Stagehand command failed: {e}") \ No newline at end of file diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 000000000..e69de29bb From 90cdb48db017a89863ff35498cd5c4576b0aacee Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Thu, 30 Jan 2025 15:09:47 -0800 Subject: [PATCH 269/391] latest version of exa --- .../tools/exa_tools/exa_base_tool.py | 47 ------------ .../tools/exa_tools/exa_search_tool.py | 73 ++++++++++++++----- 2 files changed, 55 insertions(+), 65 deletions(-) delete mode 100644 src/crewai_tools/tools/exa_tools/exa_base_tool.py diff --git a/src/crewai_tools/tools/exa_tools/exa_base_tool.py b/src/crewai_tools/tools/exa_tools/exa_base_tool.py deleted file mode 100644 index 295b283ad..000000000 --- a/src/crewai_tools/tools/exa_tools/exa_base_tool.py +++ /dev/null @@ -1,47 +0,0 @@ -from typing import Type - -from crewai.tools import BaseTool -from pydantic import BaseModel, Field - - -class EXABaseToolToolSchema(BaseModel): - """Input for EXABaseTool.""" - - search_query: str = Field( - ..., description="Mandatory search query you want to use to search the internet" - ) - - -class EXABaseTool(BaseTool): - name: str = "Search the internet" - description: str = ( - "A tool that can be used to search the internet from a search_query" - ) - args_schema: Type[BaseModel] = EXABaseToolToolSchema - search_url: str = "https://api.exa.ai/search" - n_results: int = None - headers: dict = { - "accept": "application/json", - "content-type": "application/json", - } - - def _parse_results(self, results): - string = [] - for result in results: - try: - string.append( - "\n".join( - [ - f"Title: {result['title']}", - f"Score: {result['score']}", - f"Url: {result['url']}", - f"ID: {result['id']}", - "---", - ] - ) - ) - except KeyError: - continue - - content = "\n".join(string) - return f"\nSearch results: {content}\n" diff --git a/src/crewai_tools/tools/exa_tools/exa_search_tool.py b/src/crewai_tools/tools/exa_tools/exa_search_tool.py index 6724c2417..6681e8d1b 100644 --- a/src/crewai_tools/tools/exa_tools/exa_search_tool.py +++ b/src/crewai_tools/tools/exa_tools/exa_search_tool.py @@ -1,30 +1,67 @@ -import os -from typing import Any +from typing import Any, Optional, Type +from pydantic import BaseModel, Field -import requests +try: + from exa_py import Exa -from .exa_base_tool import EXABaseTool + EXA_INSTALLED = True +except ImportError: + Exa = Any + EXA_INSTALLED = False -class EXASearchTool(EXABaseTool): +class EXABaseToolToolSchema(BaseModel): + search_query: str = Field( + ..., description="Mandatory search query you want to use to search the internet" + ) + + +class EXASearchTool: + args_schema: Type[BaseModel] = EXABaseToolToolSchema + client: Optional["Exa"] = Field(default=None, description="Exa search client") + + def __init__( + self, + api_key: str, + content: bool = False, + highlights: bool = False, + type: str = "keyword", + use_autoprompt: bool = True, + ): + if not EXA_INSTALLED: + raise ImportError("`exa-py` package not found, please run `uv add exa-py`") + self.client = Exa(api_key=api_key) + self.content = content + self.highlights = highlights + self.type = type + self.use_autoprompt = use_autoprompt + def _run( self, - **kwargs: Any, + search_query: str, + 
start_published_date: Optional[str] = None, + end_published_date: Optional[str] = None, + include_domains: Optional[list[str]] = None, ) -> Any: - search_query = kwargs.get("search_query") - if search_query is None: - search_query = kwargs.get("query") + if self.client is None: + raise ValueError("Client not initialized") - payload = { - "query": search_query, - "type": "magic", + search_params = { + "use_autoprompt": self.use_autoprompt, + "type": self.type, } - headers = self.headers.copy() - headers["x-api-key"] = os.environ["EXA_API_KEY"] + if start_published_date: + search_params["start_published_date"] = start_published_date + if end_published_date: + search_params["end_published_date"] = end_published_date + if include_domains: + search_params["include_domains"] = include_domains - response = requests.post(self.search_url, json=payload, headers=headers) - results = response.json() - if "results" in results: - results = super()._parse_results(results["results"]) + if self.content: + results = self.client.search_and_contents( + search_query, highlights=self.highlights, **search_params + ) + else: + results = self.client.search(search_query, **search_params) return results From bcfe015d9d01ab287ae275234517a110a62e41fb Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Thu, 30 Jan 2025 15:53:57 -0800 Subject: [PATCH 270/391] ensure works on agent --- .../tools/exa_tools/exa_search_tool.py | 41 +++++++++++++------ 1 file changed, 29 insertions(+), 12 deletions(-) diff --git a/src/crewai_tools/tools/exa_tools/exa_search_tool.py b/src/crewai_tools/tools/exa_tools/exa_search_tool.py index 6681e8d1b..6bf834d6c 100644 --- a/src/crewai_tools/tools/exa_tools/exa_search_tool.py +++ b/src/crewai_tools/tools/exa_tools/exa_search_tool.py @@ -1,5 +1,6 @@ from typing import Any, Optional, Type from pydantic import BaseModel, Field +from crewai.tools import BaseTool try: from exa_py import Exa @@ -10,31 +11,48 @@ except ImportError: EXA_INSTALLED = False -class EXABaseToolToolSchema(BaseModel): +class EXABaseToolSchema(BaseModel): search_query: str = Field( ..., description="Mandatory search query you want to use to search the internet" ) + start_published_date: Optional[str] = Field( + None, description="Start date for the search" + ) + end_published_date: Optional[str] = Field( + None, description="End date for the search" + ) + include_domains: Optional[list[str]] = Field( + None, description="List of domains to include in the search" + ) -class EXASearchTool: - args_schema: Type[BaseModel] = EXABaseToolToolSchema - client: Optional["Exa"] = Field(default=None, description="Exa search client") +class EXASearchTool(BaseTool): + model_config = {"arbitrary_types_allowed": True} + name: str = "EXASearchTool" + description: str = "Search the internet using Exa" + args_schema: Type[BaseModel] = EXABaseToolSchema + client: Optional["Exa"] = None + content: Optional[bool] = False + summary: Optional[bool] = False + type: Optional[str] = "auto" def __init__( self, api_key: str, - content: bool = False, - highlights: bool = False, - type: str = "keyword", - use_autoprompt: bool = True, + content: Optional[bool] = False, + summary: Optional[bool] = False, + type: Optional[str] = "auto", + **kwargs, ): + super().__init__( + **kwargs, + ) if not EXA_INSTALLED: raise ImportError("`exa-py` package not found, please run `uv add exa-py`") self.client = Exa(api_key=api_key) self.content = content - self.highlights = highlights + self.summary = summary self.type = type - self.use_autoprompt = use_autoprompt def _run( 
self, @@ -47,7 +65,6 @@ class EXASearchTool: raise ValueError("Client not initialized") search_params = { - "use_autoprompt": self.use_autoprompt, "type": self.type, } @@ -60,7 +77,7 @@ class EXASearchTool: if self.content: results = self.client.search_and_contents( - search_query, highlights=self.highlights, **search_params + search_query, summary=self.summary, **search_params ) else: results = self.client.search(search_query, **search_params) From 9a09ea7703821997db911ff4b469d3dcf24d4bde Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Thu, 30 Jan 2025 16:04:33 -0800 Subject: [PATCH 271/391] better docs and download missing packaged --- src/crewai_tools/tools/exa_tools/README.md | 4 ++-- .../tools/exa_tools/exa_search_tool.py | 14 +++++++++++++- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/src/crewai_tools/tools/exa_tools/README.md b/src/crewai_tools/tools/exa_tools/README.md index 8d556dab3..1d1d20150 100644 --- a/src/crewai_tools/tools/exa_tools/README.md +++ b/src/crewai_tools/tools/exa_tools/README.md @@ -6,7 +6,7 @@ This tool is designed to perform a semantic search for a specified query from a ## Installation To incorporate this tool into your project, follow the installation instructions below: ```shell -pip install 'crewai[tools]' +uv add crewai[tools] exa_py ``` ## Example @@ -16,7 +16,7 @@ The following example demonstrates how to initialize the tool and execute a sear from crewai_tools import EXASearchTool # Initialize the tool for internet searching capabilities -tool = EXASearchTool() +tool = EXASearchTool(api_key="your_api_key") ``` ## Steps to Get Started diff --git a/src/crewai_tools/tools/exa_tools/exa_search_tool.py b/src/crewai_tools/tools/exa_tools/exa_search_tool.py index 6bf834d6c..f094b0495 100644 --- a/src/crewai_tools/tools/exa_tools/exa_search_tool.py +++ b/src/crewai_tools/tools/exa_tools/exa_search_tool.py @@ -48,7 +48,19 @@ class EXASearchTool(BaseTool): **kwargs, ) if not EXA_INSTALLED: - raise ImportError("`exa-py` package not found, please run `uv add exa-py`") + import click + + if click.confirm( + "You are missing the 'exa_py' package. Would you like to install it?" + ): + import subprocess + + subprocess.run(["uv", "add", "exa_py"], check=True) + + else: + raise ImportError( + "You are missing the 'exa_py' package. Would you like to install it?" 
+ ) self.client = Exa(api_key=api_key) self.content = content self.summary = summary From dcd4481ae2e35b856ebc29738dd65097adbcd966 Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Sat, 1 Feb 2025 23:31:15 -0800 Subject: [PATCH 272/391] enable qdrant as vector search tool for crew agents --- src/crewai_tools/__init__.py | 1 + src/crewai_tools/tools/__init__.py | 1 + .../tools/qdrant_vector_search_tool/README.md | 49 +++++ .../qdrant_search_tool.py | 191 ++++++++++++++++++ 4 files changed, 242 insertions(+) create mode 100644 src/crewai_tools/tools/qdrant_vector_search_tool/README.md create mode 100644 src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 7a149598a..4d2ea7e16 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -30,6 +30,7 @@ from .tools import ( PatronusPredefinedCriteriaEvalTool, PDFSearchTool, PGSearchTool, + QdrantVectorSearchTool, RagTool, ScrapeElementFromWebsiteTool, ScrapegraphScrapeTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 2b83fe278..4a9786fe6 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -35,6 +35,7 @@ from .patronus_eval_tool import ( ) from .pdf_search_tool.pdf_search_tool import PDFSearchTool from .pg_seach_tool.pg_search_tool import PGSearchTool +from .qdrant_vector_search_tool.qdrant_search_tool import QdrantVectorSearchTool from .rag.rag_tool import RagTool from .scrape_element_from_website.scrape_element_from_website import ( ScrapeElementFromWebsiteTool, diff --git a/src/crewai_tools/tools/qdrant_vector_search_tool/README.md b/src/crewai_tools/tools/qdrant_vector_search_tool/README.md new file mode 100644 index 000000000..92aeb60f6 --- /dev/null +++ b/src/crewai_tools/tools/qdrant_vector_search_tool/README.md @@ -0,0 +1,49 @@ +# QdrantVectorSearchTool + +## Description + +This tool is specifically crafted for conducting semantic searches within docs within a Qdrant vector database. Use this tool to find semantically similar docs to a given query. + +Qdrant is a vector database that is used to store and query vector embeddings. You can follow their docs here: https://qdrant.tech/documentation/ + +## Installation + +Install the crewai_tools package by executing the following command in your terminal: + +```shell +uv pip install 'crewai[tools]' +``` + +## Example + +To utilize the QdrantVectorSearchTool for different use cases, follow these examples: + +```python +from crewai_tools import QdrantVectorSearchTool + +# To enable the tool to search any website the agent comes across or learns about during its operation +tool = QdrantVectorSearchTool( + collection_name="example_collections", + limit=3, + qdrant_url="https://your-qdrant-cluster-url.com", + qdrant_api_key="your-qdrant-api-key", +) + + +# Adding the tool to an agent +rag_agent = Agent( + name="rag_agent", + role="You are a helpful assistant that can answer questions with the help of the QdrantVectorSearchTool. Retrieve the most relevant docs from the Qdrant database.", + llm="gpt-4o-mini", + tools=[tool], +) +``` + +## Arguments + +- `collection_name` : The name of the collection to search within. (Required) +- `qdrant_url` : The URL of the Qdrant cluster. (Required) +- `qdrant_api_key` : The API key for the Qdrant cluster. (Required) +- `limit` : The number of results to return. (Optional) +- `vectorizer` : The vectorizer to use. 
(Optional) + diff --git a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py new file mode 100644 index 000000000..307fcb8d1 --- /dev/null +++ b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py @@ -0,0 +1,191 @@ +import json +from typing import Any, Optional, Type + +try: + from qdrant_client import QdrantClient + from qdrant_client.http.models import Filter, FieldCondition, MatchValue + + QDRANT_AVAILABLE = True +except ImportError: + QDRANT_AVAILABLE = False + QdrantClient = Any + Filter = Any + FieldCondition = Any + MatchValue = Any + +from crewai.tools import BaseTool +from pydantic import BaseModel, Field + + +class QdrantToolSchema(BaseModel): + """Input for QdrantTool.""" + + query: str = Field( + ..., + description="The query to search retrieve relevant information from the Qdrant database. Pass only the query, not the question.", + ) + filter_by: Optional[str] = Field( + default=None, + description="Filter by properties. Pass only the properties, not the question.", + ) + filter_value: Optional[str] = Field( + default=None, + description="Filter by value. Pass only the value, not the question.", + ) + + +class QdrantVectorSearchTool(BaseTool): + """Tool to query and filter results from a Qdrant vector database. + + This tool provides functionality to perform semantic search operations on documents + stored in a Qdrant collection, with optional filtering capabilities. + + Attributes: + name (str): Name of the tool + description (str): Description of the tool's functionality + client (QdrantClient): Qdrant client instance + collection_name (str): Name of the Qdrant collection to search + limit (int): Maximum number of results to return + score_threshold (float): Minimum similarity score threshold + """ + + name: str = "QdrantVectorSearchTool" + description: str = "A tool to search the Qdrant database for relevant information on internal documents." + args_schema: Type[BaseModel] = QdrantToolSchema + + model_config = {"arbitrary_types_allowed": True} + client: Optional[QdrantClient] = None + collection_name: str = Field( + ..., + description="The name of the Qdrant collection to search", + ) + limit: Optional[int] = Field(default=3) + score_threshold: float = Field(default=0.35) + qdrant_url: str = Field( + ..., + description="The URL of the Qdrant server", + ) + qdrant_api_key: Optional[str] = Field( + default=None, + description="The API key for the Qdrant server", + ) + vectorizer: Optional[str] = Field( + default="BAAI/bge-small-en-v1.5", + description="The vectorizer to use for the Qdrant server", + ) + + def __init__( + self, + qdrant_url: str, + collection_name: str, + qdrant_api_key: Optional[str] = None, + vectorizer: Optional[str] = None, + **kwargs, + ) -> None: + """Initialize the QdrantVectorSearchTool. 
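+
+        Verifies connectivity during construction by listing the existing
+        collections on the configured Qdrant server.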
+ + Args: + qdrant_url: URL of the Qdrant server + collection_name: Name of the collection to search + qdrant_api_key: Optional API key for authentication + vectorizer: Optional model name for text vectorization + + Raises: + ImportError: If qdrant-client package is not installed + ConnectionError: If unable to connect to Qdrant server + """ + kwargs["qdrant_url"] = qdrant_url + kwargs["collection_name"] = collection_name + kwargs["qdrant_api_key"] = qdrant_api_key + if vectorizer: + kwargs["vectorizer"] = vectorizer + + super().__init__(**kwargs) + + if QDRANT_AVAILABLE: + try: + self.client = QdrantClient( + url=qdrant_url, + api_key=qdrant_api_key, + ) + # Verify connection + self.client.get_collections() + except Exception as e: + raise ConnectionError(f"Failed to connect to Qdrant server: {str(e)}") + else: + import click + + if click.confirm( + "You are missing the 'qdrant-client' package. Would you like to install it?" + ): + import subprocess + + subprocess.run( + ["uv", "add", "crewai[tools]", "qdrant-client"], check=True + ) + else: + raise ImportError( + "The 'qdrant-client' package is required to use the QdrantVectorSearchTool. " + "Please install it with: uv add crewai[tools] qdrant-client" + ) + if vectorizer: + self.client.set_model(self.vectorizer) + + def _run( + self, + query: str, + filter_by: Optional[str] = None, + filter_value: Optional[str] = None, + ) -> str: + """Execute the vector search query. + + Args: + query: Search query text + filter_by: Optional field name to filter results + filter_value: Optional value to filter by + + Returns: + JSON string containing search results with metadata + + Raises: + ValueError: If filter_by is provided without filter_value or vice versa + """ + if bool(filter_by) != bool(filter_value): + raise ValueError( + "Both filter_by and filter_value must be provided together" + ) + + search_filter = None + if filter_by and filter_value: + search_filter = Filter( + must=[ + FieldCondition(key=filter_by, match=MatchValue(value=filter_value)) + ] + ) + + try: + search_results = self.client.query( + collection_name=self.collection_name, + query_text=[query], + query_filter=search_filter, + limit=self.limit, + score_threshold=self.score_threshold, + ) + + results = [ + { + "id": point.id, + "metadata": point.metadata, + "context": point.document, + "score": point.score, + } + for point in search_results + ] + + if not results: + return json.dumps({"message": "No results found", "results": []}) + + return json.dumps(results, indent=2) + + except Exception as e: + raise RuntimeError(f"Error executing Qdrant search: {str(e)}") From aff40529a5f9853e36fd46f420734c6460cdc3f9 Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Sat, 1 Feb 2025 23:32:31 -0800 Subject: [PATCH 273/391] updated docs --- src/crewai_tools/tools/qdrant_vector_search_tool/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/qdrant_vector_search_tool/README.md b/src/crewai_tools/tools/qdrant_vector_search_tool/README.md index 92aeb60f6..f5a7f5e30 100644 --- a/src/crewai_tools/tools/qdrant_vector_search_tool/README.md +++ b/src/crewai_tools/tools/qdrant_vector_search_tool/README.md @@ -11,7 +11,7 @@ Qdrant is a vector database that is used to store and query vector embeddings. 
Y Install the crewai_tools package by executing the following command in your terminal: ```shell -uv pip install 'crewai[tools]' +uv pip install 'crewai[tools] qdrant-client fastembed' ``` ## Example From 052a07ddc7e6a6c83b4a0d85226b1c2b634a3868 Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Sat, 1 Feb 2025 23:38:24 -0800 Subject: [PATCH 274/391] setup common default model --- .../tools/qdrant_vector_search_tool/qdrant_search_tool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py index 307fcb8d1..050862fef 100644 --- a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py +++ b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py @@ -70,7 +70,7 @@ class QdrantVectorSearchTool(BaseTool): description="The API key for the Qdrant server", ) vectorizer: Optional[str] = Field( - default="BAAI/bge-small-en-v1.5", + default="fast-bge-small-en-v1.5", description="The vectorizer to use for the Qdrant server", ) From 6b19a3d15646d6dec821a51a3d4a3ab98cf9182b Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Sat, 1 Feb 2025 23:40:37 -0800 Subject: [PATCH 275/391] default set --- .../tools/qdrant_vector_search_tool/qdrant_search_tool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py index 050862fef..307fcb8d1 100644 --- a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py +++ b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py @@ -70,7 +70,7 @@ class QdrantVectorSearchTool(BaseTool): description="The API key for the Qdrant server", ) vectorizer: Optional[str] = Field( - default="fast-bge-small-en-v1.5", + default="BAAI/bge-small-en-v1.5", description="The vectorizer to use for the Qdrant server", ) From d6a6325b5579fb2db0ea5bbc0499e894645fecfa Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Sat, 1 Feb 2025 23:43:09 -0800 Subject: [PATCH 276/391] set vectorizer --- .../tools/qdrant_vector_search_tool/qdrant_search_tool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py index 307fcb8d1..5b42bf74b 100644 --- a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py +++ b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py @@ -70,7 +70,7 @@ class QdrantVectorSearchTool(BaseTool): description="The API key for the Qdrant server", ) vectorizer: Optional[str] = Field( - default="BAAI/bge-small-en-v1.5", + default="BAAI/bge-base-en-v1.5", description="The vectorizer to use for the Qdrant server", ) From 05982aeef29f3fb3e18d40a6c4a3036b2ed81087 Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Sat, 1 Feb 2025 23:44:40 -0800 Subject: [PATCH 277/391] set default vectorizer --- .../tools/qdrant_vector_search_tool/qdrant_search_tool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py index 5b42bf74b..307fcb8d1 100644 --- a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py +++ b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py @@ 
-70,7 +70,7 @@ class QdrantVectorSearchTool(BaseTool): description="The API key for the Qdrant server", ) vectorizer: Optional[str] = Field( - default="BAAI/bge-base-en-v1.5", + default="BAAI/bge-small-en-v1.5", description="The vectorizer to use for the Qdrant server", ) From 5a9bb24b63a13cddf9bd47645de9fb9b17f5a231 Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Mon, 3 Feb 2025 16:19:03 -0800 Subject: [PATCH 278/391] default openai --- .../qdrant_search_tool.py | 200 ++++++++---------- 1 file changed, 91 insertions(+), 109 deletions(-) diff --git a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py index 307fcb8d1..1dd8c6078 100644 --- a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py +++ b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py @@ -1,5 +1,8 @@ import json +import os from typing import Any, Optional, Type +import ollama + try: from qdrant_client import QdrantClient @@ -8,7 +11,7 @@ try: QDRANT_AVAILABLE = True except ImportError: QDRANT_AVAILABLE = False - QdrantClient = Any + QdrantClient = Any # type placeholder Filter = Any FieldCondition = Any MatchValue = Any @@ -35,101 +38,51 @@ class QdrantToolSchema(BaseModel): class QdrantVectorSearchTool(BaseTool): - """Tool to query and filter results from a Qdrant vector database. + """Tool to query and filter results from a Qdrant database. - This tool provides functionality to perform semantic search operations on documents - stored in a Qdrant collection, with optional filtering capabilities. + This tool enables vector similarity search on internal documents stored in Qdrant, + with optional filtering capabilities. Attributes: - name (str): Name of the tool - description (str): Description of the tool's functionality - client (QdrantClient): Qdrant client instance - collection_name (str): Name of the Qdrant collection to search - limit (int): Maximum number of results to return - score_threshold (float): Minimum similarity score threshold + client: Configured QdrantClient instance + collection_name: Name of the Qdrant collection to search + limit: Maximum number of results to return + score_threshold: Minimum similarity score threshold + qdrant_url: Qdrant server URL + qdrant_api_key: Authentication key for Qdrant """ + model_config = {"arbitrary_types_allowed": True} + client: QdrantClient = None name: str = "QdrantVectorSearchTool" description: str = "A tool to search the Qdrant database for relevant information on internal documents." args_schema: Type[BaseModel] = QdrantToolSchema - - model_config = {"arbitrary_types_allowed": True} - client: Optional[QdrantClient] = None - collection_name: str = Field( - ..., - description="The name of the Qdrant collection to search", - ) + query: Optional[str] = None + filter_by: Optional[str] = None + filter_value: Optional[str] = None + collection_name: Optional[str] = None limit: Optional[int] = Field(default=3) score_threshold: float = Field(default=0.35) qdrant_url: str = Field( ..., description="The URL of the Qdrant server", ) - qdrant_api_key: Optional[str] = Field( - default=None, + qdrant_api_key: str = Field( + ..., description="The API key for the Qdrant server", ) - vectorizer: Optional[str] = Field( - default="BAAI/bge-small-en-v1.5", - description="The vectorizer to use for the Qdrant server", + custom_embedding_fn: Optional[callable] = Field( + default=None, + description="A custom embedding function to use for vectorization. 
If not provided, the default model will be used.", ) - def __init__( - self, - qdrant_url: str, - collection_name: str, - qdrant_api_key: Optional[str] = None, - vectorizer: Optional[str] = None, - **kwargs, - ) -> None: - """Initialize the QdrantVectorSearchTool. - - Args: - qdrant_url: URL of the Qdrant server - collection_name: Name of the collection to search - qdrant_api_key: Optional API key for authentication - vectorizer: Optional model name for text vectorization - - Raises: - ImportError: If qdrant-client package is not installed - ConnectionError: If unable to connect to Qdrant server - """ - kwargs["qdrant_url"] = qdrant_url - kwargs["collection_name"] = collection_name - kwargs["qdrant_api_key"] = qdrant_api_key - if vectorizer: - kwargs["vectorizer"] = vectorizer - + def __init__(self, **kwargs): super().__init__(**kwargs) - if QDRANT_AVAILABLE: - try: - self.client = QdrantClient( - url=qdrant_url, - api_key=qdrant_api_key, - ) - # Verify connection - self.client.get_collections() - except Exception as e: - raise ConnectionError(f"Failed to connect to Qdrant server: {str(e)}") - else: - import click - - if click.confirm( - "You are missing the 'qdrant-client' package. Would you like to install it?" - ): - import subprocess - - subprocess.run( - ["uv", "add", "crewai[tools]", "qdrant-client"], check=True - ) - else: - raise ImportError( - "The 'qdrant-client' package is required to use the QdrantVectorSearchTool. " - "Please install it with: uv add crewai[tools] qdrant-client" - ) - if vectorizer: - self.client.set_model(self.vectorizer) + self.client = QdrantClient( + url=self.qdrant_url, + api_key=self.qdrant_api_key, + ) def _run( self, @@ -137,24 +90,30 @@ class QdrantVectorSearchTool(BaseTool): filter_by: Optional[str] = None, filter_value: Optional[str] = None, ) -> str: - """Execute the vector search query. + """Execute vector similarity search on Qdrant. Args: - query: Search query text - filter_by: Optional field name to filter results + query: Search query to vectorize and match + filter_by: Optional metadata field to filter on filter_value: Optional value to filter by Returns: - JSON string containing search results with metadata + JSON string containing search results with metadata and scores Raises: - ValueError: If filter_by is provided without filter_value or vice versa + ImportError: If qdrant-client is not installed + ValueError: If Qdrant credentials are missing """ - if bool(filter_by) != bool(filter_value): - raise ValueError( - "Both filter_by and filter_value must be provided together" + if not QDRANT_AVAILABLE: + raise ImportError( + "The 'qdrant-client' package is required to use the QdrantVectorSearchTool. 
" + "Please install it with: pip install qdrant-client" ) + if not self.qdrant_url: + raise ValueError("QDRANT_URL is not set") + + # Create filter if filter parameters are provided search_filter = None if filter_by and filter_value: search_filter = Filter( @@ -163,29 +122,52 @@ class QdrantVectorSearchTool(BaseTool): ] ) - try: - search_results = self.client.query( - collection_name=self.collection_name, - query_text=[query], - query_filter=search_filter, - limit=self.limit, - score_threshold=self.score_threshold, + # Search in Qdrant using the built-in query method + + query_vector = ( + self._vectorize_query(query) + if not self.custom_embedding_fn + else self.custom_embedding_fn(query) + ) + search_results = self.client.query_points( + collection_name=self.collection_name, + query=query_vector, + query_filter=search_filter, + limit=self.limit, + score_threshold=self.score_threshold, + ) + + # Format results similar to storage implementation + results = [] + # Extract the list of ScoredPoint objects from the tuple + for point in search_results: + result = { + "metadata": point[1][0].payload.get("metadata", {}), + "context": point[1][0].payload.get("text", ""), + "distance": point[1][0].score, + } + results.append(result) + + return json.dumps(results, indent=2) + + def _vectorize_query(self, query: str) -> list[float]: + """Default vectorization function with openai. + + Args: + query (str): The query to vectorize + + Returns: + list[float]: The vectorized query + """ + import openai + + client = openai.Client(api_key=os.getenv("OPENAI_API_KEY")) + embedding = ( + client.embeddings.create( + input=[query], + model="text-embedding-3-small", ) - - results = [ - { - "id": point.id, - "metadata": point.metadata, - "context": point.document, - "score": point.score, - } - for point in search_results - ] - - if not results: - return json.dumps({"message": "No results found", "results": []}) - - return json.dumps(results, indent=2) - - except Exception as e: - raise RuntimeError(f"Error executing Qdrant search: {str(e)}") + .data[0] + .embedding + ) + return embedding From 96c3fbdddfa707062f3f039ca19308916b7534a9 Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Mon, 3 Feb 2025 16:19:33 -0800 Subject: [PATCH 279/391] remove ollama from here --- .../tools/qdrant_vector_search_tool/qdrant_search_tool.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py index 1dd8c6078..aff25d4f1 100644 --- a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py +++ b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py @@ -1,7 +1,6 @@ import json import os from typing import Any, Optional, Type -import ollama try: From 12927ba79d8ac9107d3f4b6596dee80e205fb860 Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Mon, 3 Feb 2025 16:19:53 -0800 Subject: [PATCH 280/391] cleanup --- .../tools/qdrant_vector_search_tool/qdrant_search_tool.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py index aff25d4f1..bd9cd8701 100644 --- a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py +++ b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py @@ -122,7 +122,6 @@ class QdrantVectorSearchTool(BaseTool): ) # Search in Qdrant using the built-in query method - query_vector = ( 
self._vectorize_query(query) if not self.custom_embedding_fn From 554bba80360060c9b2d0b7b54e6955f0f64d7bdd Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Mon, 3 Feb 2025 16:22:09 -0800 Subject: [PATCH 281/391] clearer docs --- src/crewai_tools/tools/qdrant_vector_search_tool/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/qdrant_vector_search_tool/README.md b/src/crewai_tools/tools/qdrant_vector_search_tool/README.md index f5a7f5e30..131dbca15 100644 --- a/src/crewai_tools/tools/qdrant_vector_search_tool/README.md +++ b/src/crewai_tools/tools/qdrant_vector_search_tool/README.md @@ -11,12 +11,12 @@ Qdrant is a vector database that is used to store and query vector embeddings. Y Install the crewai_tools package by executing the following command in your terminal: ```shell -uv pip install 'crewai[tools] qdrant-client fastembed' +uv pip install 'crewai[tools] qdrant-client openai' ``` ## Example -To utilize the QdrantVectorSearchTool for different use cases, follow these examples: +To utilize the QdrantVectorSearchTool for different use cases, follow these examples: Default model is openai. ```python from crewai_tools import QdrantVectorSearchTool From 837198ae08f07efef14d7a44a0280e23cdd1d3c9 Mon Sep 17 00:00:00 2001 From: Lorenze Jay Date: Fri, 7 Feb 2025 10:03:37 -0800 Subject: [PATCH 282/391] Add interactive Qdrant client installation prompt --- .../qdrant_search_tool.py | 20 ++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py index bd9cd8701..c59dd29d5 100644 --- a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py +++ b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py @@ -82,6 +82,21 @@ class QdrantVectorSearchTool(BaseTool): url=self.qdrant_url, api_key=self.qdrant_api_key, ) + else: + import click + + if click.confirm( + "The 'qdrant-client' package is required to use the QdrantVectorSearchTool. " + "Would you like to install it?" + ): + import subprocess + + subprocess.run(["uv", "add", "qdrant-client"], check=True) + else: + raise ImportError( + "The 'qdrant-client' package is required to use the QdrantVectorSearchTool. " + "Please install it with: uv add qdrant-client" + ) def _run( self, @@ -103,11 +118,6 @@ class QdrantVectorSearchTool(BaseTool): ImportError: If qdrant-client is not installed ValueError: If Qdrant credentials are missing """ - if not QDRANT_AVAILABLE: - raise ImportError( - "The 'qdrant-client' package is required to use the QdrantVectorSearchTool. 
" - "Please install it with: pip install qdrant-client" - ) if not self.qdrant_url: raise ValueError("QDRANT_URL is not set") From f1187c546984cc46657a4c0603a71befa7d2927b Mon Sep 17 00:00:00 2001 From: Mike Plachta Date: Thu, 27 Feb 2025 08:44:43 -0800 Subject: [PATCH 283/391] S3 Tools --- src/crewai_tools/aws/__init__.py | 3 ++ src/crewai_tools/aws/s3/README.md | 52 ++++++++++++++++++++++++++ src/crewai_tools/aws/s3/__init__.py | 2 + src/crewai_tools/aws/s3/reader_tool.py | 42 +++++++++++++++++++++ src/crewai_tools/aws/s3/writer_tool.py | 37 ++++++++++++++++++ 5 files changed, 136 insertions(+) create mode 100644 src/crewai_tools/aws/__init__.py create mode 100644 src/crewai_tools/aws/s3/README.md create mode 100644 src/crewai_tools/aws/s3/__init__.py create mode 100644 src/crewai_tools/aws/s3/reader_tool.py create mode 100644 src/crewai_tools/aws/s3/writer_tool.py diff --git a/src/crewai_tools/aws/__init__.py b/src/crewai_tools/aws/__init__.py new file mode 100644 index 000000000..ea4626a32 --- /dev/null +++ b/src/crewai_tools/aws/__init__.py @@ -0,0 +1,3 @@ +from .s3 import S3ReaderTool, S3WriterTool + +__all__ = ['S3ReaderTool', 'S3WriterTool'] \ No newline at end of file diff --git a/src/crewai_tools/aws/s3/README.md b/src/crewai_tools/aws/s3/README.md new file mode 100644 index 000000000..ffd74d88c --- /dev/null +++ b/src/crewai_tools/aws/s3/README.md @@ -0,0 +1,52 @@ +# AWS S3 Tools + +## Description + +These tools provide a way to interact with Amazon S3, a cloud storage service. + +## Installation + +Install the crewai_tools package + +```shell +pip install 'crewai[tools]' +``` + +## AWS Connectivity + +The tools use `boto3` to connect to AWS S3. +You can configure your environment to use AWS IAM roles, see [AWS IAM Roles documentation](https://docs.aws.amazon.com/sdk-for-python/v1/developer-guide/iam-roles.html#creating-an-iam-role) + +Set the following environment variables: + +- `CREW_AWS_REGION` +- `CREW_AWS_ACCESS_KEY_ID` +- `CREW_AWS_SEC_ACCESS_KEY` + +## Usage + +To use the AWS S3 tools in your CrewAI agents, import the necessary tools and include them in your agent's configuration: + +```python +from crewai_tools.aws.s3 import S3ReaderTool, S3WriterTool + +# For reading from S3 +@agent +def file_retriever(self) -> Agent: + return Agent( + config=self.agents_config['file_retriever'], + verbose=True, + tools=[S3ReaderTool()] + ) + +# For writing to S3 +@agent +def file_uploader(self) -> Agent: + return Agent( + config=self.agents_config['file_uploader'], + verbose=True, + tools=[S3WriterTool()] + ) +``` + +These tools can be used to read from and write to S3 buckets within your CrewAI workflows. Make sure you have properly configured your AWS credentials as mentioned in the AWS Connectivity section above. 
diff --git a/src/crewai_tools/aws/s3/__init__.py b/src/crewai_tools/aws/s3/__init__.py new file mode 100644 index 000000000..4c858837c --- /dev/null +++ b/src/crewai_tools/aws/s3/__init__.py @@ -0,0 +1,2 @@ +from .reader_tool import S3ReaderTool +from .writer_tool import S3WriterTool \ No newline at end of file diff --git a/src/crewai_tools/aws/s3/reader_tool.py b/src/crewai_tools/aws/s3/reader_tool.py new file mode 100644 index 000000000..7cd734081 --- /dev/null +++ b/src/crewai_tools/aws/s3/reader_tool.py @@ -0,0 +1,42 @@ +from typing import Type +import os + +from crewai.tools import BaseTool +from pydantic import BaseModel, Field +import boto3 +from botocore.exceptions import ClientError + + +class S3ReaderToolInput(BaseModel): + """Input schema for S3ReaderTool.""" + + file_path: str = Field(..., description="S3 file path (e.g., 's3://bucket-name/file-name')") + + +class S3ReaderTool(BaseTool): + name: str = "S3 Reader Tool" + description: str = "Reads a file from Amazon S3 given an S3 file path" + args_schema: Type[BaseModel] = S3ReaderToolInput + + def _run(self, file_path: str) -> str: + try: + bucket_name, object_key = self._parse_s3_path(file_path) + + s3 = boto3.client( + 's3', + region_name=os.getenv('CREW_AWS_REGION', 'us-east-1'), + aws_access_key_id=os.getenv('CREW_AWS_ACCESS_KEY_ID'), + aws_secret_access_key=os.getenv('CREW_AWS_SEC_ACCESS_KEY') + ) + + # Read file content from S3 + response = s3.get_object(Bucket=bucket_name, Key=object_key) + file_content = response['Body'].read().decode('utf-8') + + return file_content + except ClientError as e: + return f"Error reading file from S3: {str(e)}" + + def _parse_s3_path(self, file_path: str) -> tuple: + parts = file_path.replace("s3://", "").split("/", 1) + return parts[0], parts[1] diff --git a/src/crewai_tools/aws/s3/writer_tool.py b/src/crewai_tools/aws/s3/writer_tool.py new file mode 100644 index 000000000..0c4201e0f --- /dev/null +++ b/src/crewai_tools/aws/s3/writer_tool.py @@ -0,0 +1,37 @@ +from typing import Type +import os + +from crewai.tools import BaseTool +from pydantic import BaseModel, Field +import boto3 +from botocore.exceptions import ClientError + +class S3WriterToolInput(BaseModel): + """Input schema for S3WriterTool.""" + file_path: str = Field(..., description="S3 file path (e.g., 's3://bucket-name/file-name')") + content: str = Field(..., description="Content to write to the file") + +class S3WriterTool(BaseTool): + name: str = "S3 Writer Tool" + description: str = "Writes content to a file in Amazon S3 given an S3 file path" + args_schema: Type[BaseModel] = S3WriterToolInput + + def _run(self, file_path: str, content: str) -> str: + try: + bucket_name, object_key = self._parse_s3_path(file_path) + + s3 = boto3.client( + 's3', + region_name=os.getenv('CREW_AWS_REGION', 'us-east-1'), + aws_access_key_id=os.getenv('CREW_AWS_ACCESS_KEY_ID'), + aws_secret_access_key=os.getenv('CREW_AWS_SEC_ACCESS_KEY') + ) + + s3.put_object(Bucket=bucket_name, Key=object_key, Body=content.encode('utf-8')) + return f"Successfully wrote content to {file_path}" + except ClientError as e: + return f"Error writing file to S3: {str(e)}" + + def _parse_s3_path(self, file_path: str) -> tuple: + parts = file_path.replace("s3://", "").split("/", 1) + return parts[0], parts[1] From 13bad2bb691bc74fdb7e8092820e9a18a57da34b Mon Sep 17 00:00:00 2001 From: MQ Date: Thu, 27 Feb 2025 21:08:05 +0100 Subject: [PATCH 284/391] initial implementation --- src/crewai_tools/__init__.py | 1 + src/crewai_tools/tools/__init__.py | 1 + 
src/crewai_tools/tools/apify_actors/README.md | 51 +++++++++++++++++ .../tools/apify_actors/apify_actors.py | 57 +++++++++++++++++++ 4 files changed, 110 insertions(+) create mode 100644 src/crewai_tools/tools/apify_actors/README.md create mode 100644 src/crewai_tools/tools/apify_actors/apify_actors.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 4d2ea7e16..6cd3125ff 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -1,5 +1,6 @@ from .tools import ( AIMindTool, + ApifyActorsTool, BraveSearchTool, BrowserbaseLoadTool, CodeDocsSearchTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 4a9786fe6..34b200c7a 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -1,4 +1,5 @@ from .ai_mind_tool.ai_mind_tool import AIMindTool +from .apify_actors.apify_actors import ApifyActorsTool from .brave_search_tool.brave_search_tool import BraveSearchTool from .browserbase_load_tool.browserbase_load_tool import BrowserbaseLoadTool from .code_docs_search_tool.code_docs_search_tool import CodeDocsSearchTool diff --git a/src/crewai_tools/tools/apify_actors/README.md b/src/crewai_tools/tools/apify_actors/README.md new file mode 100644 index 000000000..8172fadc5 --- /dev/null +++ b/src/crewai_tools/tools/apify_actors/README.md @@ -0,0 +1,51 @@ +# ApifyActorsTool + +## Description +The `ApifyActorsTool` is a powerful utility that enables seamless integration of [Apify Actors](https://apify.com/) into your CrewAI workflows. Apify Actors are cloud-based web scraping and automation programs that allow you to extract data, crawl websites, and automate tasks without managing infrastructure. This tool provides an efficient way to run Actors like the [RAG Web Browser](https://apify.com/apify/rag-web-browser) directly within your agents, making it ideal for tasks requiring real-time web data extraction or automation. For more Actors, visit the [Apify Store](https://apify.com/store). + +For more details on using Apify with CrewAI, visit the [Apify CrewAI integration documentation](https://docs.apify.com/platform/integrations/crewai). + +## Installation +To use the `ApifyActorsTool`, you'll need to install the `crewai[tools]` package along with the `langchain-apify` package. Additionally, you must have an Apify API token, which you can obtain by following the instructions in the [Apify API documentation](https://docs.apify.com/platform/integrations/api). Set your API token as an environment variable (`APIFY_API_TOKEN`) to authenticate requests. + +Install the required packages using pip: + +```shell +pip install 'crewai[tools]' langchain-apify +``` + +Set your Apify API token in your environment: + +```shell +export APIFY_API_TOKEN='Your Apify API token' +``` + +## Example +The `ApifyActorsTool` is straightforward to integrate into your CrewAI projects. 
Below is an example of how to initialize and use the tool to run the [RAG Web Browser Actor](https://apify.com/apify/rag-web-browser) to search the web: + +```python +from crewai_tools import ApifyActorsTool + +# Initialize the tool with the desired Apify Actor +tool = ApifyActorsTool(actor_name="apify/rag-web-browser") + +# Run the tool with a specific input, e.g., a search query +results = tool.run(run_input={"query": "What is CrewAI?", "maxResults": 5}) +print(results) +``` + +## Arguments +The `ApifyActorsTool` requires a few key arguments to function correctly: + +- `actor_name`: A mandatory argument specifying the ID of the Apify Actor to run (e.g., `"apify/rag-web-browser"`). You can explore available Actors in the [Apify Actors documentation](https://docs.apify.com/platform/actors). +- `run_input`: A dictionary containing the input parameters for the Actor, such as `query` or `maxResults`. The specific inputs depend on the Actor being used. Refer to the Actor's detail page for input schema; for example, [RAG Web Browser Actor](https://apify.com/apify/rag-web-browser/input-schema) + +The tool dynamically adapts to the chosen Actor, offering flexibility and ease of use for a wide range of automation and scraping tasks. + +## Resources +- [Apify Platform](https://apify.com/) - Learn more about Apify and its ecosystem. +- [RAG Web Browser Actor](https://apify.com/apify/rag-web-browser) - A popular Actor for web searching and data retrieval. +- [Apify Actors Documentation](https://docs.apify.com/platform/actors) - Detailed guide to Apify Actors and their capabilities. +- [CrewAI Integration Guide](https://docs.apify.com/platform/integrations/crewai) - Official documentation for integrating Apify with CrewAI. + +The `ApifyActorsTool` empowers your CrewAI agents with robust web scraping and automation capabilities, streamlining complex workflows with minimal setup. diff --git a/src/crewai_tools/tools/apify_actors/apify_actors.py b/src/crewai_tools/tools/apify_actors/apify_actors.py new file mode 100644 index 000000000..75e0bd7b3 --- /dev/null +++ b/src/crewai_tools/tools/apify_actors/apify_actors.py @@ -0,0 +1,57 @@ +from crewai.tools import BaseTool +from pydantic import Field +from typing import Any + +try: + from langchain_apify import ApifyActorsTool as _ApifyActorsTool +except ImportError: + raise ImportError( + "Could not import langchain_apify python package. " + "Please install it with `pip install langchain-apify` or `uv add langchain-apify`." + ) + + +class ApifyActorsTool(BaseTool): + """Tool that runs Apify Actors. + + To use, you should have the environment variable `APIFY_API_TOKEN` set + with your API key. + + For details, see https://docs.apify.com/platform/integrations/crewai + + Example: + .. 
code-block:: python + from crewai.tools import ApifyActorsTool + + tool = ApifyActorsTool(actor_id="apify/rag-web-browser") + + results = tool.run({"query": "what is Apify?", "maxResults": 5}) + print(results) + """ + actor_tool: _ApifyActorsTool | None = Field( + default=None, description="Apify Actor Tool" + ) + + def __init__( + self, + actor_name: str, + *args: Any, + **kwargs: Any + ) -> None: + actor_tool = _ApifyActorsTool(actor_name) + + kwargs.update( + { + "name": actor_tool.name, + "description": actor_tool.description, + "args_schema": actor_tool.args_schema, + } + ) + super().__init__(*args, **kwargs) + self.actor_tool = actor_tool + + def _run(self, run_input: dict) -> list[dict]: + if self.actor_tool is None: + msg = "ApifyActorsToolCrewAI is not initialized" + raise ValueError(msg) + return self.actor_tool._run(run_input) From 3fcc7b42cb41006deda1e829cdfab0c81134f2c6 Mon Sep 17 00:00:00 2001 From: MQ Date: Thu, 27 Feb 2025 21:12:38 +0100 Subject: [PATCH 285/391] fix docstring --- src/crewai_tools/tools/apify_actors/apify_actors.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/apify_actors/apify_actors.py b/src/crewai_tools/tools/apify_actors/apify_actors.py index 75e0bd7b3..af1e6d863 100644 --- a/src/crewai_tools/tools/apify_actors/apify_actors.py +++ b/src/crewai_tools/tools/apify_actors/apify_actors.py @@ -21,7 +21,7 @@ class ApifyActorsTool(BaseTool): Example: .. code-block:: python - from crewai.tools import ApifyActorsTool + from crewai_tools import ApifyActorsTool tool = ApifyActorsTool(actor_id="apify/rag-web-browser") From 5bcb598f75ffcdccc65df091bd3d1fc80b82ffd6 Mon Sep 17 00:00:00 2001 From: MQ Date: Thu, 27 Feb 2025 21:14:42 +0100 Subject: [PATCH 286/391] fix readme --- src/crewai_tools/tools/apify_actors/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/crewai_tools/tools/apify_actors/README.md b/src/crewai_tools/tools/apify_actors/README.md index 8172fadc5..03e279d01 100644 --- a/src/crewai_tools/tools/apify_actors/README.md +++ b/src/crewai_tools/tools/apify_actors/README.md @@ -1,7 +1,7 @@ # ApifyActorsTool ## Description -The `ApifyActorsTool` is a powerful utility that enables seamless integration of [Apify Actors](https://apify.com/) into your CrewAI workflows. Apify Actors are cloud-based web scraping and automation programs that allow you to extract data, crawl websites, and automate tasks without managing infrastructure. This tool provides an efficient way to run Actors like the [RAG Web Browser](https://apify.com/apify/rag-web-browser) directly within your agents, making it ideal for tasks requiring real-time web data extraction or automation. For more Actors, visit the [Apify Store](https://apify.com/store). +The `ApifyActorsTool` is a powerful utility that enables seamless integration of [Apify](https://apify.com/) into your CrewAI workflows. Apify Actors are cloud-based web scraping and automation programs that allow you to extract data, crawl websites, and automate tasks without managing infrastructure. This tool provides an efficient way to run Actors like the [RAG Web Browser](https://apify.com/apify/rag-web-browser) directly within your agents, making it ideal for tasks requiring real-time web data extraction or automation. For more Actors, visit the [Apify Store](https://apify.com/store). For more details on using Apify with CrewAI, visit the [Apify CrewAI integration documentation](https://docs.apify.com/platform/integrations/crewai). 
@@ -37,8 +37,8 @@ print(results) ## Arguments The `ApifyActorsTool` requires a few key arguments to function correctly: -- `actor_name`: A mandatory argument specifying the ID of the Apify Actor to run (e.g., `"apify/rag-web-browser"`). You can explore available Actors in the [Apify Actors documentation](https://docs.apify.com/platform/actors). -- `run_input`: A dictionary containing the input parameters for the Actor, such as `query` or `maxResults`. The specific inputs depend on the Actor being used. Refer to the Actor's detail page for input schema; for example, [RAG Web Browser Actor](https://apify.com/apify/rag-web-browser/input-schema) +- `actor_name`: A mandatory argument specifying the ID of the Apify Actor to run (e.g., `"apify/rag-web-browser"`). You can explore available Actors in the [Apify Store](https://apify.com/store). +- `run_input`: A dictionary containing the input parameters for the Actor, such as `query` or `maxResults`. The specific inputs depend on the Actor being used. Refer to the Actor's detail page for input schema; for example, [RAG Web Browser Actor](https://apify.com/apify/rag-web-browser/input-schema). The tool dynamically adapts to the chosen Actor, offering flexibility and ease of use for a wide range of automation and scraping tasks. From 867305540c2860433b75df448bbea35de0d6130d Mon Sep 17 00:00:00 2001 From: MQ Date: Thu, 27 Feb 2025 21:27:05 +0100 Subject: [PATCH 287/391] improve --- .../tools/apify_actors/apify_actors.py | 21 +++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/apify_actors/apify_actors.py b/src/crewai_tools/tools/apify_actors/apify_actors.py index af1e6d863..ec08a553d 100644 --- a/src/crewai_tools/tools/apify_actors/apify_actors.py +++ b/src/crewai_tools/tools/apify_actors/apify_actors.py @@ -1,6 +1,7 @@ from crewai.tools import BaseTool from pydantic import Field -from typing import Any +from typing import Any, Dict, List +import os try: from langchain_apify import ApifyActorsTool as _ApifyActorsTool @@ -38,6 +39,14 @@ class ApifyActorsTool(BaseTool): *args: Any, **kwargs: Any ) -> None: + if not os.environ.get("APIFY_API_TOKEN"): + msg = ( + "APIFY_API_TOKEN environment variable is not set. " + "Please set it to your API key, to learn how to get it, " + "see https://docs.apify.com/platform/integrations/api" + ) + raise ValueError(msg) + actor_tool = _ApifyActorsTool(actor_name) kwargs.update( @@ -50,7 +59,15 @@ class ApifyActorsTool(BaseTool): super().__init__(*args, **kwargs) self.actor_tool = actor_tool - def _run(self, run_input: dict) -> list[dict]: + def _run(self, run_input: Dict[str, Any]) -> List[Dict[str, Any]]: + """Run the Actor tool with the given input. + + Returns: + List[Dict[str, Any]]: Results from the actor execution. + + Raises: + ValueError: If 'actor_tool' is not initialized. 
+ """ if self.actor_tool is None: msg = "ApifyActorsToolCrewAI is not initialized" raise ValueError(msg) From 35aff6e84edac4c87d68784e5cfa0b8d030d363d Mon Sep 17 00:00:00 2001 From: MQ Date: Fri, 28 Feb 2025 10:10:56 +0100 Subject: [PATCH 288/391] improve code, lazy import, improve readme --- src/crewai_tools/tools/apify_actors/README.md | 6 ++- .../tools/apify_actors/apify_actors.py | 54 ++++++++++++------- 2 files changed, 40 insertions(+), 20 deletions(-) diff --git a/src/crewai_tools/tools/apify_actors/README.md b/src/crewai_tools/tools/apify_actors/README.md index 03e279d01..a29e3ae97 100644 --- a/src/crewai_tools/tools/apify_actors/README.md +++ b/src/crewai_tools/tools/apify_actors/README.md @@ -1,7 +1,7 @@ # ApifyActorsTool ## Description -The `ApifyActorsTool` is a powerful utility that enables seamless integration of [Apify](https://apify.com/) into your CrewAI workflows. Apify Actors are cloud-based web scraping and automation programs that allow you to extract data, crawl websites, and automate tasks without managing infrastructure. This tool provides an efficient way to run Actors like the [RAG Web Browser](https://apify.com/apify/rag-web-browser) directly within your agents, making it ideal for tasks requiring real-time web data extraction or automation. For more Actors, visit the [Apify Store](https://apify.com/store). +The `ApifyActorsTool` is a powerful utility that enables seamless integration of [Apify Actors](https://apify.com/) into your CrewAI workflows. Apify Actors are cloud-based web scraping and automation programs that allow you to extract data, crawl websites, and automate tasks without managing infrastructure. This tool provides an efficient way to run Actors like the [RAG Web Browser](https://apify.com/apify/rag-web-browser) directly within your agents, making it ideal for tasks requiring real-time web data extraction or automation. For more Actors, visit the [Apify Store](https://apify.com/store). For more details on using Apify with CrewAI, visit the [Apify CrewAI integration documentation](https://docs.apify.com/platform/integrations/crewai). @@ -31,7 +31,9 @@ tool = ApifyActorsTool(actor_name="apify/rag-web-browser") # Run the tool with a specific input, e.g., a search query results = tool.run(run_input={"query": "What is CrewAI?", "maxResults": 5}) -print(results) +for result in results: + print(result['metadata']['url']) + print(result['markdown']) ``` ## Arguments diff --git a/src/crewai_tools/tools/apify_actors/apify_actors.py b/src/crewai_tools/tools/apify_actors/apify_actors.py index ec08a553d..dc229214d 100644 --- a/src/crewai_tools/tools/apify_actors/apify_actors.py +++ b/src/crewai_tools/tools/apify_actors/apify_actors.py @@ -1,16 +1,10 @@ from crewai.tools import BaseTool from pydantic import Field -from typing import Any, Dict, List +from typing import TYPE_CHECKING, Any, Dict, List import os -try: +if TYPE_CHECKING: from langchain_apify import ApifyActorsTool as _ApifyActorsTool -except ImportError: - raise ImportError( - "Could not import langchain_apify python package. " - "Please install it with `pip install langchain-apify` or `uv add langchain-apify`." - ) - class ApifyActorsTool(BaseTool): """Tool that runs Apify Actors. @@ -20,18 +14,30 @@ class ApifyActorsTool(BaseTool): For details, see https://docs.apify.com/platform/integrations/crewai + Args: + actor_name (str): The name of the Apify Actor to run. + *args: Variable length argument list passed to BaseTool. + **kwargs: Arbitrary keyword arguments passed to BaseTool. 
+ + Returns: + List[Dict[str, Any]]: Results from the actor execution. + + Raises: + ValueError: If `APIFY_API_TOKEN` is not set or if the tool is not initialized. + ImportError: If `langchain_apify` package is not installed. + Example: .. code-block:: python from crewai_tools import ApifyActorsTool - tool = ApifyActorsTool(actor_id="apify/rag-web-browser") + tool = ApifyActorsTool(actor_name="apify/rag-web-browser") - results = tool.run({"query": "what is Apify?", "maxResults": 5}) - print(results) + results = tool.run(run_input={"query": "What is CrewAI?", "maxResults": 5}) + for result in results: + print(result['metadata']['url']) + print(result['markdown']) """ - actor_tool: _ApifyActorsTool | None = Field( - default=None, description="Apify Actor Tool" - ) + actor_tool: _ApifyActorsTool = Field(description="Apify Actor Tool") def __init__( self, @@ -47,6 +53,13 @@ class ApifyActorsTool(BaseTool): ) raise ValueError(msg) + try: + from langchain_apify import ApifyActorsTool as _ApifyActorsTool + except ImportError: + raise ImportError( + "Could not import langchain_apify python package. " + "Please install it with `pip install langchain-apify` or `uv add langchain-apify`." + ) actor_tool = _ApifyActorsTool(actor_name) kwargs.update( @@ -68,7 +81,12 @@ class ApifyActorsTool(BaseTool): Raises: ValueError: If 'actor_tool' is not initialized. """ - if self.actor_tool is None: - msg = "ApifyActorsToolCrewAI is not initialized" - raise ValueError(msg) - return self.actor_tool._run(run_input) + try: + return self.actor_tool._run(run_input) + except Exception as e: + msg = ( + f'Failed to run ApifyActorsTool {self.name}. ' + 'Please check your Apify account Actor run logs for more details.' + f'Error: {e}' + ) + raise RuntimeError(msg) from e From 884ea63b493ab34dc9b40c01b73e4aeee30c6d83 Mon Sep 17 00:00:00 2001 From: MQ Date: Fri, 28 Feb 2025 10:22:56 +0100 Subject: [PATCH 289/391] other improvements --- src/crewai_tools/tools/apify_actors/README.md | 94 +++++++++++++------ .../tools/apify_actors/apify_actors.py | 4 +- 2 files changed, 68 insertions(+), 30 deletions(-) diff --git a/src/crewai_tools/tools/apify_actors/README.md b/src/crewai_tools/tools/apify_actors/README.md index a29e3ae97..9917c8d81 100644 --- a/src/crewai_tools/tools/apify_actors/README.md +++ b/src/crewai_tools/tools/apify_actors/README.md @@ -1,53 +1,91 @@ # ApifyActorsTool +Integrate [Apify Actors](https://apify.com/) into your CrewAI workflows with Ease. ## Description -The `ApifyActorsTool` is a powerful utility that enables seamless integration of [Apify Actors](https://apify.com/) into your CrewAI workflows. Apify Actors are cloud-based web scraping and automation programs that allow you to extract data, crawl websites, and automate tasks without managing infrastructure. This tool provides an efficient way to run Actors like the [RAG Web Browser](https://apify.com/apify/rag-web-browser) directly within your agents, making it ideal for tasks requiring real-time web data extraction or automation. For more Actors, visit the [Apify Store](https://apify.com/store). +The `ApifyActorsTool` seamlessly integrates [Apify Actors](https://apify.com/) - cloud-based web scraping and automation programs—into your CrewAI workflows. Whether you need to extract data, crawl websites, or automate tasks, this tool simplifies the process without requiring infrastructure management. -For more details on using Apify with CrewAI, visit the [Apify CrewAI integration documentation](https://docs.apify.com/platform/integrations/crewai). 
+Key features: +- **Run Actors Directly**: Execute Actors like the [RAG Web Browser](https://apify.com/apify/rag-web-browser) within CrewAI agents. +- **Real-Time Data**: Ideal for tasks requiring up-to-date web data or automation. +- **Explore More**: Discover additional Actors in the [Apify Store](https://apify.com/store). + +For detailed integration guidance, see the [Apify CrewAI documentation](https://docs.apify.com/platform/integrations/crewai). ## Installation -To use the `ApifyActorsTool`, you'll need to install the `crewai[tools]` package along with the `langchain-apify` package. Additionally, you must have an Apify API token, which you can obtain by following the instructions in the [Apify API documentation](https://docs.apify.com/platform/integrations/api). Set your API token as an environment variable (`APIFY_API_TOKEN`) to authenticate requests. +To use `ApifyActorsTool`, install the required packages and configure your Apify API token. You’ll need an API token from Apify - see the [Apify API documentation](https://docs.apify.com/platform/integrations/api) for instructions. -Install the required packages using pip: +### Steps +1. **Install Dependencies** + Use pip to install `crewai[tools]` and `langchain-apify`: + ```bash + pip install 'crewai[tools]' langchain-apify + ``` + Alternatively, with `uv`: + ```bash + uv pip install 'crewai[tools]' langchain-apify + ``` -```shell -pip install 'crewai[tools]' langchain-apify -``` +2. **Set Your API Token** + Export the token as an environment variable: + - On Linux/macOS: + ```bash + export APIFY_API_TOKEN='your-api-token-here' + ``` + - On Windows (Command Prompt): + ```cmd + set APIFY_API_TOKEN=your-api-token-here + ``` + - Or add it to your `.env` file and load it with a library like `python-dotenv`. -Set your Apify API token in your environment: +3. **Verify Installation** + Run `python -c "import langchain_apify; print('Setup complete')"` to ensure dependencies are installed. -```shell -export APIFY_API_TOKEN='Your Apify API token' -``` - -## Example -The `ApifyActorsTool` is straightforward to integrate into your CrewAI projects. Below is an example of how to initialize and use the tool to run the [RAG Web Browser Actor](https://apify.com/apify/rag-web-browser) to search the web: +## Usage example +Here’s how to use `ApifyActorsTool` to run the [RAG Web Browser Actor](https://apify.com/apify/rag-web-browser) for web searching within a CrewAI workflow: ```python from crewai_tools import ApifyActorsTool -# Initialize the tool with the desired Apify Actor +# Initialize the tool with an Apify Actor tool = ApifyActorsTool(actor_name="apify/rag-web-browser") -# Run the tool with a specific input, e.g., a search query +# Run the tool with input parameters results = tool.run(run_input={"query": "What is CrewAI?", "maxResults": 5}) + +# Process the results for result in results: - print(result['metadata']['url']) - print(result['markdown']) + print(f"URL: {result['metadata']['url']}") + print(f"Content: {result['markdown'][:100]}...") # Snippet of markdown content ``` -## Arguments -The `ApifyActorsTool` requires a few key arguments to function correctly: +### Expected output +``` +URL: https://www.example.com/crewai-intro +Content: CrewAI is a framework for building AI-powered workflows... +URL: https://docs.crewai.com/ +Content: Official documentation for CrewAI... +``` -- `actor_name`: A mandatory argument specifying the ID of the Apify Actor to run (e.g., `"apify/rag-web-browser"`). 
You can explore available Actors in the [Apify Store](https://apify.com/store). -- `run_input`: A dictionary containing the input parameters for the Actor, such as `query` or `maxResults`. The specific inputs depend on the Actor being used. Refer to the Actor's detail page for input schema; for example, [RAG Web Browser Actor](https://apify.com/apify/rag-web-browser/input-schema). +Try other Actors from the [Apify Store](https://apify.com/store) by changing `actor_name` and adjusting `run_input` per the Actor's input schema. -The tool dynamically adapts to the chosen Actor, offering flexibility and ease of use for a wide range of automation and scraping tasks. +## Configuration +The `ApifyActorsTool` requires specific inputs to operate: + +- **`actor_name` (str, required)** + The ID of the Apify Actor to run (e.g., `"apify/rag-web-browser"`). Find Actors in the [Apify Store](https://apify.com/store). +- **`run_input` (dict, required at runtime)** + A dictionary of input parameters for the Actor. Examples: + - For `apify/rag-web-browser`: `{"query": "search term", "maxResults": 5}` + - Check each Actor’s [input schema](https://apify.com/apify/rag-web-browser/input-schema) for details. + +The tool adapts dynamically to the chosen Actor. ## Resources -- [Apify Platform](https://apify.com/) - Learn more about Apify and its ecosystem. -- [RAG Web Browser Actor](https://apify.com/apify/rag-web-browser) - A popular Actor for web searching and data retrieval. -- [Apify Actors Documentation](https://docs.apify.com/platform/actors) - Detailed guide to Apify Actors and their capabilities. -- [CrewAI Integration Guide](https://docs.apify.com/platform/integrations/crewai) - Official documentation for integrating Apify with CrewAI. +- **[Apify Platform](https://apify.com/)**: Explore the Apify ecosystem. +- **[RAG Web Browser Actor](https://apify.com/apify/rag-web-browser)**: Try this popular Actor for web data retrieval. +- **[Apify Actors Documentation](https://docs.apify.com/platform/actors)**: Learn how to use and create Actors. +- **[CrewAI Integration Guide](https://docs.apify.com/platform/integrations/crewai)**: Official guide for Apify and CrewAI. -The `ApifyActorsTool` empowers your CrewAI agents with robust web scraping and automation capabilities, streamlining complex workflows with minimal setup. +--- + +Streamline your CrewAI workflows with `ApifyActorsTool` - combine the power of Apify’s web scraping and automation with agent-based intelligence. diff --git a/src/crewai_tools/tools/apify_actors/apify_actors.py b/src/crewai_tools/tools/apify_actors/apify_actors.py index dc229214d..bb3e39ea1 100644 --- a/src/crewai_tools/tools/apify_actors/apify_actors.py +++ b/src/crewai_tools/tools/apify_actors/apify_actors.py @@ -20,7 +20,7 @@ class ApifyActorsTool(BaseTool): **kwargs: Arbitrary keyword arguments passed to BaseTool. Returns: - List[Dict[str, Any]]: Results from the actor execution. + List[Dict[str, Any]]: Results from the Actor execution. Raises: ValueError: If `APIFY_API_TOKEN` is not set or if the tool is not initialized. @@ -76,7 +76,7 @@ class ApifyActorsTool(BaseTool): """Run the Actor tool with the given input. Returns: - List[Dict[str, Any]]: Results from the actor execution. + List[Dict[str, Any]]: Results from the Actor execution. Raises: ValueError: If 'actor_tool' is not initialized. 
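The lazy-import refactor in the patches above (a module-level `try`/`except` replaced by a `TYPE_CHECKING` guard plus an import inside `__init__`) is a general pattern. A self-contained sketch of the same technique, with a hypothetical `heavy_sdk` package standing in for `langchain_apify`:

```python
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Seen only by static type checkers; never executed at runtime.
    from heavy_sdk import HeavyClient  # hypothetical package


class Wrapper:
    # String annotation, so the name resolves lazily and importing this
    # module succeeds even when heavy_sdk is not installed.
    client: "HeavyClient"

    def __init__(self) -> None:
        try:
            from heavy_sdk import HeavyClient  # deferred runtime import
        except ImportError as e:
            raise ImportError(
                "heavy_sdk is required. Install it with `pip install heavy-sdk`."
            ) from e
        self.client = HeavyClient()
```

The import cost is paid only when the wrapper is actually constructed, and the error message can point at the exact missing dependency.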
From 975c71a920fe7708637b433ae70fe0f931f3d227 Mon Sep 17 00:00:00 2001 From: MQ Date: Fri, 28 Feb 2025 10:29:50 +0100 Subject: [PATCH 290/391] fix example --- src/crewai_tools/tools/apify_actors/README.md | 2 +- src/crewai_tools/tools/apify_actors/apify_actors.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/crewai_tools/tools/apify_actors/README.md b/src/crewai_tools/tools/apify_actors/README.md index 9917c8d81..288d54be0 100644 --- a/src/crewai_tools/tools/apify_actors/README.md +++ b/src/crewai_tools/tools/apify_actors/README.md @@ -55,7 +55,7 @@ results = tool.run(run_input={"query": "What is CrewAI?", "maxResults": 5}) # Process the results for result in results: print(f"URL: {result['metadata']['url']}") - print(f"Content: {result['markdown'][:100]}...") # Snippet of markdown content + print(f"Content: {result.get('markdown', 'N/A')[:100]}...") # Snippet of markdown content ``` ### Expected output diff --git a/src/crewai_tools/tools/apify_actors/apify_actors.py b/src/crewai_tools/tools/apify_actors/apify_actors.py index bb3e39ea1..b5da1b7b3 100644 --- a/src/crewai_tools/tools/apify_actors/apify_actors.py +++ b/src/crewai_tools/tools/apify_actors/apify_actors.py @@ -34,8 +34,8 @@ class ApifyActorsTool(BaseTool): results = tool.run(run_input={"query": "What is CrewAI?", "maxResults": 5}) for result in results: - print(result['metadata']['url']) - print(result['markdown']) + print(f"URL: {result['metadata']['url']}") + print(f"Content: {result.get('markdown', 'N/A')[:100]}...") """ actor_tool: _ApifyActorsTool = Field(description="Apify Actor Tool") From f329b0d9d2839e60dd24e46230cd8c9c1451716a Mon Sep 17 00:00:00 2001 From: MQ Date: Fri, 28 Feb 2025 12:35:23 +0100 Subject: [PATCH 291/391] improve readme, add link to template --- src/crewai_tools/tools/apify_actors/README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/crewai_tools/tools/apify_actors/README.md b/src/crewai_tools/tools/apify_actors/README.md index 288d54be0..f443037a6 100644 --- a/src/crewai_tools/tools/apify_actors/README.md +++ b/src/crewai_tools/tools/apify_actors/README.md @@ -1,7 +1,9 @@ # ApifyActorsTool + Integrate [Apify Actors](https://apify.com/) into your CrewAI workflows with Ease. ## Description + The `ApifyActorsTool` seamlessly integrates [Apify Actors](https://apify.com/) - cloud-based web scraping and automation programs—into your CrewAI workflows. Whether you need to extract data, crawl websites, or automate tasks, this tool simplifies the process without requiring infrastructure management. Key features: @@ -12,9 +14,11 @@ Key features: For detailed integration guidance, see the [Apify CrewAI documentation](https://docs.apify.com/platform/integrations/crewai). ## Installation + To use `ApifyActorsTool`, install the required packages and configure your Apify API token. You’ll need an API token from Apify - see the [Apify API documentation](https://docs.apify.com/platform/integrations/api) for instructions. ### Steps + 1. **Install Dependencies** Use pip to install `crewai[tools]` and `langchain-apify`: ```bash @@ -41,6 +45,7 @@ To use `ApifyActorsTool`, install the required packages and configure your Apify Run `python -c "import langchain_apify; print('Setup complete')"` to ensure dependencies are installed. 
## Usage example + Here’s how to use `ApifyActorsTool` to run the [RAG Web Browser Actor](https://apify.com/apify/rag-web-browser) for web searching within a CrewAI workflow: ```python @@ -59,6 +64,7 @@ for result in results: ``` ### Expected output + ``` URL: https://www.example.com/crewai-intro Content: CrewAI is a framework for building AI-powered workflows... @@ -66,9 +72,12 @@ URL: https://docs.crewai.com/ Content: Official documentation for CrewAI... ``` +For a more comprehensive example with tool-agent, see [CrewAI Apify Actor template](https://apify.com/templates/python-crewai). + Try other Actors from the [Apify Store](https://apify.com/store) by changing `actor_name` and adjusting `run_input` per the Actor's input schema. ## Configuration + The `ApifyActorsTool` requires specific inputs to operate: - **`actor_name` (str, required)** @@ -81,6 +90,7 @@ The `ApifyActorsTool` requires specific inputs to operate: The tool adapts dynamically to the chosen Actor. ## Resources + - **[Apify Platform](https://apify.com/)**: Explore the Apify ecosystem. - **[RAG Web Browser Actor](https://apify.com/apify/rag-web-browser)**: Try this popular Actor for web data retrieval. - **[Apify Actors Documentation](https://docs.apify.com/platform/actors)**: Learn how to use and create Actors. From 7148c52bf6b14069aeb6486dee35c975688e2815 Mon Sep 17 00:00:00 2001 From: MQ Date: Fri, 28 Feb 2025 12:43:52 +0100 Subject: [PATCH 292/391] format --- src/crewai_tools/tools/apify_actors/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/crewai_tools/tools/apify_actors/README.md b/src/crewai_tools/tools/apify_actors/README.md index f443037a6..9260215cd 100644 --- a/src/crewai_tools/tools/apify_actors/README.md +++ b/src/crewai_tools/tools/apify_actors/README.md @@ -1,6 +1,6 @@ # ApifyActorsTool -Integrate [Apify Actors](https://apify.com/) into your CrewAI workflows with Ease. +Integrate [Apify Actors](https://apify.com/) into your CrewAI workflows with ease. ## Description @@ -65,7 +65,7 @@ for result in results: ### Expected output -``` +```text URL: https://www.example.com/crewai-intro Content: CrewAI is a framework for building AI-powered workflows... URL: https://docs.crewai.com/ @@ -87,7 +87,7 @@ The `ApifyActorsTool` requires specific inputs to operate: - For `apify/rag-web-browser`: `{"query": "search term", "maxResults": 5}` - Check each Actor’s [input schema](https://apify.com/apify/rag-web-browser/input-schema) for details. -The tool adapts dynamically to the chosen Actor. +The tool adapts dynamically to the chosen [Actor](https://docs.apify.com/platform/actors). 
## Resources From 3df25e65d54da49c53979fe8bb0ade8d9aced927 Mon Sep 17 00:00:00 2001 From: MQ Date: Fri, 28 Feb 2025 15:40:08 +0100 Subject: [PATCH 293/391] fix --- src/crewai_tools/tools/apify_actors/apify_actors.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/apify_actors/apify_actors.py b/src/crewai_tools/tools/apify_actors/apify_actors.py index b5da1b7b3..9fc2d965b 100644 --- a/src/crewai_tools/tools/apify_actors/apify_actors.py +++ b/src/crewai_tools/tools/apify_actors/apify_actors.py @@ -37,7 +37,7 @@ class ApifyActorsTool(BaseTool): print(f"URL: {result['metadata']['url']}") print(f"Content: {result.get('markdown', 'N/A')[:100]}...") """ - actor_tool: _ApifyActorsTool = Field(description="Apify Actor Tool") + actor_tool: '_ApifyActorsTool' = Field(description="Apify Actor Tool") def __init__( self, @@ -67,10 +67,10 @@ class ApifyActorsTool(BaseTool): "name": actor_tool.name, "description": actor_tool.description, "args_schema": actor_tool.args_schema, + "actor_tool": actor_tool, } ) super().__init__(*args, **kwargs) - self.actor_tool = actor_tool def _run(self, run_input: Dict[str, Any]) -> List[Dict[str, Any]]: """Run the Actor tool with the given input. From 9c7c7d3d75ea21632b6e7f786001bc58d9f576b6 Mon Sep 17 00:00:00 2001 From: MQ Date: Tue, 4 Mar 2025 11:10:41 +0100 Subject: [PATCH 294/391] apify_actors -> apify_actors_tool, refactor readme --- src/crewai_tools/tools/__init__.py | 2 +- src/crewai_tools/tools/apify_actors/README.md | 101 ------------------ .../tools/apify_actors_tool/README.md | 86 +++++++++++++++ .../apify_actors_tool.py} | 42 ++++---- 4 files changed, 108 insertions(+), 123 deletions(-) delete mode 100644 src/crewai_tools/tools/apify_actors/README.md create mode 100644 src/crewai_tools/tools/apify_actors_tool/README.md rename src/crewai_tools/tools/{apify_actors/apify_actors.py => apify_actors_tool/apify_actors_tool.py} (65%) diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 34b200c7a..b1c3b6d68 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -1,5 +1,5 @@ from .ai_mind_tool.ai_mind_tool import AIMindTool -from .apify_actors.apify_actors import ApifyActorsTool +from .apify_actors_tool.apify_actors_tool import ApifyActorsTool from .brave_search_tool.brave_search_tool import BraveSearchTool from .browserbase_load_tool.browserbase_load_tool import BrowserbaseLoadTool from .code_docs_search_tool.code_docs_search_tool import CodeDocsSearchTool diff --git a/src/crewai_tools/tools/apify_actors/README.md b/src/crewai_tools/tools/apify_actors/README.md deleted file mode 100644 index 9260215cd..000000000 --- a/src/crewai_tools/tools/apify_actors/README.md +++ /dev/null @@ -1,101 +0,0 @@ -# ApifyActorsTool - -Integrate [Apify Actors](https://apify.com/) into your CrewAI workflows with ease. - -## Description - -The `ApifyActorsTool` seamlessly integrates [Apify Actors](https://apify.com/) - cloud-based web scraping and automation programs—into your CrewAI workflows. Whether you need to extract data, crawl websites, or automate tasks, this tool simplifies the process without requiring infrastructure management. - -Key features: -- **Run Actors Directly**: Execute Actors like the [RAG Web Browser](https://apify.com/apify/rag-web-browser) within CrewAI agents. -- **Real-Time Data**: Ideal for tasks requiring up-to-date web data or automation. -- **Explore More**: Discover additional Actors in the [Apify Store](https://apify.com/store). 
- -For detailed integration guidance, see the [Apify CrewAI documentation](https://docs.apify.com/platform/integrations/crewai). - -## Installation - -To use `ApifyActorsTool`, install the required packages and configure your Apify API token. You’ll need an API token from Apify - see the [Apify API documentation](https://docs.apify.com/platform/integrations/api) for instructions. - -### Steps - -1. **Install Dependencies** - Use pip to install `crewai[tools]` and `langchain-apify`: - ```bash - pip install 'crewai[tools]' langchain-apify - ``` - Alternatively, with `uv`: - ```bash - uv pip install 'crewai[tools]' langchain-apify - ``` - -2. **Set Your API Token** - Export the token as an environment variable: - - On Linux/macOS: - ```bash - export APIFY_API_TOKEN='your-api-token-here' - ``` - - On Windows (Command Prompt): - ```cmd - set APIFY_API_TOKEN=your-api-token-here - ``` - - Or add it to your `.env` file and load it with a library like `python-dotenv`. - -3. **Verify Installation** - Run `python -c "import langchain_apify; print('Setup complete')"` to ensure dependencies are installed. - -## Usage example - -Here’s how to use `ApifyActorsTool` to run the [RAG Web Browser Actor](https://apify.com/apify/rag-web-browser) for web searching within a CrewAI workflow: - -```python -from crewai_tools import ApifyActorsTool - -# Initialize the tool with an Apify Actor -tool = ApifyActorsTool(actor_name="apify/rag-web-browser") - -# Run the tool with input parameters -results = tool.run(run_input={"query": "What is CrewAI?", "maxResults": 5}) - -# Process the results -for result in results: - print(f"URL: {result['metadata']['url']}") - print(f"Content: {result.get('markdown', 'N/A')[:100]}...") # Snippet of markdown content -``` - -### Expected output - -```text -URL: https://www.example.com/crewai-intro -Content: CrewAI is a framework for building AI-powered workflows... -URL: https://docs.crewai.com/ -Content: Official documentation for CrewAI... -``` - -For a more comprehensive example with tool-agent, see [CrewAI Apify Actor template](https://apify.com/templates/python-crewai). - -Try other Actors from the [Apify Store](https://apify.com/store) by changing `actor_name` and adjusting `run_input` per the Actor's input schema. - -## Configuration - -The `ApifyActorsTool` requires specific inputs to operate: - -- **`actor_name` (str, required)** - The ID of the Apify Actor to run (e.g., `"apify/rag-web-browser"`). Find Actors in the [Apify Store](https://apify.com/store). -- **`run_input` (dict, required at runtime)** - A dictionary of input parameters for the Actor. Examples: - - For `apify/rag-web-browser`: `{"query": "search term", "maxResults": 5}` - - Check each Actor’s [input schema](https://apify.com/apify/rag-web-browser/input-schema) for details. - -The tool adapts dynamically to the chosen [Actor](https://docs.apify.com/platform/actors). - -## Resources - -- **[Apify Platform](https://apify.com/)**: Explore the Apify ecosystem. -- **[RAG Web Browser Actor](https://apify.com/apify/rag-web-browser)**: Try this popular Actor for web data retrieval. -- **[Apify Actors Documentation](https://docs.apify.com/platform/actors)**: Learn how to use and create Actors. -- **[CrewAI Integration Guide](https://docs.apify.com/platform/integrations/crewai)**: Official guide for Apify and CrewAI. - ---- - -Streamline your CrewAI workflows with `ApifyActorsTool` - combine the power of Apify’s web scraping and automation with agent-based intelligence. 
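Because the public symbol is re-exported from the package `__init__` modules updated at the top of this patch, the directory rename should be invisible to downstream imports. A quick smoke check under that assumption:

```python
# The public import path is unchanged by the apify_actors -> apify_actors_tool rename.
from crewai_tools import ApifyActorsTool

print(ApifyActorsTool.__module__)
# Expected on this branch: crewai_tools.tools.apify_actors_tool.apify_actors_tool
```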
diff --git a/src/crewai_tools/tools/apify_actors_tool/README.md b/src/crewai_tools/tools/apify_actors_tool/README.md new file mode 100644 index 000000000..f075a896a --- /dev/null +++ b/src/crewai_tools/tools/apify_actors_tool/README.md @@ -0,0 +1,86 @@ +# ApifyActorsTool + +Integrate [Apify Actors](https://apify.com/) into your CrewAI workflows. + +## Description + +ApifyActorsTool connects [Apify Actors](https://apify.com/), cloud-based programs for web scraping and automation, to your CrewAI workflows. You can extract data, crawl websites, and automate tasks, all without requiring infrastructure management. + +**Key features**: +- **Run Actors** directly, like the [RAG Web Browser](https://apify.com/apify/rag-web-browser), with CrewAI agents. +- **Access real-time data** for tasks that need fresh web content or automation. + +See the [Apify CrewAI documentation](https://docs.apify.com/platform/integrations/crewai) for a detailed integration guide. + +## Installation + +To use ApifyActorsTool, install the necessary packages and set up your Apify API token. Follow the [Apify API documentation](https://docs.apify.com/platform/integrations/api) for steps to obtain the token. + +### Steps + +1. **Install dependencies** + Use pip to install `crewai[tools]` and `langchain-apify`: + ```bash + pip install 'crewai[tools]' langchain-apify + ``` + Or, with `uv`: + ```bash + uv pip install 'crewai[tools]' langchain-apify + ``` + +2. **Set your API token** + Export the token as an environment variable: + ```bash + export APIFY_API_TOKEN='your-api-token-here' + ``` + +## Usage example + +Use ApifyActorsTool to run the [RAG Web Browser Actor](https://apify.com/apify/rag-web-browser) and perform a web search: + +```python +from crewai_tools import ApifyActorsTool + +# Initialize the tool with an Apify Actor +tool = ApifyActorsTool(actor_name="apify/rag-web-browser") + +# Run the tool with input parameters +results = tool.run(run_input={"query": "What is CrewAI?", "maxResults": 5}) + +# Process the results +for result in results: + print(f"URL: {result['metadata']['url']}") + print(f"Content: {result.get('markdown', 'N/A')[:100]}...") +``` + +### Expected output + +Here is the output from running the code above: + +```text +URL: https://www.example.com/crewai-intro +Content: CrewAI is a framework for building AI-powered workflows... +URL: https://docs.crewai.com/ +Content: Official documentation for CrewAI... +``` + +Experiment with other Actors from the [Apify Store](https://apify.com/store) by updating `actor_name` and `run_input` based on each Actor's input schema. + +For an example of usage with agents, see the [CrewAI Apify Actor template](https://apify.com/templates/python-crewai). + +## Configuration + +ApifyActorsTool requires these inputs to work: + +- **`actor_name`** + The ID of the Apify Actor to run, e.g., `"apify/rag-web-browser"`. Browse options in the [Apify Store](https://apify.com/store). +- **`run_input`** + A dictionary of input parameters for the Actor. Examples: + - For `apify/rag-web-browser`: `{"query": "search term", "maxResults": 5}` + - See each Actor's [input schema](https://apify.com/apify/rag-web-browser/input-schema) for details. + +## Resources + +- **[Apify Platform](https://apify.com/)**: Dive into the Apify ecosystem. +- **[RAG Web Browser Actor](https://apify.com/apify/rag-web-browser)**: Test this popular Actor for web data retrieval. +- **[CrewAI Integration Guide](https://docs.apify.com/platform/integrations/crewai)**: Follow the official guide for Apify and CrewAI. 
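The new README's claim that the tool adapts dynamically rests on `__init__` copying `name`, `description`, and `args_schema` from the wrapped langchain tool, as the rename diff below shows. A short way to inspect what a given Actor expects, assuming `APIFY_API_TOKEN` is set and `langchain-apify` is installed:

```python
from crewai_tools import ApifyActorsTool

tool = ApifyActorsTool(actor_name="apify/rag-web-browser")

# These are copied from the underlying langchain-apify tool, so they
# reflect the Actor's own input schema rather than a fixed signature.
print(tool.name)
print(tool.description)
print(tool.args_schema)
```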
diff --git a/src/crewai_tools/tools/apify_actors/apify_actors.py b/src/crewai_tools/tools/apify_actors_tool/apify_actors_tool.py similarity index 65% rename from src/crewai_tools/tools/apify_actors/apify_actors.py rename to src/crewai_tools/tools/apify_actors_tool/apify_actors_tool.py index 9fc2d965b..37ae7312b 100644 --- a/src/crewai_tools/tools/apify_actors/apify_actors.py +++ b/src/crewai_tools/tools/apify_actors_tool/apify_actors_tool.py @@ -9,34 +9,34 @@ if TYPE_CHECKING: class ApifyActorsTool(BaseTool): """Tool that runs Apify Actors. - To use, you should have the environment variable `APIFY_API_TOKEN` set - with your API key. + To use, you should have the environment variable `APIFY_API_TOKEN` set + with your API key. - For details, see https://docs.apify.com/platform/integrations/crewai + For details, see https://docs.apify.com/platform/integrations/crewai - Args: - actor_name (str): The name of the Apify Actor to run. - *args: Variable length argument list passed to BaseTool. - **kwargs: Arbitrary keyword arguments passed to BaseTool. + Args: + actor_name (str): The name of the Apify Actor to run. + *args: Variable length argument list passed to BaseTool. + **kwargs: Arbitrary keyword arguments passed to BaseTool. - Returns: - List[Dict[str, Any]]: Results from the Actor execution. + Returns: + List[Dict[str, Any]]: Results from the Actor execution. - Raises: - ValueError: If `APIFY_API_TOKEN` is not set or if the tool is not initialized. - ImportError: If `langchain_apify` package is not installed. + Raises: + ValueError: If `APIFY_API_TOKEN` is not set or if the tool is not initialized. + ImportError: If `langchain_apify` package is not installed. - Example: - .. code-block:: python - from crewai_tools import ApifyActorsTool + Example: + .. code-block:: python + from crewai_tools import ApifyActorsTool - tool = ApifyActorsTool(actor_name="apify/rag-web-browser") + tool = ApifyActorsTool(actor_name="apify/rag-web-browser") - results = tool.run(run_input={"query": "What is CrewAI?", "maxResults": 5}) - for result in results: - print(f"URL: {result['metadata']['url']}") - print(f"Content: {result.get('markdown', 'N/A')[:100]}...") - """ + results = tool.run(run_input={"query": "What is CrewAI?", "maxResults": 5}) + for result in results: + print(f"URL: {result['metadata']['url']}") + print(f"Content: {result.get('markdown', 'N/A')[:100]}...") + """ actor_tool: '_ApifyActorsTool' = Field(description="Apify Actor Tool") def __init__( From 7718df5437458c655c00b64481de9c70acf06ac2 Mon Sep 17 00:00:00 2001 From: MQ Date: Tue, 4 Mar 2025 12:45:52 +0100 Subject: [PATCH 295/391] minor consistency improvements --- src/crewai_tools/tools/apify_actors_tool/README.md | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/src/crewai_tools/tools/apify_actors_tool/README.md b/src/crewai_tools/tools/apify_actors_tool/README.md index f075a896a..aafd77c54 100644 --- a/src/crewai_tools/tools/apify_actors_tool/README.md +++ b/src/crewai_tools/tools/apify_actors_tool/README.md @@ -4,7 +4,7 @@ Integrate [Apify Actors](https://apify.com/) into your CrewAI workflows. ## Description -ApifyActorsTool connects [Apify Actors](https://apify.com/), cloud-based programs for web scraping and automation, to your CrewAI workflows. You can extract data, crawl websites, and automate tasks, all without requiring infrastructure management. +The `ApifyActorsTool` connects [Apify Actors](https://apify.com/), cloud-based programs for web scraping and automation, to your CrewAI workflows. 
You can extract data, crawl websites, and automate tasks, all without requiring infrastructure management. **Key features**: - **Run Actors** directly, like the [RAG Web Browser](https://apify.com/apify/rag-web-browser), with CrewAI agents. @@ -14,19 +14,15 @@ See the [Apify CrewAI documentation](https://docs.apify.com/platform/integration ## Installation -To use ApifyActorsTool, install the necessary packages and set up your Apify API token. Follow the [Apify API documentation](https://docs.apify.com/platform/integrations/api) for steps to obtain the token. +To use `ApifyActorsTool`, install the necessary packages and set up your Apify API token. Follow the [Apify API documentation](https://docs.apify.com/platform/integrations/api) for steps to obtain the token. ### Steps 1. **Install dependencies** - Use pip to install `crewai[tools]` and `langchain-apify`: + Install `crewai[tools]` and `langchain-apify`: ```bash pip install 'crewai[tools]' langchain-apify ``` - Or, with `uv`: - ```bash - uv pip install 'crewai[tools]' langchain-apify - ``` 2. **Set your API token** Export the token as an environment variable: @@ -36,7 +32,7 @@ To use ApifyActorsTool, install the necessary packages and set up your Apify API ## Usage example -Use ApifyActorsTool to run the [RAG Web Browser Actor](https://apify.com/apify/rag-web-browser) and perform a web search: +Use `ApifyActorsTool` to run the [RAG Web Browser Actor](https://apify.com/apify/rag-web-browser) and perform a web search: ```python from crewai_tools import ApifyActorsTool @@ -70,7 +66,7 @@ For an example of usage with agents, see the [CrewAI Apify Actor template](https ## Configuration -ApifyActorsTool requires these inputs to work: +The `ApifyActorsTool` requires these inputs to work: - **`actor_name`** The ID of the Apify Actor to run, e.g., `"apify/rag-web-browser"`. Browse options in the [Apify Store](https://apify.com/store). From cad804e87bd2f7e915aefa9d0930da67fee7b1d4 Mon Sep 17 00:00:00 2001 From: MQ Date: Fri, 7 Mar 2025 11:00:54 +0100 Subject: [PATCH 296/391] update readme --- .../tools/apify_actors_tool/README.md | 47 ++++++++++++------- 1 file changed, 30 insertions(+), 17 deletions(-) diff --git a/src/crewai_tools/tools/apify_actors_tool/README.md b/src/crewai_tools/tools/apify_actors_tool/README.md index aafd77c54..7e465037c 100644 --- a/src/crewai_tools/tools/apify_actors_tool/README.md +++ b/src/crewai_tools/tools/apify_actors_tool/README.md @@ -1,16 +1,13 @@ # ApifyActorsTool -Integrate [Apify Actors](https://apify.com/) into your CrewAI workflows. +Integrate [Apify Actors](https://apify.com/actors) into your CrewAI workflows. ## Description -The `ApifyActorsTool` connects [Apify Actors](https://apify.com/), cloud-based programs for web scraping and automation, to your CrewAI workflows. You can extract data, crawl websites, and automate tasks, all without requiring infrastructure management. +The `ApifyActorsTool` connects [Apify Actors](https://apify.com/actors), cloud-based programs for web scraping and automation, to your CrewAI workflows. +Use any of the 4,000+ Actors on [Apify Store](https://apify.com/store) for use cases such as extracting data from social media, search engines, online maps, e-commerce sites, travel portals, or general websites. -**Key features**: -- **Run Actors** directly, like the [RAG Web Browser](https://apify.com/apify/rag-web-browser), with CrewAI agents. -- **Access real-time data** for tasks that need fresh web content or automation. 
- -See the [Apify CrewAI documentation](https://docs.apify.com/platform/integrations/crewai) for a detailed integration guide. +For details, see the [Apify CrewAI integration](https://docs.apify.com/platform/integrations/crewai) in Apify documentation. ## Installation @@ -32,7 +29,7 @@ To use `ApifyActorsTool`, install the necessary packages and set up your Apify A ## Usage example -Use `ApifyActorsTool` to run the [RAG Web Browser Actor](https://apify.com/apify/rag-web-browser) and perform a web search: +Use the `ApifyActorsTool` manually to run the [RAG Web Browser Actor](https://apify.com/apify/rag-web-browser) to perform a web search: ```python from crewai_tools import ApifyActorsTool @@ -60,23 +57,39 @@ URL: https://docs.crewai.com/ Content: Official documentation for CrewAI... ``` -Experiment with other Actors from the [Apify Store](https://apify.com/store) by updating `actor_name` and `run_input` based on each Actor's input schema. +The `ApifyActorsTool` automatically fetches the Actor definition and input schema from Apify using the provided `actor_name` and then constructs the tool description and argument schema. This means you need to specify only a valid `actor_name`, and the tool handles the rest when used with agents—no need to specify the `run_input`. Here's how it works: -For an example of usage with agents, see the [CrewAI Apify Actor template](https://apify.com/templates/python-crewai). +```python +from crewai import Agent +from crewai_tools import ApifyActorsTool + +rag_browser = ApifyActorsTool(actor_name="apify/rag-web-browser") + +agent = Agent( + role="Research Analyst", + goal="Find and summarize information about specific topics", + backstory="You are an experienced researcher with attention to detail", + tools=[rag_browser], +) +``` + +You can run other Actors from [Apify Store](https://apify.com/store) simply by changing the `actor_name` and, when using it manually, adjusting the `run_input` based on the Actor input schema. + +For an example of usage with agents, see the [CrewAI Actor template](https://apify.com/templates/python-crewai). ## Configuration The `ApifyActorsTool` requires these inputs to work: - **`actor_name`** - The ID of the Apify Actor to run, e.g., `"apify/rag-web-browser"`. Browse options in the [Apify Store](https://apify.com/store). + The ID of the Apify Actor to run, e.g., `"apify/rag-web-browser"`. Browse all Actors on [Apify Store](https://apify.com/store). - **`run_input`** - A dictionary of input parameters for the Actor. Examples: - - For `apify/rag-web-browser`: `{"query": "search term", "maxResults": 5}` - - See each Actor's [input schema](https://apify.com/apify/rag-web-browser/input-schema) for details. + A dictionary of input parameters for the Actor when running the tool manually. + - For example, for the `apify/rag-web-browser` Actor: `{"query": "search term", "maxResults": 5}` + - See the Actor's [input schema](https://apify.com/apify/rag-web-browser/input-schema) for the list of input parameters. ## Resources -- **[Apify Platform](https://apify.com/)**: Dive into the Apify ecosystem. -- **[RAG Web Browser Actor](https://apify.com/apify/rag-web-browser)**: Test this popular Actor for web data retrieval. -- **[CrewAI Integration Guide](https://docs.apify.com/platform/integrations/crewai)**: Follow the official guide for Apify and CrewAI. +- **[Apify](https://apify.com/)**: Explore the Apify platform. +- **[RAG Web Browser Actor](https://apify.com/apify/rag-web-browser)**: A popular Actor for web search for LLMs. 
-- **[CrewAI Integration Guide](https://docs.apify.com/platform/integrations/crewai)**: Follow the official guide for Apify and CrewAI.
+- **[CrewAI Integration Guide](https://docs.apify.com/platform/integrations/crewai)**: Follow the official guide for integrating Apify and CrewAI.

From 5af2108307bcc7c2cca23c154acf02093a31540f Mon Sep 17 00:00:00 2001
From: Shady Ali <121682078+SHIXOOM@users.noreply.github.com>
Date: Sat, 8 Mar 2025 09:35:23 +0200
Subject: [PATCH 297/391] Fix: FireCrawl FirecrawlCrawlWebsiteTool for
 crawling. FireCrawl API does not recognize sent parameters (HTTPError:
 Unexpected error during start crawl job: Status code 400. Bad Request -
 [{'code': 'unrecognized_keys', 'keys': ['crawlerOptions', 'timeout'],
 'path': [], 'message': 'Unrecognized key in body -- please review the v1
 API documentation for request body changes'}]) because it has been updated
 to v1. I updated the sent parameters to match v1 and updated their
 description in the readme file

---
 .../firecrawl_crawl_website_tool/README.md         | 11 ++++------
 .../firecrawl_crawl_website_tool.py                | 21 +++++++++++++------
 2 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/README.md b/src/crewai_tools/tools/firecrawl_crawl_website_tool/README.md
index 46d011602..f0bf66918 100644
--- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/README.md
+++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/README.md
@@ -31,12 +31,9 @@ tool = FirecrawlCrawlWebsiteTool(url='firecrawl.dev')
 - `onlyMainContent`: Optional. Only return the main content of the page excluding headers, navs, footers, etc.
 - `includeHtml`: Optional. Include the raw HTML content of the page. Will output a html key in the response.
 - `crawler_options`: Optional. Options for controlling the crawling behavior.
-  - `includes`: Optional. URL patterns to include in the crawl.
-  - `exclude`: Optional. URL patterns to exclude from the crawl.
-  - `generateImgAltText`: Optional. Generate alt text for images using LLMs (requires a paid plan).
-  - `returnOnlyUrls`: Optional. If true, returns only the URLs as a list in the crawl status. Note: the response will be a list of URLs inside the data, not a list of documents.
-  - `maxDepth`: Optional. Maximum depth to crawl. Depth 1 is the base URL, depth 2 includes the base URL and its direct children, and so on.
-  - `mode`: Optional. The crawling mode to use. Fast mode crawls 4x faster on websites without a sitemap but may not be as accurate and shouldn't be used on heavily JavaScript-rendered websites.
+  - `maxDepth`: Optional. Maximum depth to crawl. Depth 1 is the base URL, depth 2 includes the base URL and its direct children, and so on.
   - `limit`: Optional. Maximum number of pages to crawl.
-  - `timeout`: Optional. Timeout in milliseconds for the crawling operation.
+  - `scrapeOptions`: Optional. Additional options for controlling the crawler.
+    - `formats`: Optional. Formats for the page's content to be returned (e.g., markdown, html, screenshot, links).
+  - `timeout`: Optional. Timeout in milliseconds for the crawling operation.
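To make the v1 option shape concrete, here is a minimal sketch of invoking the tool with explicit `crawler_options`. The option values are illustrative, and the call assumes that `run` forwards keyword arguments to `_run` as crewai's `BaseTool` does (see the code change below):

```python
from crewai_tools import FirecrawlCrawlWebsiteTool

tool = FirecrawlCrawlWebsiteTool(url="firecrawl.dev")

# v1-style options: scrape-level settings now live under "scrapeOptions".
crawler_options = {
    "maxDepth": 2,
    "limit": 10,
    "scrapeOptions": {"formats": ["markdown", "links"]},
}

# The timeout is passed separately; the tool injects it into scrapeOptions.
result = tool.run(
    url="https://firecrawl.dev",
    crawler_options=crawler_options,
    timeout=30000,
)
print(result)
```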
diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py
index b95199c84..878063953 100644
--- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py
+++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py
@@ -68,13 +68,22 @@ class FirecrawlCrawlWebsiteTool(BaseTool):
         timeout: Optional[int] = 30000,
     ):
         if crawler_options is None:
-            crawler_options = {}
+            crawler_options = {
+                "maxDepth": 2,
+                "limit": 10,
+                "scrapeOptions": {
+                    # same options as in /scrape
+                    "formats": ["markdown", "screenshot", "links"],
+                    "timeout": timeout
+                }
+            }

-        options = {
-            "crawlerOptions": crawler_options,
-            "timeout": timeout,
-        }
-        return self._firecrawl.crawl_url(url, options)
+        else:
+            # The v1 API expects the timeout inside scrapeOptions, so inject
+            # it there, creating the key if the caller's options omitted it,
+            # instead of assuming "scrapeOptions" is already present.
+            crawler_options.setdefault("scrapeOptions", {})["timeout"] = timeout
+
+        return self._firecrawl.crawl_url(url, crawler_options)

 try:

From c19591a6893c595f0292a41a160e2804fadf55f9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Moura?=
Date: Sun, 9 Mar 2025 04:01:12 -0700
Subject: [PATCH 298/391] updating readme

---
 README.md                 | 153 +++++++++++++++----------------
 assets/crew_only_logo.png | Bin 96169 -> 14642 bytes
 assets/crewai_logo.png    | Bin 99658 -> 14642 bytes
 3 files changed, 61 insertions(+), 92 deletions(-)

diff --git a/README.md b/README.md
index 43cdc9b57..d68d5ff73 100644
--- a/README.md
+++ b/README.md
@@ -4,140 +4,109 @@
-# **crewAI Tools** -Welcome to crewAI Tools! This repository provides a comprehensive guide for setting up sophisticated tools for [crewAI](https://github.com/crewAIInc/crewAI) agents, empowering your AI solutions with bespoke tooling. +# CrewAI Tools -In the realm of CrewAI agents, tools are pivotal for enhancing functionality. This guide outlines the steps to equip your agents with an arsenal of ready-to-use tools and the methodology to craft your own. +Empower your CrewAI agents with powerful, customizable tools to elevate their capabilities and tackle sophisticated, real-world tasks. -
+CrewAI Tools provide the essential functionality to extend your agents, helping you rapidly enhance your automations with reliable, ready-to-use tools or custom-built solutions tailored precisely to your needs. -

+--- -[Homepage](https://www.crewai.io/) | [Documentation](https://docs.crewai.com/) | [Chat with Docs](https://chatg.pt/DWjSBZn) | [Examples](https://github.com/crewAIInc/crewAI-examples) | [Discord](https://discord.com/invite/X4JWnZnxPb) | [Discourse](https://community.crewai.com/) +## Quick Links -

+[Homepage](https://www.crewai.com/) | [Documentation](https://docs.crewai.com/) | [Examples](https://github.com/crewAIInc/crewAI-examples) | [Community](https://community.crewai.com/) -
-
-## Table of contents
-
-- [Creating Your Tools](#creating-your-tools)
-  - [Subclassing `BaseTool`](#subclassing-basetool)
-  - [Utilizing the `tool` Decorator](#utilizing-the-tool-decorator)
-- [Contribution Guidelines](#contribution-guidelines)
-- [Development Setup](#development-setup)
+---

 ## Available Tools

-crewAI Tools provides a wide range of pre-built tools, including:
+CrewAI provides an extensive collection of powerful tools ready to enhance your agents:

-- File operations (FileWriterTool, FileReadTool)
-- Web scraping (ScrapeWebsiteTool, SeleniumScrapingTool)
-- Database interactions (PGSearchTool, MySQLSearchTool)
-- API integrations (SerperApiTool, EXASearchTool)
-- AI-powered tools (DallETool, VisionTool)
-- And many more!
+- **File Management**: `FileReadTool`, `FileWriterTool`
+- **Web Scraping**: `ScrapeWebsiteTool`, `SeleniumScrapingTool`
+- **Database Integrations**: `PGSearchTool`, `MySQLSearchTool`
+- **API Integrations**: `SerperApiTool`, `EXASearchTool`
+- **AI-powered Tools**: `DallETool`, `VisionTool`

-For a complete list and detailed documentation of each tool, please refer to the individual tool README files in the repository.
+And many more robust tools to simplify your agent integrations.

-## Creating Your Tools
+---

-Tools are always expected to return strings, as they are meant to be used by the agents to generate responses.
+## Creating Custom Tools

-There are three ways to create tools for crewAI agents:
-- [Subclassing `BaseTool`](#subclassing-basetool)
-- [Using the `tool` decorator](#utilizing-the-tool-decorator)
+CrewAI offers two straightforward approaches to creating custom tools:

 ### Subclassing `BaseTool`

+Define your tool by subclassing:
+
 ```python
 from crewai.tools import BaseTool

 class MyCustomTool(BaseTool):
-    name: str = "Name of my tool"
-    description: str = "Clear description for what this tool is useful for, your agent will need this information to use it."
+    name: str = "Tool Name"
+    description: str = "Detailed description here."

-    def _run(self, argument: str) -> str:
-        # Implementation goes here
-        pass
+    def _run(self, *args, **kwargs) -> str:
+        # Your tool logic here; tools should return a string
+        return "tool output"
 ```

-Define a new class inheriting from `BaseTool`, specifying `name`, `description`, and the `_run` method for operational logic.
-
-
-### Utilizing the `tool` Decorator
+### Using the `tool` Decorator

-For a simpler approach, create a `Tool` object directly with the required attributes and a functional logic.
+Quickly create lightweight tools using decorators:

 ```python
-from crewai.tools import BaseTool
-@tool("Name of my tool")
-def my_tool(question: str) -> str:
-    """Clear description for what this tool is useful for, your agent will need this information to use it."""
-    # Function logic here
+from crewai.tools import tool
+
+@tool("Tool Name")
+def my_custom_function(text: str) -> str:
+    """Detailed description here; the docstring is required."""
+    # Tool logic here
+    return f"Processed: {text}"
 ```

-The `tool` decorator simplifies the process, transforming functions into tools with minimal overhead.
+---
+
+## Why Use CrewAI Tools?
+
+- **Simplicity & Flexibility**: Easy-to-use yet powerful enough for complex workflows.
+- **Rapid Integration**: Seamlessly incorporate external services, APIs, and databases.
+- **Enterprise Ready**: Built for stability, performance, and consistent results.
+
+---

 ## Contribution Guidelines

-We welcome contributions! Here's how you can help:
+We welcome contributions from the community!

-1. Fork the repository
-2. Create a feature branch (`git checkout -b feature/AmazingFeature`)
-3. Commit your changes (`git commit -m 'Add some AmazingFeature'`)
-4. Push to the branch (`git push origin feature/AmazingFeature`)
-5. Open a Pull Request
+1. Fork and clone the repository.
+2. Create a new branch (`git checkout -b feature/my-feature`).
+3. Commit your changes (`git commit -m 'Add my feature'`).
+4. Push your branch (`git push origin feature/my-feature`).
+5. Open a pull request.

-Please ensure your code adheres to our coding standards and includes appropriate tests.
+---

-## **Development Setup**
+## Developer Quickstart

-**Installing Dependencies:**
-
-```bash
-uv sync
+```shell
+pip install 'crewai[tools]'
 ```

-**Activating Virtual Environment:**
+### Development Setup

-```bash
-uv venv
-source .venv/bin/activate
-```
+- Install dependencies: `uv sync`
+- Run tests: `uv run pytest`
+- Run static type checking: `uv run pyright`
+- Set up pre-commit hooks: `pre-commit install`

-**Setting Up Pre-commit Hooks:**
+---

-```bash
-pre-commit install
-```
+## Support and Community

-**Running Tests:**
+Join our rapidly growing community and receive real-time support:

-```bash
-uv run pytest
-```
+- [Discourse](https://community.crewai.com/)
+- [Open an Issue](https://github.com/crewAIInc/crewAI/issues)

-**Static Type Checking:**
+Build smarter, faster, and more powerful AI solutions—powered by CrewAI Tools.

-```bash
-uv run pyright
-```
-
-**Packaging:**
-
-```bash
-uv build
-```
-
-**Local Installation:**
-
-```bash
-pip install dist/*.tar.gz
-```
-
-Thank you for your interest in enhancing the capabilities of AI agents through advanced tooling. Your contributions make a significant impact.
-
-## Contact
-
-For questions or support, please join our [Discord community](https://discord.com/invite/X4JWnZnxPb), [Discourse](https://community.crewai.com/) or open an issue in this repository.
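Here is a minimal, self-contained sketch of the subclassing approach described above, wired into an agent; the `WordCountTool` class and the agent fields are illustrative examples, assuming the `Agent` interface shown in the Apify usage example earlier in this series:

```python
from crewai import Agent
from crewai.tools import BaseTool


class WordCountTool(BaseTool):
    name: str = "Word counter"
    description: str = "Counts the number of words in a piece of text."

    def _run(self, text: str) -> str:
        # Tools return strings so agents can feed the result back to the LLM.
        return f"The text contains {len(text.split())} words."


editor = Agent(
    role="Editor",
    goal="Review drafts and report basic statistics",
    backstory="You are a meticulous copy editor.",
    tools=[WordCountTool()],
)
```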
diff --git a/assets/crew_only_logo.png b/assets/crew_only_logo.png
index f769da538a5f2f1fa615293e175a758c8b3ba978..bc44909d0566b2c0466b7be688cad684b7801c8d 100644
GIT binary patch
literal 14642
[binary PNG data omitted]
z;;jsY;#{QEl@yPRfyL&3ShWEC8QSnoYP?tje|0fDPKCOpfRyQWTOwp-_lN-I4cFPf z6|ivpL&rlBK2)@%e+T1yc{7>M<3UNeGi``ljFcXD4`a!$a{ynE( zCvL+t4AO^Su=r=Z7{p?NV>Rk_|7=qdp8#z4jcRLXH4Tj+^MSef`4X>F@o(b*^kZSy z$7qU_br~?Ls;Oa53Vi+?fNS6Zp(OY~d6S;Q=Z@hh5H2@=LD|b6tsgV+_NEOcE@mfd zVG=oE)7H=k<2&oZTrluvl#Ib6%=s8A>Gb*H67_SDwNt zS_DMIj35@s!el$vtx03+dnLy**jkN$ns|%;60M zPVv&y7kY4p>F{Rv$intKyblf^y@KM=)=^VExBDyPP5b-Y6}Wg@U1dXR5OE!cpGZbd zlc2?2Abx-GHlWzfg(7<8j~@=MsQC1AVOE|NOVrxBoJpwGz?f!%ks(?CnI;;z<>Sq9 zy#tv5NfQCEAV7-xySywbKn)}O^m^OoEm>W+a^eQ8#H#Cpe%Pr>q%v&?Ffd?ik63^T z&dBpn-^!$|7Xbm`d{#|K<*qS8vZ(E_B+KkhMvo0N+w33SA2>85se|;Cf2FRWp~`nu zRmISCqp>PLb^Fs&IAXgU0R-Su`ji+949sGiBZaE!b)*__vaGGGpMUBY8)hU$V)InE z%5peC=JB{8b!Yh^TbM6y!*_yyE%+AmReZ9>jSa(BV^PEEuYpAtbXUNv@KSqCOewu^ zsEvw3kTAaU`YD6$CXCm|$;oN+nmuzk%Lv}>upu@fPk*{Q<6-~@Y#vwD)}}Fa$>fu~ z6CTj7;Tp-s97P%(9UXaGH{ttJO20jkYd@ZVRKwTA4k2b{)GSWhXt zURf2E_u+TC*H&;sz-_etzF~#iPK-`qUY-kCbY5Cr9m8_FBsv6!@$aAC)5=C+e!g%@ zU1_|m$d!VkS_lz{y0p~T9K3p1q-pAV2~vRrcRt4mU1y#`;bBTNc_xWyK$1Tv=a217w(qgs;T%$}3Vj{P36 zgTV}J0<`Un5k{F)Tk7iSQ`4k?c6I7WLHt@y2?iLMm9=%k%~MwB*uWO)*Xl( zSGPgw0$+bs^t%Ti`Bo_r5eQD(<>EgF%tIE9EC+Vtm{Xb}4hcA9sr1x%NPW1+y?YMa zgnHc+ARxd8OBAxOLVabu2;5frZNLA%-0xgkFgguEV&B;Pg8eg5@Dt3PLbt{Y$F$TB zhvR&~Qy!%?_p5B4?N-?4Y|Z$#G_ho1Sp{B*+`jd{e*+$_4vJb@G}Od{hIb061PR1V zO)1hIK=U`Uva)r~2a0MiYFJ4E5xG6D*b*=>QAwphMwIgP{dssr;k5XT^C*Z72grn( zpM-kKtSZDQ$(Juv^u}Ud3i0uG#zSAk zx~&;!m7Y2(65T?8&B#2pP5C1sYr{QOw@O8C$q{N~jeSQLky_K*(((y`f!R1-vWS+E zQK3%Ks^5Y~nw6Fc7^HmkCYi@i12HC&W;1hhnX5y}zrF=&51zFPSy)_1(W|R^CDAgn z08DgCOk(^d`my)a>2|`pdus$=x7%^yiZDJtJ~r|UoFj##QX8XKj6_DTS4N^=XvGfe zaNHF3B-3zY?$sB6p^U`Xu<|u2QYEWmu7hAmCJB3B&!4sqk^yvH@x^gsZ0RXyf%svzZy`j=LPvi_NBaR}DfP%E`0(LF*;a%Z zaejGu9o$w9A= z$c^v>EH$_9Rw1|>s7{JsZx5Ps4Q==G+Ju;a_oR7UMX^JW(t`s8EA=BYGYM*tg$QDS zVgseT?(5*8OXg83YY+i_dTDJAw&|G*Z^od7M;xh__u&=!^lj!hcckM?XBsm+ojB&b z=D5js(F3>@xrN2z-_NE9t|vw2-*Z@r~S zQcOH}1PcH$Z;#}|#aLuqTZ(rvUzB?{_we2`v<5f%J1?$bqE3~w*!y`Gwb5O0@NC0y zM}1k?+?>LLXb3Qc!P3?Fc}acaavGw~fi1FgF$n>H00Jb(!a6z4V1e3OFKq|RtJ653{&lKGkx7nZhC8PL405{h zzNBngNiw;~a;`51HBYNfs8RSD-s}W}Ky`vW`4J(s&gfTANE`!3+kEQJMHTt6`esHCV zIf*$AxqM{7S^uU(){wL`*h%G!FSTN2Kd(?h(MCTN?MeW};taxW0vrSI;e~}k9UFU2 z_zg^Z#2^jAHiN>cvO=wv=H`OBI%Rh|=jJk*(x>~Kup4}xy7$vzqTVBUhG_s6bHzW)fC6%5u$|m zC&TFfGE#1t*=n}Al6FUJarc?vx7gP5tNfZ$vt6=OXPEQ2ubSQ@46T?-Z~j0Wt4Vvh+aV((S4mQt=_aQ&po9BzpiMhFX7-NqWdj($Tj#E1le0}!WG*TK;v@mJ!Gao<~Gy|0@ z;07C=5kp1g4V&F3|-y`R!7KNKiS&u^pmvkmE;+T--E*l6l{%G$HbuoI+UrvwE-jvtu( zx@>LSv~7Nye0xT9C5hCutd{fz*AKU?qmqt6{KlsNdNo@nONmjkvSWLAR7FEn{c5Y) z?Pk+b@k7_*T$y%hNTkfC5`T@FKQ?Cq`WqX0fDvU^%zmN_j#*;DTwuE%Q%(1+na)X| zu(0bK-A_B3#04V8QH_UI(OzF!&t-G2pcoK(GQU$fhHZgkp{ zJ-x$>l{7z?2Z&U^IYu)O?&=)Mo=lVkc1-v(O!#6<_(pYmxlmI!Uo=c8iBmV`t98|% zxNZ#O6g?6Tzt&b(s>Kj*osgTn*zJ1p`<~cTxpl79bV5Z-7JYF%;K`^M-slFVcdGG} z!Dd-@myneetxPLkU1z58uV~dd8E8zcIQcYd@J*Isb1-FS6Vliv)jP<!Sd&A0DY@O2O$K5kQ+rwtgSyJ+BQ#))lqTChUq2YR!xK~^ii>> zIx^ERYZdOgbt3Ef*?d}!Rym%*W=q!aro~d_7_o9OPF;0P=_B!^{;uluhNqBSVg`L) zMO{3%VW&aMI7)>XDPG3oMey;X0=#cAqn2#T+yq;E2SJ+r{vwMEV&HBwFe9y1XVQx3 zKVDXzS|>IPf-E{oiX+-5!cGG@_e*oVPkza~^0^w`-rjsyUUsb?->Eoz4^Tk6gi;@g zRQ9aJWQ#MT$!lof%>4s6!Vd8nDqKOl6Jjcpd*us9sD$cnX4k*N<5D+jI=nB6lXY?* zq_OUNF3Hj0lxb9R6ic^8q^mXf^1)IX&OQx&B$btw>TkYXnwo&%XE8^fi_Q~{LX^06 z>ee|mJ#E4*W<`po95cTis^QW_mn}%eg1*_0_K;p>OoAT5=Jh8~DtL z4Z%>;oJon}@a(zFZWo8ZrbB6r%`>653CK%xT(N|iYr^>mW93hEqnyqJ-{eyqL8i!hu% zTr|=TEE0&s&ncci_*C@a?NM)fqzV)j%ugzA{zS_A4g@41K)ATM6&4jirKF@ph?Bqb zc8LH@6&yMMMbz(UmHh5HH_8R)87gyHl;z;rL`jr#+Q4a+AQ0Z-yBFXw#wO$f0d;l7 zHMJvOY7CGX+V$e|hiqW)OSp}GV0F_>Dc{4?e#K~&M-hi{)0SY2(J{9vl(G^hM+dZE 
zwy)yTHO4Kq`U(i4W=Sb2ppx#qq?k@={$_1faT{1=q;7gCyjtNV%eI zG=xwjr8t;i5eBWg0I$59oSX&<)W*TJgDdNa{@fUoB@^5P)xh%t3*dxhGTh>`Sva&a zRpPZ^u_cY$E;UHZTzGnVmV^j)1N8HTXV1LZUB9hNi_{b-Yg*@Oq2~um(&gi{X|!K& z=@t$mHl|1I4u!w9LmBC!N)AS75}yHueP5p_J3Bk2{O765U!7}qYBZ`T)UQ^Q6bRz{ z_1Q5K;;6u&fO0{?*0$Qln3<05R|q_2n_iwel5P}!C^Jx_xe-_-PXOTJVY4H7WN8Q?RszRvp-*gd{^D-w<)2f0d|bCbV zxOe?feLqLndNMG}C0AK2qC54NzHf;GoDg7B7MeK;s=iKTM2;B=Vqy6=?yN;F>%G08 z)1P++dd1JN{8v?7)yhye-6KnS&K+p8$um#^gP67ZZsYwiD0@K9cDwb_NkI)SSvPF% zcTAY@k#>aHbr*n&v)JXbj*Cm0@nXYyuMC~>mOKJC^+4nU z5ej&HnRB#gq0lf!p?hZr=;FZNU-{5LwA&M-@0&b{we%kRjNcQTv_jheI5U1upIWVv zdD2yriu-O6xVvu;%M|&<-w;U6E&0-HgUE3;BwwjNLX-qFQI|KoNx0TMiokxV#N$XL zHG1lmX1LT4OzOP|p`zR_|GtwgWaIatlpULmH!VZhhAEkv7STO?`$SLAf&(>c>anM7 z{LlqdgO2fWD&Drjcsb{P6j0%u5!t&_6aH#fAZ|_dl8)x6&#l1`5fOz6*5Wn}=*RL6 z9b<CIH^A>Qn#zc25PaHyfPJUXX!d_4shu{-k8EX@gg<<|;d2>20ta3fD@m(N z1Y%5kXB$WsLoM{}ru!3_*oLavLiWUdOD$@*ZTtHccG&6vDLhD_SvJ3bQh48nV)4Wc z=s=|JW8mZE>sCP5E{ytas~!u0_S;PjJhnnTbS>84nMvC8wDOnw6gz9uGw?Z2Pf6#^ zj;^)gscxFaODc^hLF#c$;BTg4KMK+p*+y+pLq(%b({WpDqkDRWcOWW9G&D8U5Sh!U z(BSgx>ONcdp}=7Wj$Q8noT;g)DgEeJBPyANex@+hn>2kZ4*o+h{NA%wQhC=-_m2Zk z-9?C&za{aqf$qx-?S@Hn1*_@zV4s1fWeSKg1RTzzRD1--o+rD9P&`SM<2W)i-Ui3f zD($gJfR`3FHGN^xufHeMY22{YjsLj7yt7ntk9;{L||Z`$k62PKnNe1 z!{4ktj|Dw|`1P$w6_9_2d~eE$-x-YUTs79R6`8A9-(A%Nb1C}VwjKL7=8OaagW84U zf-Y4%$8x?8y^3&O;aEeSDu#W(5(hYOBHOyk%$aD^;9Tlg(zz)1Z#netTu@ejk0O|_ z(oRY~u0x0ex?)|=m!qpvsLWmGefbh(pJ8jPnBe|YSV#LB;QW~B2x zXzS_4ygc0%6&HtOIB6l&Ki{2g)E>L5@!`t^Tez@uayl_W_XWA;l$21P-U03ejAyVR zcJY*;MYw)LVqxs$rf!ycoJbsd);RS?6zkc%tdtZO0AMq3{<(E--FPW{B!h;8*f-k) z>{svBt99|At`!Id$U2M&c>et5po7Sa=n_!D$?U>HQFk{nP*eaVWJ6`5X9?`-(Ge@y zZ*+82uFmA@@{(ObJBP=4=-fu-Pyi%)WV>!xz-Bq2?A(Nk6xqyu_+2yU+p$yrAvFzP z^C86P_G={TvG8%lqW?5h#(gfIVgocZE^gA8}N(>U#;gg0RxC)#ot%sXUhBjq@w+deR#~ezmGW_|oWfo#FE97FTXU~>KQmAZ@x3{k8>>n}#AOl( zoDR(=^(F)3qT^Ajh`a-gn3LH92qITcFJa+JUgY$8Ot?6VI>i+TnD)u6^s>|{oe))O z+#2lU(S}RY4!N|KED9n5L52eH1|}g&BVG!RR*B3ITTxS-Nb(V zdS*m+c6h8*!B@n2jqmmNsAAvOS6GWcVKVgrAGAHOk!9Br$vftAT;1BXD+wr+fI}vD zyIJO2E6@q*-@?1}IDkRKQ63ZkTFqAe%w#ZE)u8n`hesZe586Io+k}PhOh)uN^B#rF z*w~B-wG#m0H8Hohx5bQ{M<*wPTs;rtB0wAOZ=KdT<^Iirq0+yQ;_sQA#iozHF84g1 z&W)E$&$6;@by_4@nhPe+Ef%!R;)sG5>~k$1Z;G$4Z=ALNx)gW9s!Q!P`cC9<$Pse- zq7^nBcaf%gbBQNa*tKSLs5Sty;An+-)_Bzh0>h7=KXa^`?H@gZI`?Ut;Q363 zlb|tgKz8BE+!D6gv!D>XB~vsTWl$0mn z`*QC8YKdijK?lv298o%)^&3$`F%_s$`;(;75M>ME3-!jJK(p05C5p9zegM zyUQN{xLB_LtEYrdbrrK=ebBNUAeq;8Bqk6K^eHOqj%DIbX8-Fg5TdI6+?f$E zD3IIoT|JcvBQn3HW&->~-~<%N8P?AJkN1-U_8T93G1Qs!MBYa<;)ohzMKtKc`O66H zc%DuH6^XG}t0nV#aLWOY&kd~1KLJ(;_w`M%qZayUOfUpyp_3R4A_0r=%)s@>ZE4i{`M?1m=ZtS>L~euMD?m^m#2!)(*J z&CBBrxAT4s-5ucVz+yBOu3J~i;WhswKs&XvQpxR!04IW+*z*If*WhwrDpCXzKD*li zeKLDfGiwVFmrgz2C~OE{4iRzdH6^b-DO_RkV`rCETyf?|d_~v%KRI4=&Ho*wmDXfr zPr#K`U#F+1f3sL)ldV*_IvB_7dBJ+FcB9x8GyAEz+L-A18U23iQuhNS1Qs@S0nnIO zI7nmEwcut~{Zp=1T2{%sr`2SoG7a+zba1u0JnBCgr-e{#Ad_KvVK3ELji|=-BpRge zjMp??SmFEx7R+>XWTuSQOYedoa59ejHjzqB5K(ePDT^r?3eEdo)AjKGmb&=pKttZ60dvr_mWwT|T~Z!9p5(lyl3=QPb1Y zWcr)4OG}Zi$MXOYnpMsWao~;_jK+2Tng&RbeT5Ry?EZFBg%UC!*B)Q|fH6xsV)IBW zW|`CqL7{PR_}4#p?zgXld802;gFAfAy-o9c4|lEAhp&hk(IU`B;U7NgTV_yQ?s^F0;wq>=+m&V8U&fHD}z2YElz9Lz2g;nh;tg;C2Ogcsq3tBWzFew2# zI)E%a_NPvzlHPv!AiZXift@%V)vDVcN^Wzl>B&70^}zHpPq!6n*3i-1tlpTSxE7w{ty7?b$)Mly<%*n&%6M{oTs_AtA22hyuY1ze{JhoauRnizdAB;MalnG$ zl1dXz_d;?UK=qa+pjXFv`Ro+!*#UCjFS7hlrb>n+3tj9$Z@~!gm##Vf12D{EUp(BV zHTpZx<#Grl#V-Gu&r~c|$7`|9EI>QSNZ~p$soUJFfxgNn>TYaiB-i#(`*BTY+d66N z?X4T&AYuvcW=caOjt{3(p0dIMT2z4LO+$}m<$PE2;?Li$Z_Svg5Q|~yR4xB!HaXg+ z<^j9$t=hSFc|_!*bTu}z<&v?Y!TodUd^c6OqP}vIU6lYW0CV#nN6x!@d&U66Nf-3x 
zb>;X6K%g6Vv4A!kFtq@!vFW{By+#XUyPg*}fT${%UwL_Z*L&aD%@v}+ZN(*S!ll+> zzCw~B|K&WA!vQ44IvXcWyPWR@I=f2O*~7PSldezGY{PNC2I_+x^Go|>AAn=P6NExNY6Tt+QbwsS2GXx5G83h;F^VnmYSAywQ) z%W5P;ENd2_VBdhYli>J?HA-jT0RvIz8dIkg4PY|^t|N$bl40Wq$jO1O&cMjhHeTdC z9W8AyW9ur$bTYHTw%5mF`q=D0ejkcZe23xhPO(*U$5 z1L^fLY!6{H&(@XL#6%3S(@bSrtat+!J`$eT@;Cy2SR2xdKdI0IUtE;O_#%dY@9~mA}VMm9Tmf4zKn*$lp ztZuXV!vlA6Lwh3@EQ5CgapZfhBLm?O{o8{et_bGmZ@f_9#A`8`Rlo?fS(1dT1D$Ks zy0X*nWpaa_t!No!6)s8!03so;3+<~HZ36o3=r1CQ;ZL9}QZKEKx%c?F8x0akS=9Ld z-?jHh4M8?=B?5Ov$VS>UUKy|(Q5jR0M2!A8EWVh*kYxD9}SH9I#)KH+vdCPHB9v9`fv$LzMb*GFKFUT#zncN?oB*OH6rWl1OzvK-2^z9UuEc3__mQ`cvnTapb{& zAL_7VT=pu^-GwDL0ofU_V1%h>dmojl{5v^j^U1{mjlxdapn|mG1`y=ZD3VBJxRBsb zr-zU~)}3wsD%QEALrd-Sf16NkhlNBVjv@(q15;u~7NjXT{X7~Ieh0>*$fT}W&UV>7w?ejX@I3E8uO+Osi9b%!)k zrUTe4HF<1tp^dpHcIFA;hz^knCwV|*E!{|IlIFpyBLCn1p5|G2JyoRAw3}y5F~Ggc z!;OuCI-T_HroO<(cWBpZ8{SFC0f&FK)`=sVMrPbr^dRW<-9`pD8{pjhf+!Is3N_p< z(0Q@k(-XAuA{cV|_JZj8FCCKf>d5hT9#{M=MFv3)JCX-_UoeOCF2t|Gjf+3@+-~=9 zf&ve)*=HwnvyO2+mk7l_O16)A7EB+jGTJ!&1)XV8wx0Y59~V%Qp**lK5@18zKA{X3 zKO6v68$hgs0Jd75wSym#ddT)Wa;-lCI4klL5L z!P<`K<;1gkK2~mmwP)k83ZDYmsaMbyWDzp%#mZ|7rGgLz(X z$WDcQj__c>Fl*@JN2i^pnnH`c>i~# zzCX4Z@5g0D!G9Cjt-H7kZ;^kw|(UIMtH+se-#Ax^}iPxk^LqCEX}OcN1pnCaw7fcSAC!B}-J32+WD_#04J%7za( z5eA>{fW^1_rJz5N2;(G&$E~4>4tC*%2~DFlBK6*pY8CL{eM_NpBo?hXPijln@S;`Y zG!wSMWixqWHF+x$^U#VHZJUI`Tdna(Bza1scpN_Rh#Nc|2P%@&s-Vr&4)kR`QD3x} zW+)KnCcRPH1sc!*WkMDyr6>FvV%la_pFMlt&I7Vb{Pu*_ZwBX@!3uw=0!vgH9kr{pdm9sXtK; zi^xvG2Q+d2us+ANHgmwp<;8CgP*vmJFJegZnT=afl;2}tmjD(_iji^!3{{xP| BZl?eM literal 96169 zcmZsC2Rzm7`~NYsNA}DN;Y3tMHgW6`GLD&yG7};rWMt(an{YC-$zByImFzttacm(n z|M%zU`#j(8@AW^g*YkRMdd|5&_kG>h^}gQM`?@}ny4tGbB+Mi*7>pdDrmPQxos);b zaD#{l!QT+@-fIW{B6d?Vc>sfvo`?Ry>7NQXg26ao2xWy^J{hZ1fu6T^8}^QSCDLM* zaf4AG)YTj+t_eCWRf;-tU2{HGsnMdDV799zx%J%Fm{-M!E>3@kDC-qt;LaN&`fphT zb5r4Hem1j;G*ulNht0(2uN`>np8SX;##I!#5~I2Jhns?s=lSQ=)QxGejS)H2h3%=4 z3sx5otIJnB2yk(5!1mm2PF~9KXTk3Og8ua$;)Gul+M;} z9M2HNR;?4FNvZxK_zeseG~YlZ4}L!DHo&uh*_YquLS*%x+!$8YyGHxmb1Ag<4^NBV z+r?$xU9y9Ez5aCS*QD6rwd>=*1b?V#@wM8HEHc9!I!WZ8Gzn_?-A z?hziRZqj1Eugr$S5B{*qHTu!yqJUw)PhSg@hW4ubl;~1#h|)lJo&g2+dl81%bw#XO z38kdka-L+S<7Y~whGZXmZx&8>H0fYO2(aI?l!t-K3Q|kI=$KC=O>M$Qsu6OiDfhgl zLs<{|1&#ykxvXmcpq}yi8|YV!4iNiE0%J(Y@{`BTzRJVyD~R!I9XCU@Muq-56>mP9 zceDPlI*_lLc&?E*#><)k7H zjzcBb&zqFJ*-(}6*s>VCMP1CfZ?Uc15xyPlLrKDsPZfjZM};RPF$cTiUEvkr(;obCPuq|4Az?uv_ixmf7Lyz>OaN{!^Jx;t>R0K)3xV zdF$oU7c~=VDzG_r=*aSq)koyje#ub#;#Mxb-)ihw*K<(>-b8i-|KK5cXhz3YHGW3? 
z1KsA!1XOm|i`}wIMW>?l$wqM6Uw7zW&UwdO=^}%{-lugaEUKWs(ohDCe~c8o;G`@< z($0nbMdf8={{s}TWj~>!TxN{d}~4Jt3QeMkH=@q1F~I5M>I9n znN8xe5vk&c$h`1?wf7&HKJ7aVGsw!p9|b?t=uvFExS2ZZfP4#lWR`~x?UaZ6HUE28`;Gu{Cagni~EL^M6eCJg7 zNvu+^ZfVnd1+h(QdmX3Pxbz0So7bDQemQ?xe~n}0N~O*$zvW?+#R|*bcpoM!Sk@}vu(5WsyCWYus>j7 z)tk6wS&$V=@o-~I=>rq@`5Ww+NzXYj`(Fn-axljE+J;^MPm0b{ORDnvzkb4E5H4OZ z5gFeIY;)@p&#`&`t9x5>^E^5`@xId#kN~}Rnp&?fNfQN0yKSNQ%+^sYs%A~B zvD7bMEFgsiC1@sy8}!$r^gqAAWqqKGH=ve9lb{xqq6mfOEbAKz*?ZTAmUh#a*Y-C~ zFK&kB@m?g4o*<8%CihA>j~btzb#L57oxpEq;J~a*-D=i`P+zZ^le$TDxHqKp%JApM z!D4ShOIZbO^9sKZANJ%7+4b~t*xv44iAlazRFD@E|0Y6^FAUwG-CwSmS-g6L`)=qv zK0dIOGD8lZO{W5R^d}_zVk|6H3A>KvfOtg$&wv|Scg_8K-z={+zjQXMTW+6j$?w6r zFDT)%r@kVuAlC06aAnn($!8*BKA zf)QSS4OEXCo733vOS#wkrOcLoTF0{47>}ToS&s&BryU1eeLty7j@e6$1h84pe%>iQ zFW)TvwR~o_r9VgC`|=N@{2yp)rz_u%9;1Pr7(c8r+b@kRwcMm61 zWv2**DH~0=l^-^ee_yfe=f2XsSY1%9v{KAIz;e0mv#d^g5Di}cEW7^ARpw59Bn(FQ zIeCW^VflibeN;ThcSs%49iHke{H42feP<>oct8_9k2g3wevW-y`XUGBRQ(_(g$H+( z0_&JEXboSDuH6M7@na7Q4a&D!|5`7W(bx5ep|?7^l|?X_#{D_@Ul+Oi`h_~KI%c@UO9mdxR7uT*)D z>(GG=8w5CJSVM}GoAy)QS;AmmIFp+mZoUKNMi8XD`AH9+rPP~yYyJ4=j92cy~^sh|73E^~YxFM_F~U>9gWjmsduxX4ZK za5NH?))^xT3zCr$bPPjRYcZTJ%lb+&^r)qH+qZTj1irSio}Vy47)-hLN;3`pqegsz zp1#vY#P?SJ0~=$0D}1aunQk`9hBV34-5-^&;|sd1w&~+(r1;kTJAY=Lp8KtSS^ri` z&uYd@LRdtrBkDw{zwXDksVtdHw(+B*Z71jZx6Cxe*fJ@8cr{(Ud*t)#uC|^Y0`dAr zA;Y(JJh!^2JsoY*PSlP+dX6<}}?NkEOWJdrlM3Bqb#me9pK}m;53XE^NdGGPdl6ye5@N zc|2I?-Bxtxl^PT`Erd}2jVvLPE_>}ulet_-bMw{rds*jrg{fDlf+EhqH}*7sXV(E4 zJxdeBR)3ZrCe~=VZ9lIrMWEq`Oq7#y9Y4&<0mA71)O=D&He4Mt;|h(Pl9~OmR;1BS zw0t55Z3GprEl6yuCYaBE!(GvvuB*8JV zV>kBi{|)8ou~y^-|9#FCX{syd2y^jsf;zD&hs!<{Z7D#)?dm5b?YS2hsJuzsz7>qj zOC6J3WsYI>io%sYQcK>+K)VV&E0MUF3qsHNk$cFvnc3L)$O!ia^F|ICWP^%In%*A` z>9#r^A_5^SWKuDeYg~%mQSp_M-dSSvc33pqy}?G#kx$?qb4B~QO|1Gj9|j5b>+)hJ zzwa4HUKP%pV<3@#>6I(LZZVFg^xF8j$H*8zyR}qu>Q&i#efw^cS2%m3xQ^xqOz{j# zOFa>;2=&OFr^WQQoO;5@Z}5_Htn*&vb^KiKF0~OHdFhe>Cu8lf0%8sIM#G7IiU?-q z{f0~yoV2EFS=4!=(GWz-RV0NQYu!XC@OI?yWBo4{p5+)1?Lj~weMHC^MInR3R+Q~# z>;cNjRJ!GWJi`5m3Fg+^(ZTO`Q~05>ell7can&Xi4jX+U9MU_aaiO2(va^lhIaY3o zOCw>{4yA-%`@gbC882ejx_3*>w47-A{t{;O8_Gm%Xn2yfNCPHs4#knSJ_LX+`_Ig(FGw#Z*V;* z0h^nNrr?Owc&W#`{oy-vu30wsX z%?=f>BAJ*jWgd$qY#hd_rST4v!@9As6s_5aHm1u-rv&l8oZ7iJuuBXR?OOA?_j!Ly z{Yysf>*gDFKkt;(i`&o)+K%ZVF5I&!cW6O>u~}sa8U-;aiRio-bsNe=N0X+zyT8+f z@Z3W2<5Xu_)}<2W-A~J&T$Q;Y9#*dkC3%s@7?de*CR^k?*QQ?{wO6mJ&N;w>v@+{0 z6Ga<2p^H`2gHJ6Pi=ao z=2#&JZ{JiFI)Z?qNwCbgt=E3H4-~dG(PefN>Q2dk@Y0K(rw9@Qe#)ujCmV7oy1Ks<23#L`4nM3( zeWbm0zC69&r{@i!jPD}b>K;DVdn+pS>-=-{*c}zA2pyUGARE$_S6<7~b4rKb8=0HtyMLtdT>rBuD0o>1m6#@}V)@I52a$I(?nMvX zuAe3~H4*;K$M5ef_m6V$&ed#$l+DYI>;^QA&p67gKfo`MUzMyhOPtE(zR6F*!JLoq zMG$sPoqvpbmx)9%4*1CQ2!}S-N64I$(VBvktH4KvleKJT3J6dJ1CUjs%kMhQ{fM^x zepiSfJ%8u$@j*@7>1a1#N`)J48xX56VP6_60YR@k48n3M~t>OiW2ci_Uim za0_ZQJXP_Giv3KDKyuL%h&-l44fEp)UekU$OH*+j_?KiQ|RW6$HtBZ`TXCjk2Bgo zl1)F9k`N+~{qd@rHsbXx(NrQL!PD9W&h4ohmYFO6ILdd#UdKLs{bVZ+ogH7U=izb?<6lOqjlVOiCECTskxh!?Z`(+LExKycy zaJRlcRHDp)1=<`aK8{{yRXSJ0rxx84aK@bbZATNJ5_#?_pycE z{?N&@N6Pv3@oh8#psqju5$~WmEpZ9MH+Ou_=8VB5!ZH5T0jBx&HJWsFGiA}Qg}i*; z_G#0j`Vl0d7bO(0i%EyK(VlSt^)qAz@SJ9An4QKg|^t<%Tyg}uW(cS%^-4|;k` z8DDnvj*=Y?qJ($q{KqI*A67M)LpGv~O!n`1HHT$~W@P+!>v1v0a~@ zv}qjFsafJj@Ib&rwc&;MRI^;+Ln;?C?@U_O`0N`EWAMyIa(Q`N5G~`A_h__jjzgDm z^{Ak)FAgGHnV%%3Mzg=1_GuY}lftQ24V=GxiEpLlx?h;>s=~`_oyn4M+oP&itb^y; zou5A(w$f_j*9q+|^Rvc4j`hCU3Gt#f^}WHqq+ef4W7QmCOu9&ir$Ep=Io)%Q5E2QC z;O@L_ef`Avq*oxu6#E9GMQM?oE>^hlzvP&Sh`C~AvVYSZH6(5g329|ya3Uan!Yt;J z!Gn1n3MK2XV(Et2!*4HVKb^yDQ|-$4S`4`qNBTNTwD3O+VUHKrh@K6{gY=moA45(I zK?z0E+NU1d6XI2nB&T+NBmiG|IYASVI~y_8A+6RxO+Y3gN-KOxAkvdBbbu|XMyJ0C 
z`PxlM3C32v0wwFz?)!walz8h# zS{NS@KJLMf&F{8Td8IKi2TSiL4M81+>4(_@qR|z7p?vVO07TGeWm-Uhzd)NJ;?jlB zxkVR7H``4*djj$;AjW5A^5_wiRWCofp{}WZ1^L33EWmAJJ|ukbmZho$GcPfF+`8{v zq}qo5X(rl?>k&>nmauTJ`*R3%J`1stJQB(lGnQ+)$Jw@tM=?AyQ4)CYc=`TBC(S{` zz0h24FNnQm0&LU1c!}H9__DuRpwB^I&&HnWfxbJRbep_os+1w{4rmV#P^h`$1hH{Y z9RcAa59lYseMFyxh5ET!O1K|IG9wn^n_*UTSnqTdnm9B8$2FP^%%)QvBLUgz0+A~9 z29b*DY6RiUMVI6nmFNzo$Lh(0#YAzy<-qM8A1mf^oQjO{1jpk8Zcs8s{e9dNj8$^EA_U#mY+jg8{=Q>>@b^!yGOb}8O=g55QuS^D2>0FGJ5|{vo~pOh ze-Knb0CFatv$ToN!-lF+(g$1#H7#yRZAIRb|AFiksM6LCF*<`I6D%Kl-lIF7*aLnD zJP3obstm~1VQca#+Ny1H0y@e7uM{pdd!(N(-h8{a>N2jp1a?c!758;|_xK7*W)`dG z`13#}uJQR|YWMR;3YfkxM>-aczVsiBI>P19NZw`E5eX3EO%tulSW!4+zZsowOq7MO+bzp3_g^FV&U@1Pk}~Rot%I3MB@$7`%mQMK_Qv* z#(Ao^?Y@tK@C37F@*Tke`_H-y9vWGca@3ss_rtL!X&m9D3-?n4g_Cg0?(k-bD39b- zEr>w^ey^E~3o8K01TX;u2xl-$x(5Di^PbdVAVz*f5H>2;X^e9@hKzYo@nt26n^BX< z{VhK{tv06BST}q^vnov6dJDQnECwzZ9la3Q5lfYn^!4p2iI+$N=D}8>HYQKe3~y!7 zAoo_cnm{6q0;10tFhNV1WAW=$FbNB{43{&QgZ%Pzv@#^!#O;P+AGIWiiGD%UH4WbZ zhpQ1+hfrKsF6g5G5gjb%ZSz{)po>4x>T2dTWO(#7ID(7p*c^W^_SR2Ts-N8IoSoN0<{75oUeAQ;z zOJ#gngYTos242CeSDai&^1P?1uPZpXSjV%E-FDusoI=4X4s>Td&o=T$&3b*qeSB%+ zKL~J$AH=_Xc>HU3x2W`$338)uWo>-Cev*w~kt8hl(afK+|IloWsyyJ3%tR_TjD`yU{J|BiaA2iWh;81~Y$R7CxZVyzJE&&-%@ z#{D}|>SHx3fIc#$ zTH-B1MLp?fxA!GWD&&!CYw;4}!@$EZpPrHY_Tz$s?77^UeZrG}!6QnS=s+4#2Oahb z_OM2w{z`cuN6{x9w^MVMfL}QtL^I`s7Eh?-CgT*SZ)&gBiQHs(vxu#3_5mpcw#?V4 zDhWZ2^Ev|>&`+2*^u8`46w*2y7pfd%Q`v3lf^}veRJL-9D{?NqL#}NxT)hu zQz{I_<#i*u9$ev@G%y=W0D}HgQ{#II$;-_ajVjiDfE9h!K>24g4@ zH`Ga~4gTG6z3Ye}$RnJ#EpVNk0}z==@P=MR%C7$l1hqNzk-8?0Q-uZU!{L)0O$_H#egdM=~EAY?{QEPqrB986{5*+WP%e_laLM$ zO}p$?zA=!sFAMM;QRKZyXW_&P;sHAgz?%vuaI3-d+jBap9eF<(;z&)TFu|i$`AePy zOCKbVx2LckAbEP;EUujE2p@L)74UdS8Ysw90{A65k{UGnUp2BGBJ!EKdSXV}UBKX^ zplk*Dl*;_fp!$Y%)ITE@dEjxTk6y3c0WdOSEK}l6a}jW!2&PI2`s=uy4l!sCK}vjV zH8p!5*m(R{U>(AZYMZAZIx(QhE&6F=57N{DVSaU%9hHP9gj$LSLcgqe2}%lUF`1YE z0<3@L(GY-~Wi*ri^qYduT7g;W%&$QWq@?a~bpVa@W|1)uV$Fo(>(4Rh25V9Vrw&UAP?uZoo?Y|GL_EN`rOyD5MQHC(#T3qi1nLy^SD_wol2VyPlqS4=;P63%2 zQc;j8=_MtXT&%K}K4Ob3Gn5mrjLREVjoa-1!O#0Gm$+FM9{hCrI%kPhKnNnFflWZ~ z;cJRd{pUHVotNkbrBZ*Nx*LzK%xN86pUPhGUcIwN0c)o|@RtRRnc-H-##@WKP|pDf z6PPL(0@bt7U)YzIl$x0bR7W~(DK*Drd7 zzD4CidccKi8?LGqF~H`;Kx9Un6UZu1Hf47YHyG#l5{R_c+P)m0hO zzWM&IH1q#;yF0y8_kp4RN}}C&_Wyk7a)Jce=q7^u5OB-29W})U_{lT z?A%4hMI_#Jq~hI?vrDd9bG;Ey4vP>3%J#<9tC+QMQ$@Yen}9sk2p9l)s0MAu<6!O`fh3s$PBJ`2^Tp7HjhYAgBMD;*<6c zba}id13W1eI`TZ=TMw7oacb+{K&@|J%rrRhbDqDBw&1_mv`WJ^u7^0}X%Q^CiMKIR zx;j_%yv71$r$gRlvu^@$=cmv~EmRxn!=mLuRsiy@_v&&GP_j)8Tus)lqCNpP+rn~l zi7y4HuVlNOb;w5z<7w%_Lfy%XtQlq8K&N8ML(ZW z*#lDS&m)aHb$fCkEtu|n7K5^6yL)iX(*N;ns>T0N2&}W0zQw97qaf`Q-L12Ij6qE^ zd|!ONbbxQ8kvUSrTWy~P=jMsZdel|ix24JxQ#JRJN;4smPe8Rl5gK_xcaSD^Q*6Kf zJAp)a^MeK+B_U#1M>7K3fo?|t_6=;|)zY!mf7p~XyOnXmf&*)J{Bs<@Hr{5L>D)x5 zV$Je@vTFFXTl{J8BE$YMBjks&HkQ!fG93;t&`rE&l(XGc+GGxw0q$g^rBax*<19$Q z{8k(>qJC*mzFqPr{Yf)%hXO*DTvMwsdB+>ULN+{q8ygA&Zv*BI(*`5K=)|z$9h{pp zxG<_mp}7@#v$B1p4<0-OYOrDEmC7Z67_?o?(KMa<`zC*@8DzFR*~;D@&mOx0*1c(7 zQ2_+YjDK(OJUroHp4+0vr37(9JnI6x1Ol$rmxNNPP;~`VDlC82-g=GBetIwm_-XCj z$)S)C-(yf+m|ASBHkfNaDkq1@uX4~nWdokbHo<4=)=Bs2tGC5S+!S~^6zHHy3>;XV zcK;L*8NP9p`w;7V?sHYYD zZVz%l`_XZrfT+MnPXSmiKOpL3&5vS96&7VSUmE7k+~Y~2-1bXG98{t&_eip38AM*4lV0 z+h<)hubiU$8FUKEPCM5$<8kIjf!JK+r0@!JCG5*v|Ccz31l~3Fxs@$uUy6l7^j!p{ zp!Pdm?BnpQW7@C+OS1)O5Alq~#)eUFw{0UBcA~-sSflkng_>d?&ET1m^JC}mrH<<( zLr~&^u(86^CROOxN_n{}?PcnIs}4;{=zl}2-+-CXRbxrArk+fR7^pEpG@(~dexG?0 z{R^5bP*SGJ+J-?U4SEWh6lluoU?z&;=g;N2xxWcC;x>_7g2BMfDvRqc7;n)0;G-5( zpa+42l>*ibFrs#@P#qzO3Md?%z%Y@YjXosr^-%#Gszs88q!XxN0nPR`8}3t=wEUodW0oQzw*5AM=Q&7UrS 
zi|@I2!niHx$C_`}9|)>NPtJI((!2_Py;b)Qg7Mw(3Gue1CGEtAT;FZ4j0 zhr(lUV>=bjd*@%g8_h^i{PrHr;|$Pvz-~4wR^3;-KZM!0yLcYk&uNM(+Ny&V5268( zuEvL_wJKDd`FU z&v6^d#fS_)2OIP2iUHL&b$=i5eR=OsO?gwgF6|q@^^!7P|(c&mr_RF?`uUq-)u7@9()h`sSlVNiP&UVbyNZ`LRW^f(E^@Z9o|!9qxELs4}@ zlU~Uvn z8HGxDaXqIEH|2i{Lk};sFd%e5!U7>drIbMx+&JY@`lzJC-7kNBQUS3IruL8@+|I5L zj7-Fw9p%5W56u12-y(gu|23%9uHK(21|8-69>t7*o5qP?8U)lBMAEdSU^d5Y`{&2V zJ;{;5>kc+Ip;&s)!58`x^vQhh*w%t583f!F$Vf_S8#TqC-?7Wa?(;zi(gDJEtL-=O zwm{1f>O7*LRUh=qS%TeblLnu{i!`rYh|+*0z13wCgdFK^=U+kNt%qW_7CA7^bs!z4 zX9rq)w_KtXHQ2z`V+8-jg6l_0a=a^0CfKML+=uR z3Lw}<18Uf<7=V{SidgBHI0Fs7-gO`Ce2oI6Zy|sFHvF zc!|2a#b2;=e_B>t-?1a{-4ro%&7|E5ix9DI$4?(=Ei0QfSAGF4LEFH|I}n}*MXdav zc*N!7IC^gdDJ^Ro##mYNMumOF*AGozImG!~n0(*XdAb^QVr|q?eZX&2_%c) zdE2{B_gDgbR^T3h7WA9_Kx_^KT~vp6euX@@@-XR@8)4Y$VJNiVGfo)B3Lj{%~q3=PGo?m%50PBd`)}1Zw5@( z3&!H1d0Ma&!^Pd$!X{$3OJEn&lc|%pj2B0>_W>M&0j}2o(2}k=o@dJwh4_TR80CHH z_t~2K$d{0Gk(_N-(mFj&WVrAcKtG^LX^-}%`dH&xo&$|TxUX?G5|ZPB*q$B{cES(k zm;y<`RIBg*G;oTssUygaNY3`Jnp#p+rX4u88SISr$_>=Amo6`qHAo&|LXf`$ubj}s zz$nbW4hqWN&|EiQ`B9Jti)=^W&OXZms$M>(oQx6Lpywc^%=Wsp!34^xm@C@Lp2(K~ zIGl!1mJ@T+aS4C|VhX&4Z-G1Nt2a+a3-r~0JM&SOV&0^uzux|(QJo8+cjucnwt6^# z%~gVP-uY>1#j*)U?knswnE}lS%pN(cy`${DpKMm`eZQ= zpzQgcxW+)$1uVi5Xn;=l9F6YT<}tr<;Y|q=rV8i^oAsHf?Rl})-bn;V+-2*+5PV#m zsf@r#;XQZCbzBHmr{qrJg@@TXtN3HOWVi%0^hgx233lo+kK=v*%YR2zgy8l+AaMzF z(XFu&S%3a)gampdC{2JE|Kj>X>Lwz&%-)T?dtgM()OeTi^pt1-cM4_bFQ|s}%e^?+x2N1(9H0RM zKu?#z7>Q+NtQnz5)$dumuP$Q-0#61!ncM|B=KG*<@{n`5rHG}hR(y0Q3!>f50lxgv zxuW*u>6udeI7nQF!6YKj>cpYx@nSIhi%S_Q3@`?;HMM;VB&7Q8e3fR*VdDkrN3rMc zUU5b&tbxk?g=(y3_hA zSbdWqH2doVs|}0e?hLd2z56s!LvZCI-)^ld{rS-G_GgD1wwG|jpcx;~_PaT^6!G*d z61Abya^b1Q-QHW7Blp}Tx%d7ln82CnNkNYsbjIV>XiB$znHU%sueBa#olih%CO@$T zt;rhT)}hkZRH&RNK?JwTNltk(RaAl`zqIuw@Y#E{NzimQx}c6_4L`UMwn~q_I~!Y+ z0sSGWhe^^U7(C)VW!p@-(&64p=Gp|%_U(rUuUB+`!vR(MNpnC8mXHy=lc#dB_)jH` zca6RyzP>HNGb-uadFXu*=|9Qn=K(zp>$#*uBNNy&s2g8zD+G3W54B){P5|Bi$UUb~ zKIV|D>0iZH9(aHQ3^?3#83q%Mm_e_Xkb(xNaJv#A;aR8=pSrlI1C~B=I_CwATqboB z@=WnNh8X^RCh?z`k2{e9SB_gR0*{J2c2DSI2B)!-1M$2(U9I|$I&-e%9}ZA3d#3GT zKE&cc>32I`HKmHy4$OHpf>{ZzWSny;p!|VaM~{B!k+0%SId@uP;Svtu@(*%gh=IrK zwz|WA2Q+Nd{9o-6j4?QkHX33Zo=*q(!C(OhrEi{05(JI1wg!J251MAraxG6@CT43e zz5s(DGF6r1pSYWNkyf1wXU|fkkcVcqfyY$WXTJ{ytOl}cpVvnPh z1$WVnNhNF^Lf+CKaSUlh;(i0=;;$Y?@Fb!6GoWax_HP0scSPP~2*4n#U7L%b&=!!0 z{Hs)Nw##Rvv@BU5PopLtF>;bD;dZJibKtiqSg+%qF|n;i^c=w7%|?%_Ln9_;T13Eu z*_#mH8B}`zW*tC_Ds;q2e6n3F!NFd9&T?!dQ#SeAE&JomWbOIH1}#LP@NW9tnK#>D zVcj7aw$poICj|~!$Gz}O4_|7@zu1B_)8Kak%_nl~v8P<-8UG}Lh~M8EfB5{-fD zMkjVkE1beHGm8GASbh=}#}h-o@bs@z?|hXTXy^Hhr^604fWyJ$nTf)oNqg{40W_rk zZn$)^ok9Uzs#dn>J8;JeXL{P=hB!}U_Vs?CnLV6HN=}F4QHc3>kr~3vtCs845pm(a z6tU!kf%DwU5u_(HEi4STzw>GF^AI$MX$o{cm`GwrjsrG3Bxgnz=mPylrS}E^@Qfh; zYzd~{J2$n#MFU>H=}N(ECUuh8Z7i>ZaR~i;bAT_nH`s2y+VdVj+!Z?$%>gr*g&n!L$N*@$r_YxLi~z>@ zA#D}-rHT8b9cr5OBk7s%VXH6LHCi&2^yTFpFXvLH>q9AU0h=X`>TKiTQiGvj^#Qwf zh0&RNEr4e-BvZcUcszem^V0F4$j@W|va8TgrP+TwquO6AQusE|kyI7F#_{EIeqiHH%qr;v^ z&YDh9cV%$CfStrN7&>}5KUkg$=Prh`B^Q6p>vBNxA=vj)rFtHAVJyWpvk6al zvrh|u?_SXB={_9v@X4kKSgu}uEd2>gMGlx*kng2fdp|kL6>g7V2aA-xn#qt^B~UXP zBRR*10M7)MbND+VS}N?R;;Zso6==k+XeNL6sgx!3UNvDy%E}@cGR3s>dNm^sc2DVu zmhyD`m)AE2(hsF=s|-A+@hFQQf1PxTV=`hmBS3gq?}v3ObJGg{#o>N2>c-Y-3nZXL z`91Q{>i>;rvGgUjZh+xy-ZT5m7+f0giyZq2@;Leu6M zi_s6`WejT_Z0Xs`%mKB%X|u0$eRswi(nPEuD%7H@bS-WJ_Fn$l%pTAI?3g!9`EL*R z-`bURc*Vju@Oq}zH87N1bAS2x+Ze ztIgAVy@pi(@c2}!_~*3$L15#|RiwHa=;lE+JBZDv4?a6K$zzEuTQj^!0#rxMW#%BL zhmB~kQ3h*g*JgNDMf%O++KpYb(PjS+o=g$kpc6HS%{1R{Yq+)y#ZwAJ%k<=}T;4Cf z7{_eeNZWHHNcC+Z+?SOpRQvOXZ)pc%ZgE8*`gARMu<^0kh;>YuvYH63%&)C(;5VX3 
z3fY0-$=4HQ&6D5}Vi0L}nV4aG%90C+q;PW?_*-s|-9HJtYg-AL$<(5!B=|c8l&9q< zhw=p&{hL23$vLAz(F7QXR`L^xK~Ts7 zPVL%A)(wXFY#KwYS^Qq8M&;9XdF~4!?CZ@Brb9J_Pd>f+Brze$Ru(tS^jl(?7BLWZ zzy)Ow?1!TmH0`v(YkNR&a4!aOFX+sH216y!}@M;!$OwWQon3({Dm*YCR ze8rFEidM3<=5NVG$G$|x7?~d1#>9cDm!v!WIjv_73at=9(l)bEfBYmu3sh>LW19|t z7k}l!>X@~}Z=tyL1CQ2=aUNe8N7b18?*`KcNLn@^0B7(}r}uo4CE$4S@oXj5wT#N< zA|S26U}aJMc)vvE4h;d<{1= z7=|3+U3#+<$bH0`(_AU=Nh{5&G5Ru)MalO#Q+H_OI%a?_10w}*9=rZsgSS!6+Fp2~ zmalUe6yn%f0A^Yud=S49V73&zQR@7!M-hcOOCT#nc7_TAO z33h1Pt;YT1Z$Bp>1j1mO4YfV36=~!ZNg%;oL*m|?1%`8+Oa3Z~49lC6{KJy3u^$W+ z8`8Z97Q(skpe#!A|?`KStWK4@Uh+jnulj{saB z=Uq+qt2Y56i6Xn)-?E1InP1U{3KZ-oA_5GtrNVI}O5gV(@-^B;K4U~EP!`DGj>`xPWe{Xl>F1k%;ez!)gEXKTO^75Hmy z?(2@h7vrjo>uiKJGu-pbWCN=(S7IeAyj|;6Stsz$2<4nr;pH8ki>4;3YJl*b2{fP+mRz^ng|%h=cYDo{OTm-smgU zdDo?Of!#j>Yw0yvG)ul)=%+MTtv0Lux`NI(J}T zpA4!nhu^u_7m^_@1@bL0n5@?{+-Zx-^D6bZ`tURU059vna6jwUo;cnGXdCTtuh4_H z@B3~PVQudPy+J9;tpv1(9OK5uT98%26hmT$Q+a$;gr%{CrFA2|Dwy1$OIpDi`MZb8 zg6Y<(&lqqmyew`Hq)UIAED1drMs4&<`(-Q9!mrL zLywlMz|dOK!%(5Lc}r;db_}H60ohpwDAc}~<0)u#Gn!lF7BtQ7(T8*}o6-0xj#M;f z%65AL?!@t)3-RKD0d`)+>H;U_{)OcGaWVvb%%{LHP6EMeuR@=sPA{C&=rOAhqVeyX zW9z&kr!*Vzg5xG~{1b`---Tt_@DYFXL%AZfx!Id6<9lsZJ`UuyyShkv7r{H{_)CUd zCGp4Y6H+cWJk8xy>fmAGweNEe2-FPmnH7I9v)#Ipx;WqX8})8-i9b1Zw^#!G$F4S1a-&V&bzW55RR2>kFlv&5s#>Re{(tw1r9_^|%ypUNirJrSWdd1w$c5lje{3KG{MJX=M8WJ>Zjv8$<5TPZyfmm2!Sk6=dG4*DW* zu1X!Ty|_P?pOS8k@UJpnA^|Iw;haNBU$!fcP}OQuiPH_^r7Y7%#R77O`&RL7n^E)1tr93OV_8^E)u7IAq)%* z?6afVP5E*+q_(#9c>DBrX=ZF%k*p7uT$U8?Bk~YJ%C4pTefR|$MzqTNZ#g5!(?Kse zR6L*1Kr1dvb}uj+=v!$bI<^T2_O}KF9v==FkD8fB`!_|YB&(62vLs1$E}Ov{$;vl3 zl)~?r`Ax=1jSc6oA77Hdtz9%zPJYsLAQifjYc6vm1cx82De%#;o-t@zoaa1wZEe5N zeDI5b0?C`ED=KfxeQdKLpVHDU30`9_e{=B<}x%uH@CE_L-S;Gwq{&A{-6Zf_t7`{Vdr6md2 z54EHOUf;J0k@pvw;%gtP9Nt?BIXxm<$bS5Gq}Kv&&hV7itd3LaV*cSdgE%$OtQxV( zagm+xp%-$(9B@^VIfw~gg3Y0M>|iLQq=-}(hr2+TXi_4_wU7LbWWV&Ab>|=beD+K5 zBye4;aC}SK@VGROTGE_ycHF)7iD~J?L*QT3dc6crwGpwA3Z9x|SF%#^(!YcOLr*j* z`&-*QD_85yc$!LTIUDoryfoiDy>jJvM^&ko49#pobdlOu0VBBj@9K#){UJrLj~``^ zV84(u`>mYc&%c;Ub3@Wv$hqlyGX3B+6{a6dDUm4d3HKz^lJ-jXVjb#fPXr;R7C@#l z^yT^U2!eYfociSq$Y;jILSmfu`i(;OE?>CHetdNBq3YW13QU3ZJ_2QZ4(p;{2I>rbb_zG>f`3 za_qk!>^A!-RsoP!a;UeBvk7|zOr+fJSbO_h|77$9ZSi*Cvp>5ow!Yb`0FkSf`G;pF z%I`xpWx=N^-X4Hb^IuuQy-wv(Q-@ktM`z40*ARMP0ijOv9LM`4yoY_&7Y16vCy*=h-M~JADEIX`w6T0Sy7j;WiUK&NIwXg$sgtfno zVd=YDJD1TJe2})$=T<{sJ7e@9u`HB~I5OpA@&P#eubZ-zRb%(7K*vtxbzp_i3d+^y zEEpaT$yW)Ui;ay_wM#|JdHA}zy1V;%)9I;}(v!*tp8Egj?WISO=q39$F23gEq^^74 ziBmsm=sWOS4L>{#*Gd<5!Fz+QFxb_fmta9fXngrz%kV$N z!0+UbT-SpGb{xX8f@?Blg(t?9#Ge5FYYpdigJ9dy0&f#7crH!w)7I*{qVvh~Y5idd zldhrTR|ki=T-RC0)=|9rjVlDNzQ*53Xh(^RZ$tB}%khNP^!d$1bGSAX^0y4agP++<)J<5YM- zeu>a^AmhaN3Eb;z(!i!j?mro$hT-Zm?T#B6#w;4jO+`gq2A#^^4i#$opM) zw0cfYtrU(sOK=?uuE4<4mDYz<{TUao?gJIc@z+LPTz?_}wnniBM*BTJFQBH=)r^$!Nahp(wxiZtq&V3Y z8yk)#B1&|2UT#s>NLV^Q+4XnZg1|oXLKXm}%Zd(9{FJ-jN5Dhw+^L(W+)mmZt#W#` zq#yBtK!&NI=Q-9HK5U&Bf8A@nPPmio?EFjX@0NbUeEO5{-aXVK`sVQMZmqhx{svu{ zW!zJi>%iD|^bHK3+KJ^qm=umrUeQuxPQUZM{khdT0`hESi^cbqwz(P{lnr<4AgUoE zB|Dw@F7VD-@Z2Jr6@~?rv9ezE;BhK0Mwfmd0C+PW$8xQGX2#M}fQJ)!P>crsoOX|`YU2qqh!_C@7!0|iHuARprFLkDHDpC4Ej>+SRS8AF%Xy zM)M8pO9CBLsRJs>3d$Cr%=IW9s60xJ!`vT)H>m+fj8M~FxDY6GD0#4p;XafswAZf7 zWI*Bu^%!`XFW4J@5V8ad=aziNd%m&UK}migKf+%gXZGCzp+$<3XlyetS1r+qN=FLn zEnFXRFI4oDOLtA^gk5%M&5swA-*&8fv(7gU>Bd0LaAgxPE^msx`D*CFiB9B`MDj^h zl>D;LBq0z;4*lk*6ZI$ccx{4GBKT$vc+_^;yvV+Ac zxXww!u5NzLBeHJ}lNNvJ$rw|8J>^oUsu(Rzkc%INxA|z@rO!_HvU{5j*RPv0Q&G19 zR~(cnM(X0m))=TbKB^Ew`_@lxZg`-aEYI%w+Xj1S$GEtYl1De%sq6nAS>GLxW&ehK z+mR6|BO{|ow#r_ak*%zZNDA3|kL(qZO7^BmviBxs%id(~Z1SF$p67Xg@8|um&!_vo 
zukSd=aUSP!Uf@}2=B`uIB1tGtE^c%==TF-|lPxb$UcO$Azm)7hto>b9j#L+$ICA=t z@Kat+*^t0w2{fzk?BEfRC7ZLZM&Y#&u(!)-H=b5$0i&b9P4B1?nH!LZ#u?ao9eMN@ zjVWoAz_Sg5Tf9NEBWNgByb0-Mo=P)S0&))$V*5N^BL~FL*w-b#ah;jKQvI6J#CFr7 zOE=Ub^a0Cb{Kpq+3D4qRwLI!kx%PWe(JSeK4ziEJmqk8Wdezj_Am}qAii9kkf4X?JM*9N_h4~A0OuhMThYv81HxXa%5_O6VCTMlf+SaA@{ zxHcLN=qn=LcB+*TbI0CnLdHg|!L#G>Nq4>^s37(iBvSVj&PFWjqtqNF5?VIFfpSxRN^I?q+a#cbk#Jl%HMzc;P>W#kgE2f7$Q z?lme8;#w}0!QVjQh+pfBI??6lGdsX*g7WkguLVuf~p%$ zgAzUcM57mSeG7*Wu#~?#gM-O>?OidGjO|+UW=gP{-)QI{`dh6~e)JCkwvKVsmPpQV z<<)1ZJ)yk{nNqSWgBK5PCLE11AXi6^Wq7u*>ei=)kB34R>pLt(_AvHPmuBrooIjKq zTmk|7Re%Sj?BC+A1;^9=4LY~?(Jeyj;@_WqOhH#(!(w^!<7tE1OiOY>wW8e(IOn_g zNSJB8@*3WqDnI4PRGzCe7pr)U9C;?J?*}E0SvclX zA`ab2jx%YiK{Wow^cvnk{%hDV+t3#RsLZ;J_oM4~mOmT!%RylU2yAJ|OPx%Zg!kDk z@(ZMmS2E#5Wo-a5T@NbhbY+Zar<$E5rw@m-IaEOt$#eJPx*dG-pZAT$e2bj{BCo_cI7fyl=DBW`eL=xsv z#26Aj6t4hxM79GqemWYN_4dX+d@7a}oP}XVRcpVs6SzkTYgz8#+D{yvZeHZ0M z>3>Cyg^RdVO>-q4Tiv(A)d;*zIi@>(5T5uM12Lq&#!ps@^jTRs)GB8LCMIb z;lqcnCHN3M?ZeOjS48gJ94o*$*e;ZcwGP8#Ah9y?Y8Rwc;b4Bgt|S(?spbon&Lhh2Oks3RP;Izi?uC zVdL!?OEm1HS)JddR1V_@+ckCQ%hpXId_G-OrK4POlCvSrX#1n_SZf1P^-l}$sfXKv z#A(A#9BmK7-uEwV=+@L{Dknu2gY`GcpT$Z>uG=pY6*PD?t*aRQ_vN7%KEzY@*X!np=8mrew^h zC;1&MjoE5avB}z4A!PRDmK2ETXqcca0&y}fBrPe~hx<>7Q65U8!@o4J&``7dWzhAI zPei~TZbIW6!TAO#Ev!v~mcwhX$i1Q*H~nf01&=15?Xfq{RY?T}R`#2nSt8`T`|_pK za_(%6Ct|v(#k>x1l8o_C)4cQ#pu5u+=uKDGB^A*~KuLfB_-RWEvi4O^kKzSx7vnZI zqvXY2$i;f|D@}Jk9#-GCqZnw_u0~&cS2(IQU4C}fX%=)=nDNg=Pj9+RD5mpc8eAdw zCy8{dK-JD^H1WUgQPFxD^_l8=L*8ZuJBp-Wq{8*8v_x*ZBO8EpHy& zl#1;!`n4qQo$y-|S-M(c49bfF8JKV2}2v#F!}I!(D8S>Ui?lvDnI7}49o>VCvoEt=CFnDiibc-Y z=uRAg=R4smXH|XEwVE(;zx`L|x{aMK{fCtX5P}a#xr)NZdngHkqw5&Tsu;pl-t%@S z>$^8M3D>7>OqhuyhY2vONWI!iRygALsR86HvP6+fbgd1oI#j-mNgw8Qgh!_lc5IqM z^hb$(tt-$)Lk?Rt7>vKB=7{~l1Qk^-K|^$FcS`#PCFGQXm5C#WHUvTCqWb6?ZYC2@ zBgkDsmsx02R63w)n9J+Zj`X`gT(+ohwSuww<;Xtr^N=}73_}&%tB}5OZMS(mvu5Qv zsW{#sB;p6UQ6bMG$6F{vP`$t+N9BHAw&%o|SgaVlo5kRnv7s0nWrF9SewSVhr-la+`6mr%lpEn}&6}L%l)x+F_m*rVA=0imkXC1ewGY z)xE~L(Y5jTS)ukd78x4IPWvN_&>icOZt(vmH4ciE0tV8gIOZye+alKBS z22a1%E7-y868#-|<_$UP?UJ$9x+E|S5dP_6g#8W*JBhI;-U>6ZxABemfHi)tD(AUt z^gC7+=f479N(z;r3jfwrLJ}dqD1r|L&O#p(nB$io)iaoSxohrO%t`}KBw^|u{dS!jKg=~XY5RET9Zj`{z}xE}N^QVF0$zQ^Ia zPQ4Y=Lha2itLjidIP^;k|4+7mK?;ddHn+(~Dnb<7R;p}4^-1bFZD?jd7_qLuY1M?f z@5zsyl=f0~FohP3{getEF4Q$8TyLYsRUs=&rQ3!VmV~)c_-Ge=+a22$P@{wG_KFx* ze=eO6hi|r_KEn&=f<`AD$g;{(sJ44){I&IHU$EVjNz>yf$B#gk)wgf0$2*SrqpZdM z2Me-8OB~(xR-1HQOT{M1$#|O9Wz6V@u+Nk;F8vY0Fi=*aX+=fPW&|+s&AioFx55v< zP@!o@*O&en)q|EcWdkp)sa0Elwu;iW;%8*Twz?o(dNV*4-NMoP*L^vJB1*BDjja-( z-BWlseUa=FQfm+s-{Ww#7rCw&&w^xxrMtFj7k>#`)hJcb>hW>oio0V-#wN4;=ONwM zKn@a3D)3=QNmNP1YH=3CORIZ^S@oI->NqxK2Pv7Sp$`$tDDaaumMpd`FR z9If;=lkvow;YWBW$@84eSFoZt=41W~Y{S;hT`3Q@?hjMT#FByK<7<|;qu zPKLMw6de^;oK$SzC(d|6P{;tXBQ~K5*~ntwPP>G({<^P=AMzVd*J{=!)EPC1a6k!`4q7DyRkZwIbMZ9|p7gfdjSPT!mf^dXe@S zswa1I@IUf$Scl4GmOA@_u5{+{@i782>+LOg?RRpHE<(AMcix*}|HIAFa5FwZAtG4a z$FJ&k{9mA0BsRNM&&P!Y3FBI5yE|F<)!(6$0(~qPwYI3~N!Mf(6H`5ciJOoAN+_rsNiKqTnXb>yn(^hGngm5T{|}3RBll7 zGB$!rc?B*QY0*Tnjes!@oEf9d4^_VMFE5#1C}#AKsK4Je^1K)Hxk@m`us)RnfMVbp z{tNFJ=bl_aZnr^BR0!(6U)=RPA=k`Ax2E0XH{$nK@?Edzz1rv8?uU@?+4R z9UsilxAray5{Xq;gk;&1TF98<-Ysjv1;>vxanqcbysdNn0&zC6Z)ZaH9#L~|Lo>wQ z0*JkN3mZIQsdcV0*UDxGmd~zrN0E|B>C5Ak^JrAbnLrN1GQVoGrnIYkfDYK+(Uitb^9Sx6{ur?9xm~v%zB`gG0jNv3_N2f=RQA>`xXEi#Qf@6iz_!67O zEc#z9z$O{m2MjApF=6(Aw6)Ru*63a%C3uRI_Jtb9pSb?(03>B~m%7a$jz7yUUu^En zCB-$ec!BtU6xt=ietv_Ir`!{#yIorW5#HdAx0^`_Q-cmsG7>5JjUuEJR8ZM=OI>FvsMJ%mrVLOlOAVeK7Mx6iV2#Ww1) zPv@ruN}Q`S;c;5hb;J%i*)xL_;WX$Q!d}7)Lz=T^ZWV`5xxCEDHs#>dzMKJ(AmgcM 
z$R?2pv)u#}_L+esT*?cn-wnnHE~f|&Y$by;bn-m9W3E-CYIh@N`}>AI-Il*M4AyQguEEdqYB%0c5>1}hAOv`#tI{Zv7-nz3U%K(a?a_X5XGDp;##_Yz zSU7|GPgY`AzcpV5r_H%4aSFF@Gx%PtRuNsfKN{vfZ(m2bH@#tqzFYq;`lOOJR(X9jD#=y-{HNTJpEdUZ|M!few-T7A?z|mhDDFxlJINWQ3O8*E@Av5ZwPy;{GUI61`mz-!4;Ja8zYp_oLL=9YCyT+VJ9X$!Py`~b%wAv zVl<<@C7L8eO-+h?iRUMpTY*C#ltve$JR^&J9Lbu&S4CK+VPS6SSTCas9!Tr#m(0@O$VD7nD(xp1kyz z0F1)v8;!_w6pW~0Q~Bm5Z1CJ-#fSd8bZ~kwT3SURBs&{CZ-!Ue^wYB)Rl<8KvzYQK zn3%}{lMC1wKUbkV2>ex3KD=Zg|6Od+g^%j2;zgL}67^8UsXv}D(7sEF{GzhsH zsy)XIzVl*h$=b(sX6ou!tnf+qI3~qkbla%ede=Q2^^2B^*!KPjT$9^!T>tpphCL~S zH{xsvUJ+bZ)x6Jm!m|FA9-1gU;X0{{_HZUJgCZEiI`o+HWmIt zu;Rd1F5YiW?!|!y=rv6cYMj)v0$gCNJ_z=rqq6c3XigWUcy+$gWt;9cNDE!TCab?>gSH-~6x;fo{GiflJjK}OSk*(vQw7K7$@zLzqn?5;9=(0PPU zDMX$+GsABhn}q{6(&v6EETL5AW{scszP@weExGyNb~dSeKZ^WD(_MTq_ks8eT{~@) zYPygo;l_}-<~nT`FzKY1W3Assk0O$IJY%s3k3L-+3Gh!X%Hq<19xPtmkL%0)C&$N} zz`#Q`e6`bd=`DA&Sv#8$#jU!j>>Cqa=(lu52}l| za&pQ>dG$&roh&&wk&Y&?je(~>bXCK|t^BBKFQb%#SduPoqmGHZo^|`ZGJ`rQ)vkr` z3^_m(kik)d*zolePYxvu(;`jI=Y2oGF1bv|vi~y9mh1F$dTD7-JJ>sg1<#*hOPj2V z?J{99qBSBZ?XTkB*DIkkORX7z-AL2SA@_1EoD?)bE$KAZZeng}4lwyDtw$xi3u?JM z>tx8fhR^J&-!*{x{<1bt3A$_|>~2?({voYWysG--P&~*G5ZV^<#?7X9@v5EMUivPF zX&TlJu8J5kqb^wfh}J+d?G}(a=J|XmQQA=sg%=6#eEqos-y!s^j?D3$+FPVIa8;y{ zcGPZ9ii$Si8?onVT>M<)rtyHvZ@Zl}}UiQ!WZi>uXclQo&X++P&`)ELC|5Q?kV|PXU z3oQD6>)VyZ4rP@iAA!ByP~Up#>#vWGRpklB4|n4_#H0)Yeq8%m9UQH73iND zK2!SgtVk2C?=G}p@r53+rX^x~hANWk`kVJ%_P{nR8noXR%zkqPGGF#(405kSbYUvp zMC$}Tt@O5+yjwQFcx*K;$|emmGF<+s8?eEwRk2sxKAN<@6<5Iq6$4R3&UKsD*uQS=c2w4iWg z*Ur-Cy__X25h%Re6V^o(9F>WbW5)itKnZ+Bu zpkB%ined|%j}mjCrP7(X7mSwW7)NM8fEt>QxdWvWM3t2xE#|#!S#fYwq3WxdCwao} zsC;H(6BjOgJ?F*f``r~@lO=_3Ta~|OXdH1qCX4&9v$^x0@RjPmG&ps%12fn>r+MAr2nemG&ps1v1T6I zwD${+z){?|TuzChyiSTPv%&x4B9kiv2N@6@&h{ph_H00CT^lKGE8|_D<3m5mpA!%# z6B5}f29KF{FdnlnVywA>4#U~};i}Rq&u_hzm;Q=4LIh3cX zS6Gs%4wZ$~2=zUih~^j;%m>CBWtNUy@2|ueu-p(|);h09BT8sycGLV$T#G4?DgoCy zKMS)`Q`1H)kYhxt2@gB5Wkc(1f8kS!TaIrW1pB~7qrFT4;NV%lUz!6(>bZtvB)ZsF zI&2hq6rLdv%xs0k27hE`cgAI2Az`ep$&}) ze+ARo_4Npur>EjM-FO2tisuA~#Msp@xUG%ohE_E-foV(2+ZM72gkjvG#P2gr4xzy{ zlEVwVaU)@v>E{ciYu@yc^>0|3B?iSGm-Ui5bYOWwiq-lq8l+}R-phVz9g3>qBlcn2 z0+L~6c&On_MQb~@oj1c3VoD|PGUJhW-NwIz|A7Ka<)aiR-!#oE9(=JX-xld{n^Cr% z#&f=CP;Hsjm?;`U3Q!oC)jT%IM{`nb-a)hK`;-fg^U-NWp{zMW50jHbc;cdZx2qfU z(58F`^c_dJp66}l%xil|`^M4JUog8hJs#Q?Op{sxSGV=6!`P7*ar?G4=yQV@2{^WL zgTf)snQ+NlKL>)?{TN|F%D-#l^G%-~Fi?8X>!Ffx@qZsCDu7}SF59bp>S;r_cAtk% z7)Xg6tfOZ8MzMxB5^Xg-f8T_S4TH=l{r%6UI2&dtec1v{6LPPdL>e)!Cp6AF5ctMN z`S}d*v^!I7(R*vNfefd%isJR0KofrJMV~+C!&Q2|a6!cFe5aILmVK|I*Xg#XsOTuK zF&=<~_JU()RBE^VIRV02S7n0@t*_1(?_ayNzpH<`tmYjDIki4Dh4W9cy3j`B#o*-& zFFxistglzM=iYsW@DZw)9XxX+Ab+0;GC($CqLKaDHbNEk7hrbXq~S26vINk1{>6*c zL_wJz^x85Wwk%d2G?cFbiDH;;5>DrbMo~qY%evC}3LgX>d^0QX#Mp>d53%=0%PJqk zmD#^B(mG=M=5pn`*pQSTadQG#&Skg}PYvpZ?QPgN?9UwvqlPT5n10Mhq&lk{ByH{> z^wiw03$S^q8}D}pt0@whs`@qc1E>!;QlSvpn8{Y7Dmp9#3}$WAG9CGG;%1KEEfZA% zPaFiEM$SSO9MrS+a=py*2(s6`REx{H*v>&#x_y_XvTwrStVmyfpr%8%@Q+mrT6+#>)t__IIj-?M%h8x1V!Y7~HV7$mc9={(_<|id z)lu-bpFcF<$nS9Id~+8uo&-Mj*;*cb*)Enn6DBow5~Y}D zTh3wJO7;XceL~~O>&a8GMT*1HsPmv%q_MQ5r=(m z4H46b!l@ALN2sR16gF^If0@#@^3-=aAR1{gvyTDH>^|TH-Km0|cAFcS>k8P)%>9OA zaD418t(6ld;OX+2N=C4>NbFe|t&5DysrkY@A`*>KIn!^5dMN$gv;D>Mt!wLda}yS0$>HFJF1RXdi}V;_GM&oru$n_9~F=IUS6RA+Y^_H4>)(fkhPR(U%bwr)eE^?LNUKwwZVQ zZ_75YvfpeERwq((p&kS*qKdy2IYGJlrsJ(Y!&Ce5AhD%eQ{DKiniY!S>7KYP9tpaU zY{#SUaA(_jn~&)RXp{tcN5|)HwD34dLJdDyHD|f9OUeky&9^s0{$M&!<9f@uKdn;s zo*x$y<($WVezB2U0-8IC{VTLoq z8Y0F}p3}iI2znIa`Dp(R-er!vxlQx~Hv$E2*Bgj|$>I0O4G)f@Pb%8c_FDvvHk#f@+@2S9(L*dwl?e$( zb<Z2S_Ln)ptV2d1YAb>a~C*03EGyJUF2DXzqHH{1;=jq^4Sj+_Ah1 
z52O*KMNTH0tU@#5=dg3gCFR9v?QeL(gMPi2!^i+?(20YG#HyLsh6YtbU z=``Q%&CFk!2MOFvgPP3enlimz+cv0>5Na=8_2@R$NIz}^O5OPhfhhl0@q7va7j$>U zVeN)t(6l3vdaKB^$PaTODxkEJ$T)%e@W76lGOHiaH1*K|>=O|8rIXl4AdueoZ-#;C z=&())>OYFl2tZf>$KiXiC47@VRTc0Yt7eJfE@xHqBfp`&QMP4%#G_@$;KPKVf<^>O zP%qEkmm@>8RZ4h^nRSH>7D$QpC4Oc>iEYrXvQT%hPs$%ViU);#ujp+qxfe@2IbXff zJri$qD(8`S?_q;R%CNxO;Zk;?Xa)D;9c{mT`uLSJ;QrBL7PjZ>lslKW{gT$vWihWV zZtVRVfS+Us;xkaWG!AWQY5l6;<#W2Iv`VD?d0z>PQ0u4dSw;~^H|ef7J8J1| zAZH+(3R$}fG2KBH*L{2NB-_nzdEl?c8hT!+zfIk>7WVPWZs_4xPD-Gvp4(wVP&jFK zPzy?(A_GGcgQC7u?lrEw$^98Db-r>TP@DlZFaEJRtDN^dEQ$2RI3{$(JG8VVV;OFO z3qcwPd!NJ7ZT#vA=+hZo_r^hFsPa(z+hlM1!NI{)_o=gV@%-!Tub2=+V~t<@>(MOw z|0Q9NcyQiP4{)`h{JHEd-U(gADcc|WEo}|l53Ywpyied@7DJ{#+MhN=+TTFOP~ObI z7-VUwhk7F!^d0@$eXMJB8fKO7_P$RkUcbx3{G-XQYtfU0%c=f?73I_62tABtwM&{3)irBh1i? zY6z(hN<_(6`dpx$U~N+AZGwB3)m6`Z+vu&36&~?G+C)Fv9+y-REnm6db-KOhXdo*a zZ8ruzwBtVs75k&evC%^u6jOL`>8VmFRG`p1m!LbjMLw(_f`}?rg`_h!3bAWtZ;(a9 z5m~=M*QDP`nP+7KtiAU7$lYH?Y6`$@DX$TCy`4b!oKrL}PO{uNaS9I?rW@tEz>;l% zba-mpcbMQ$6NlBr?2SO4aH)^o%x&*KEQlf80%~Kt+X(cwV1x0W|0Y|?A9i$~7HvRLNo#2HZ@1n!MajzeaDI$*)MjDeBGZjH{X8*t9sfF?68`?rPyZ@-fM9InMg23Nw6ycwQFItuOx13l2?lHXL znj@pahd~N&nDQ3E6OL!I9bll13q8pK)j9{(#m0ls@+Do64lFwPNNw z#w7Q0>ABN-!Q&T1Q6Qeu^&VQ2yOrnl8;Zl1oESJX>~2K8-FSu#v5rfUj^5yxVCkPM8l50aLAepO4grU?k)|G+F*`oH(g_ zvZ%9Wq5pRU&1a0a=wh37q!`MbgV;e0oe*@8Xb#Q#8?g_luoHXu4e!kKH+gOa7nCl( zd)KP{$3*LAg7B1u!~VflU|6BMWc6bJ1kUtfoaDeC*);u0rt*`Atk}gT2$Y~VsZZs9zgQpZ-1z==Q87q%&C2K<{AIq-o!5-vY%KF z=l1qr2Jha0`gxPRnZGj@f661C^J#|+TjrvnKo)ZAr)$eI<4oX%09_Y(cH}9kMupGV zpjnZgNp&L(_Ai*5*XIdUj^=(JugnK!pA%K(8uTO zs-gAYsi_PLEVe|UAMGx03Vb5tsI@6r4bG<)oba(NhF5cK57Aov@_Fz+B`EL-Y4(7T z**n&CUp@4VN~-hi;CpVx;?o<2kiI|MThtTW?SM8-jsB*|%SF2s`ArH4(%_SclCe`? zpfbz%*rKRU9j$ncG{!-egJpT*&SyJy9${s>B52r-;v4?!3+Tl>ewOs`Jk-$@B0;iu z5JAfN>=Q(?jT4t*aR|Bz>*P-a$b1*jD=-_>7?G)%3>*{P&Z~8p>uK=q&Aex0`*R1l zJwE4SK4d7tZR2pcQdvPVr*2Kjm?tLMVCefIP%F4Ff=7OvleFOw2Y&m#h`V2(4XDfF z#(vVgO|TG*c>UrNGUAg)L!T0!mte(|T!ZE$7w$LLh*GB+sof}S0esY|U4RZ?raOH< zjo~Q|kO$9ebo5?8sRA=mFTIB+*J$0S0s4GbavfZ*M_zq--}8?%KE<8;TOonMh_13w zx8jwks18yxNZR_x7f3T$1P6+V5moTT)DAt+_)CN`pUz%%=!r~&p$M1&Y!zrfIj9!)1!ILEpq_-#KvY?^5W-{p9UaS0axA)w60bT1l58Q_%QdA|{R=+bZ z`=?0Gsz7X&+Ff#k6l%PW%ZZ9SD~k;wM96Ex0M&j=#z?4eG>yVCwCYb3=(=7W{yHw1$NTbJSW z3R4|lZ;c-2>Obz9tgE*Wiwpr^3gmoB^CJ*n<9g<+<@lWUyW*YaFY^f`^&U(7_VZtoGT9X59izk~xVooTe{U}{ zlv70@i2RAp&E5DsBXT*(C`h*(s#A(^9&?&{{bw7H>#f@;)R|II(fsT{-7>~9NL)w( z3P049?%t>` zM7HiZ=3uynAfpzLkUW<8(_T5{r%vpq7xwyQpe!zgWb^-Ukc2(7ab>Hy@QBg8^#oQ6&d;p(e6&>I`v z$wmiJHAwVxE%dt*)^N%}vP_z{e$x8rTAN;c#8Jk56@?AwqQ5f!5cW5-%L@N$0enZH z=eqnppF&M_STyhN=&OCx9z-~NN!g;O>~3fmxv%s5Uh#wLQFq(IZBVA2t-L4ygICzb z28%cdrX`HcvOYD1GHR8ZS4LHPayE};H7wL#p>T1S0%_%nS>8-SGAI&}x!s{J3gPLi zs06If(&0_QVQ5)LJt)$9Wl1G|iQWF>P<0c6Ca61Ind4IeuZI1RPlm};db5K>!ISEL zHC~>3VDXPFEb=r)WovoEzr6vL`cOxP!KRd^d?B(_JhcvVruXhT&Wre*7HF%$#42U@ z&y?K}Q-_gP({SON0WrCp^#;nT1Wa;(U_-`q{xm1r(sF{iIX&5>txT~ELaqoCoFewG zXx7>GKG7(z6Q9Q=D(|Ffv=n)jOUvmY`?CQ?_;-V{)VSC8oN$Gw%orL@Tl`Jn!1*d^^$@a&S z4m4a)rvSgQGSQ~|ohmbv5qJ*s*{yohf+6!~{O*(!9y^!7^zh|n17V1l{B+V+&$=+E z2dboK&tonCIMYoM92Y_H@SaHI_c!+Eig{P*l^#Rp-CP$RxO@0~!@%iQh43}2UBfF5 z$DXGVsn$UYc>gkhVU;`O8z9_SBJ@wxJr@yN-vc7)IEL;sX}{`8ASI5WwCk%5(ulYpM; z=uXuBytqGrU^lE+*0XG=N0=Mrm8=MAD`&wJV z^;z#}K?$Q-Ztr?DZ#uST+xF&#?0dfMWsc_Ul$2w{x@TzRnqEC_P(N{l!Ov$Oa~psb z`z3of>gE3kjz|;xNBdral-H!CM9z8UWpKyF4jyq^5QLqF|0+c#RDPfMZnnNsIqOBK zRt{qwU8}U6=44@0NRg+b7l%5|7p6O3nH_ASq%rue58z4~qJ~}Rtn56R?-eDLyhQQ> z5`ChsLq86$Z8mTy6mowKu6V5Z81_sf>4UW+3b3=8SUxN$+I^ zLHSx)YAb)#^Wfh8O4geUHf}W6@i1*hybeI<8FV1A2DPuZ&obtpA-251^H;TUnkugy 
zhu)?vuv>O;a6d%d_Dgy!K&^N3n+K_xVFlfDzuzL=y-RDqha*dLwRV2@Rx;0K{GI(x zTM`s@&Fwd=n_sW#ybxdBG^uvLN2zfDl(N(6Hiz3*5ngRvv6~8TE`J31DIjZS3wdWGk(bDpVTuIH-57qH^mI;Z2hUjIS?$x0pPt$df7%jIYRr3 z@fE~7Jc#2)2f!hniAFoGP+}VHJAe8&v-vV6%?>;SXaIAUp*~F^`+{BaAYF{7PjsCf zshQt7@#WefJM}vYS z;f@jMxj+3KKqf856*dEEyrlR$^|8ZPb{S3}6RgdaQt)x}U{mb3>Pa|U# zFGKOeTWrJ;O#nYX{!e#FLcwXWbYO5Ua4dJzA30cob$cxaHTseXRaeg|gGuD}c!>Lm(-n__Si@Nvj6W)7jK zt>dM~FYB(t&aw~%eLR>e|O4#|n&xpI2un(>BIgIsCG$4PKPCy+6-UfC#tH(8K< zbXk6ZIcnv*z`Y>w=g)wK+b{RKvZ4fk__&E9dGR4xmeHy5T*w^mNZl0u6qP3kwC6jO zlBpW(@t0_SP8fAZ9=M+zP8KKC?dK=OZLI&KeOzUyVt23aJTe#4;_f@1lW1YTtz z0RDt^-g@upeQBVrsoL%eKm6=O+`?tg@SocM!A4P)SiexX!Y3Fe<#erwp4C+{Ya(Ox zeOuN55OE;MWE&cA~UV=ezWcnFq`|yB-?lNmh9XXq}X$p6W0W@cG(Bs;5giX{E z&r1jt-o05MP4z@<#G=-27llexKPgGwXe?R5e&9VJ9a`^+6VP5>k{JzA%FtCe#!!Y` zrlrvUu6j(1{z^Fh_vM05v)a9MD*L!7tdPsucaGhFcKMaB)qZ}n?YjI_^!W#LQRkuU{a+!M#U$6=R5pB(S12fqX3H`t?5jk9e5r z&wb3%A2D;?A%20y(DvZvmZ?-{Od7FyWGqc!J=RO@>0)jAebop7hG+*%BbzSWx@R`% zlF8hR++pwsY;XwB(cir*`e`fLsXfuDfAnEYYq_GG@txM5?ZjSd+=4eTube)NM=z0G zlHyTq?iy>5ww3X2Zw*w+NS~mVF}fIVLk<4h=J&D^QD~{k@ZQ5^Zwwa>6$44aOGzk~ zgM-ZDC`KR1=nr+J~#mCPnf)k0E^*(zE`ef%g z+)@4!a{4?-S3O7WoD;y+|bRjwp1D2UZ@|Ukz-$-@|NhD{&ktUzpoD3Ng<=3@1Ja~Vb*+z<4Kwwyv z$mRq4UYT`?!-A|bZZOjp3oBdd{O$?G7)ck?F_jEEZ;jXWj<<vVY^XcJc2RrluUwUET;33e{KJG{@LWhOp4d2Jidc+IpRCixF1oImfMvV)ly=0 zMfrs+l(~C0OArM$2~hq1ebaL`9Bm#kulI%1)>QP~uaeJEie$DcT*`eoHGe-KD~q{Z z@sqM2+#FbD_^J$FivZPEJ2h%Y{8D@>k~gn2o~sKFl#eA|O67iTKFzsCe)ScTg>0f< ztrk1c4_OMM?7I(YvY5wQmFc*%F-TZN^<3doa@1ElzcyIZ=&xUEz7i?n!0eTvyD!ig zc%J%&S?aeNe$tf(FTq*V`a7`Y9Hj0r&`iqx`sg(J^GJ;MVAoJ79?gQBH(bz%y=v^7 z6j5hWgE1E4HKyL{iS|mKxhambY|Ykp8I>^!I(roC_bUy(*2fLv9tiX-vUyCqyFJu) z5g<&FJ2#fNwRQVWPxtPfHFAIXQ0uOq_3*;`j;a}i0lwcII^o_qCfXCg+!fq}T6!aAA#6j$3}!rT~a>ITl! zFUulbTko~q>TU;qt4VdK5Lq1$OL`yOr_Pd}Js191r|bZW-1&StauX}dmR2pV^xEpB z_?{VDSYiI7k8`m;5;QvZw39QNQ|g{FJ!I1PSy8ZW3HW5 z^{Wltf8t`-xA$E6R5L@}@IE#BK$hywxVf!C)5>D_RJQcgor6jjJ&TauI0tsG>per$ znRE}{0g1P$EFVG~OY-z?x1IL(@Um0sxr{4wn_VA+<<`jWK8|>^sXl@coTs!i_v1tM z=urpWH#5`cQ|0#iWoQ!kp%F4NdzbihE#B`I29z10B=MU%+e7U6tlrAgE5=^A8Nj$x z|9-+shVzlM8~2G2Tl?iPO)KMhGW3>fWzVF)k?T})V7Cy8k!7L4eAq_rxm}x(x3ZDp z%iO6&6XqyctDN4}Jmnpq6IQ@}P{ymF5t(1m9Vm~>ll)BJho{RWNBohc-J|E7;V#-0 zPq-e8>&mvT-^>seTq-kqm-G2+G`EdSsnI!aVeMc0dUtipbgnE1qi^I_GyrnQ-(BPpqSy;v^O z`_{0Tra+X=F|9LhljNBcUOX_|6~J!Khv&fO5a&wM<#pkrru%VY0e?V?TZ4zG!AiMPv%Rz)SjjrsRgWK@aTEe zt%%X1HlcS~U$;|hlJ}pBKVMu_*VJyNZunxeeSYJ@m6uKi2Y7aNrx&Fse3tGU>`t#s z2X4u)&4(MfF8qmBlQGLpC&WR5XK*5FJ}78OCWveQqX8qSXbo4E=f_*?`ET6IA6Db@ zvIKlw*|oTBzHSnGiph2};e}+3E;*^Fvw3#*TH%IswLigfc45`nthyWIsn*5fUkx%! zPcPG4f6sjV&KtpcV{kml+Z=zG99VQTVdsFdZ@J=?@e4CI+@`$|@A_4N4q8K6Vw2B_q4}Anop^TBQUV+wU&w5}8}7 z#`A~v*BPT4YWJO7teQB=jNVRf{?OUr?3Vm;Sr$PN-3_*z@V2r7LY$a^KAV=$(GYpG zOD(899K^D`H#gOb3cUrfbL)G!L#wfj;Uk8_(wJ-IdwOfdO~3w69+H!$)NfZ5&x` z^S7r}?oUfEvOv&pR`AWn5c7pEi?S zvX=QA3ak7U$NyRodr)24fpG2@nN^FylwDc2-6HoIIf}UL2S*a%8$cC3Y@*-55OnsB znQ0Yzvt06Smy4<#W0roQE5ImaVCLXz_TC(hC8o8#gdFP9ep-y6Pv43vN+G>4)zW_Z z&RU8;!IjX`=nKpq!TqayYI9?L#P;sPVKENgF5HF8f*cWo{%oz>)i>d$?^KnLNm<4P zg@;R%Wug6Cai(rkGSDdamXH{6^I^U;6A5!0xKQW7$GSeRa|9Ir=(sp`p5GRI_uTBh zR)q=rTg*f~V)j7)O_!0VD@XflVe9}m_GzSbE&CekX<=iH>ba#>uc|~Go%b=r#9q^x znhGKuV@D?ieX|cT1>dSkJA@lMcpLuy#K-^oy0r`+Z}CMUo)z58{hguN5&7qfZ}hp) z@BmUhTWl79AnnXu$XSGn-a(?42?wWq;*D_V{$BRIpn{Tj0Dt+g8&Qp)d!nxjLcN&=hrLXrRrGURt}S4`8o! 
zNYtM6Xv)W=W)N!O$7FT?T=aj$!LAohG5z!=t2^LW3vzcej)g}UcHYt!uPr&!(=u#Y zP<%{+9Gwrby!FlZ*&H$33lAyJv&-}Td~Ep%?FpakGbRX9`w7g+!aF;o#CFSLO*km` zhK_Ho$@>)V#ZQdN7`lRHTFMw?e=My}UkQ4aO-!a;;lxF4`kZT8HA8+ot@p#F2-}v= zVGwsH#wpBW*WOwOd*V8lkHT6mRvn3~ZWZEc`1c-9No-zxLVD?vl+wu>7MvI9n>Wc` zm~!8KaEMKHT+JRxHlUA+d04F}RWhMewB252v=Ak56|G4L#i`qrb)QsJf|QK9fg0`5 zV9wEop1A5+qaT7Eg$4WguD$tnFEb~efbTi8*m`y6*v<$+je}x>aP=%x#Ur`XiNn5i zz1jtxD;rOqc3Vz(;M8bLeE?USZTmhQo{X8<9yZM+%XaN{3{@a#Jf3 zU|7^Z_*1IljDYZ$mVcN~<{Lb_P#dh|Aw4xo{^Y@^2%@4vLSj z*|gEL(lau#5K%#J$edCXe>M8?OIAEm3vnA)e)z>lE%`ga(%1)^v{04+dul4T_*0m8 zpw+)%a(=DQ5Mp%(eAq`k;`pf8Z}g3qAl--@v!pfe!KSAYwP<4b4)IUM`~#~Pfn}4% zUGhwM0+Vui%jmFB^;9^{*kuQM#S$d;>WF4WRUfmd)U?Ky;;Gb>hrKc6R@%zhu``3G zC-KX2vtKz-MR=_ba6{}($E+w1zr^&63~N4N0j(rY4|{f^*hjy~N}yV!pr`*q-y|GS zUV+5C-ptNQW(@fcP!q1=+eJt8>pmT-NDx=})=^kRHpPzFpcBGJvP^~BS^5a`Q$(;6 zsWG5bEsM3e-fA)ZaFrRR^!<0yF=m}|E(&TS00*FG>G;_R)dcIG>EVydPb@AxbJ%#k zim&zcr@BTDWDI47r!srlBAhDcwQd8brdXA|tCo-y7};GLLO}e%iuPVtQYSt0BUjSP zs?0?#-l7=10~r~0UG$g@g1XmzFJ0(~+%cMdqMyWnC5ltGAS>*-^J~Str<-o7JTUM; zmE-gMJxiXrmcLRvHB`J8<3h5mvc|DrUB(Z3I=aO4*AidH;&Za~{0YTUnR}xh^ib8G-lgT$>z2vyD(ZKM!d**9%l9X|R-n44YN!Ac~7@O6z zhwGAGZ#yO11`Q6+1pJ;u!E^z1h7bYKQ6DD zX7D}I&sJA|r_Y0d&ey>xV8&J$w<#=4XGPujp^Z^xPgE3joen&e;0SdphiJ zJzj?<_~a8Yw5=a1UNz}(Mi#fb;#xdF$)trK9a%zv3mV5b%Kz$r#%?snnDMxKsQAlG zK-Tt^__0D|$C^$)O0&PMhhs;7NZ3xR{vust!x(k{bk!BdDy?&b+OV;xidp7Lz7~ei z+EYp2dwQAJ6r-rg+MV9e5wh4>rz)$-Fq#oQ45q;G8V3Sge;x7q_u>lj za51Cl(1(t#7#N{RVxEGoIJ|e{v@W{htgqQ$(?J~Vy9?=pb$3!OGq5N zDu7eZil19lPCl=*zaCNy-5AvgS{02+7V&;B{?dG6uh7#-Z|m|ZJ{e8Ibb9ptPHB1~ z8OqjrVaqpf7a17b$alSIKg>*1jgUQQTc?oZu@zzu{FJ}0R&;iZxrOOJcU{m{Dil;G z87Or#h<-c{diXHl5)zqs<)VWpFHWPQB6&VJ@H%`lKvr7ra)aTMpN)%@RYH=nuMK_i z2Yv6@E_&GneOC3^2orHdsiQFQm%R|l*>4!GHbv>KLqc7}kADT*xBx}UR0mt7S-=gr zXHd*nnqla5NWY}^^C$_qrKknu%m^RXl=4pKa=h`QJa^1(U4D~o>zD!M^OuF^)y+y@ z-}`74tGLRSmM8wJS1wMQFt?Lu)v~^rIOASOFu%hz9V*z~V3YUhC%V@tlMf&9&QC@( zU`Z>Qm^UegUCr+NLC?%Yy}ret^Pd-BsB6krRTa)#w}f!XP{we-_Bsmcj}bB^ez<;z;D99mumoSxPs5{loO*RcgBwTDqG%&!qLp?Yb5P?8&!mKVsAzbtQh%H}yis%mWj|iw*Ko6jiU=!)6NQ zo4?caSVPdZb!kgG@H*bTdB5wn{<*z^3eUCS62xq;pIViZ=ZGqq2Yc_csXHiYTw*N< zHvc6?u&?lsFq5D!DCnTS(>SK?mfRz_*zdz93553c_CXkxuH8J|BCEdyh{-H=;8%JL@pEQQ&eu>`#nNU3U?_4*Uqo+XI>nsowB?5;T4 z0Nv;Pz&&M#fuAs|X?gM68E7xQK_Sx=w5q=Ght&H!eWTVoLwqR0v)x}Mu%+q@R#(7d zEQ3Rvf7NtFb1Ap2-*o4a3ecqk*<>ff4+#{;6X%|-5F{GRT;JXXp;YXrzBius3iYidDmyHZBxJ-@TcYyO%zD9Zp%B$#>F#1mV zMnQPNDS6|5D3)TDdn6xW-LdUYh1|1r28RHJPp!Q+VgGgcnZ-fI3sU82By$}azNel_ zO`jOBZ!Iopc-OaWpzrwYRo9l#fj9@VH@n+pAaxXR*skV){VM*l_vFvN5hf2#icTBp z{epuHtOgtuqf(TTl5APhuPoKJ#V>L$KA*EyucONzR_zp)Y=tH7F6UU%TM7deG+diz z3rrfUIXKJ8^z=*mK7q-5gfD4c6xYO06{OU?bi5rB2a#qelvxoCv#@|vnXy;-)%-+LNHR2I^K4>CCvBfpKO{Zp;1od&|qT81YO`R7} z7M5Fid4QJPPEHQyOwn_&%Otv9IeJFgq1cQj9-R3f?$G=9z;A_of!e%-<3VLGV9F}9 zK|-;(aLAFNd&Tm?T*fW*j9d_K4`Zai%*9z87}*r2Bn&@Ne93vP+gx5yZtZs7tYp4)NXTsdK1=xK3&0Nj zfXE*j8GKJP1xk>jk|(3R^%Ntwn4;!^dD+_M0lAYdvHx-8@Vb!V;fw&s_Dx=HzD*JH z0=-jE0efsIku`xIq-JhrI#((IL9TznXhMlT%Gc17SnLGMl>>9l-agaJeTCGzPkEVwmUY;DrBvms zy`M&wf9?TF#D9{Shg%BMLt0fAYKn;RjRtsLrJ;+v*BfTCxydM;#&6?--n1TNJ#M*p{)*+OAx%AI% zOlzefo|T>cxOwZa>%r&!A*e1@p69ci+Lk4rx{Zr@OqJ|u-+&nGwQuah0u|YPPyBhv zn*_J}IJP3hyS*s=0RMA{qb=g4JLntxAMbmoCFT>5Lp~#c8xASnI+Kn&_v-Yqch$)xo zuRB2UVP_T(0^9=cPzL_gvGpp%jv+TU;CK6fh=8Doa&R|YDf2q91fKC>x=fB0E&a*1 zw)+t&PtVwZQAWI%2haXEReXHUzSV8Pj;TgHEds;iwM! 
z3JRwWp*S^hZ_1(jpNB^uuSXnNL(NVroX-Z&2GED*L$Te~Hn~1QATZtXyzvMhU0gzY zX^Phy3^+pps|i%Xu@u2ANT$8vL%T*S*G^_?@X?yBFN5x`14a z*(tJPqUlCgWmdn2@BvW$0u{=aJb-l(nC6-0<`KT1JqJYXmm)rSwKy09`IcL>Q30h@ zEVnnVzwFbI>bO;>OpTS}%_x`Ti^(?(Bm_@f;7@-~2VCI%=%fn-da?Y?&y-e)E{$tM z8QUjPxjq)3GYJn|{5C4SxMRI&qapDeoArkPrUO;#tw(S6|4JISjVD(kjQ`n_AH=oX zC>x;h%!+@>HR{I1(jxNYd3vE&)CHyoVqxcBcyGNX6Vf5qoJy(|Zu5gwco5K{Jv7-Am5pWF*+Yoy7|=yU{+flDlBytNS% zoTEtehFPg!ab@?awrk3rzV#yoYWSwn0i%y|Gqhy2W*QU=?Sq(!N|r>a)%k{8YxBcl z@997q1x@S0vjndAr@u(6rH}eo_R1*o?P(3w?$i3{m)enmgv22d%?HPsXuNJV3W*f} zWse_|9}kJEonm+Mf`j%#uQ|d(H=0Btkc2e6-tbOBlRi)F9rd5@{M9sWn;hS$2V~qx zFH4j1oy&8~f@w*R>o=gzM?MADc7F|A??byqcKU0^zC=PcMYDR8KXN#kBzI&gs$@!? zs3Xx6*6v2!iUV)2@?VlkAd4v&OoEVijM;k~D~>im*Wi zutluHJvvun{lU{2vO{!qWTly}{MeD!3U){I+q2?(Sj4S5Nc^klgF{xVT3a0w$~Ik$ z+UQ`5rL&k|U0BG^94aw(E-G5l>|3=vRedTxZ2~2Ole$s#iQQSFq zqXPDkrBz@T2TQR2Z!74FgRx7tAa043zs)tkxB-gUC#M4ik3SEl`U{+lT`uXnBIm)r z!YHR!N`J8du2!Q-_d2$!?o9i4EqBYBMV*r{rcjU>mhU8d3!DL>E8rSbk**~4u#!s= zJMZN-$m^PIMjJp5eqy(b0hew=GlA}U6o8zah&i9094^kN0*u2?V?=`a#_Gu{4xgBe zx!7zk0{s&DsCU*D}S7H{r=a+oMMC+%$C-wsB44)nsK(^0bbY;JPEHOBu zB26p$bvx*T&*`zFb`fa;6)|ocO_MWUSX~_8=}((}cpv7UH@vW}GKDkAuzq?nHs`yl zZuFKS9re1;GCyZ{?fxt}tf%4;)V^tpw~pW#9zS0ENp3z{e|*^$NB^KRgn5Jvy1o`% zmTKkw;kh$vL+#Gg@!Qca!`k{&X4ap#g1Dif>J3yfwueZMwB%lvuyeOGM zklk~309|z_fm&=u@XT8Z^cO(LBdRh!YK(j}f>)w090jBm<&fuJ$>yD7Kc zUcN8Y2olp&p~o87B0T6~lyw5ShQAb_YD+_IB6)vDZkEG#4oVYyZ|C{05i;)fo+4>j zZcL`QdMubIXn#q^iw9aGAJwnz@QVRt((NzD6l7jcH7HoxbTEF8q7U*PPpNm^{Fr{R znKuSRv-nL}c~g#-@xb)5TEiD)m_cOIpIzD~iB86W=8QWWr6I;$Xcu)1aIvQ!tT!I4 z{VT_RvO;i9VZ0-Q?LsPQWD`60HA>#HCb0XQJ7Y3)AhBa-Q^%bDZ_RPTkd&0Af!R|^-(@5W_E46m;8r}fJ z%_X<06^o!wEsPti>6f<+QrnkUjLkCmtcc^IWBhw6SgkhQG9}Kx1+rd~ffQ~{FoREM z-~6W7*pOSJNKfYFrP#xYMFO1px}>rIDt=z9gvdP7M&(P~lMxu(svFwsjkZ53r} zW2s9YUz-L-Hu#gMC1#nPTH^m7qo9A(7wSNtuNmGq)3T<2R|g?f8LycSjNIYVER}fl zxiG$v=Xu2xnH)2ORD5{i^CFj)3(OomZEu>KX1_ltNZ!6V;P!5es7()tSxo$}{9hQ# z1)O`9?YMJ`iAPas?&H^Yo%q#&D;$4TlzfDOMW(;MySUuF^29pxN;n;(H6wNya?d^S zjXsyj=0^AGvUos9pMUka3oHboVA5f)ART^cH_t9e`SJc#ZO5HH!39>L9NaZiQNe>R zN}gjs!H5!lpABI)W3oM-D0>jOhIMuXx3)c3<9D75ffuszSk%+B;iT(+SX|5}COjyv_EnqlkYdtWGE--vcHoJFu#1J+)+H>p7D40Xf8U z@8vojU_-G#V+v3Qd)HZn^RpFRuCU1rY8n5T$jD8W9WJ`;vvqzI-_*l%>k|Ca*-C&7 zs1%n!D=NASppW(d12VL{-nIVux;5^A&hlQVx4Q0aH`N=r6%|3ks)hW{vEgPF-wC%B z>&u9n#$hdL9&45b@DV=n=4<{jn<{=)OJg28Fw}}c?5^EuoOgheix0v_1Ca^E1yP3Z z5Br?V+ket;l4+hjRi){DeV|%u^DlY;riG?e(4^PX{aE#F1AXJJ&Zs7L=x~rEd5(uP&IxkEwRv}b6eVGVJ|;(MB#I@w zsJ_f+IE5>_J{QUCPrhC29m0C@4TP2)(6asSqw{vdT-R~3pv$16Pfy0-Dm;u-l7oE#D=77dtyd@2ALaAF7Fu_JmoOI@ViD1hNre8zVxYQQVH89%dpQufV5 zx+@_7i~aJu>aFjAaO>4lRwp9tA~4DQ`e~zY3B9LJjl2+Dcz|(!X+XPT+*X5bN@Uc= z2{Z8nX#Z)<+D-TLkdi4gn6@jxP~PDgRF?5J`Pz@9KW15NPrn!KrI2<6(?e>stZi#4 zC&HN?1q@YZD#tFQt@K!`uxv)GW_gl-wbMqB*bdNJZPkT^=fl$a-9mD z*hND!%&yqy&?d{6 zHOT@6tyZA-!z@1C>VUTw z1d{3e`1kEQ-vSs-=h8ekEI$6Wr2*5A9uQ zoz9_AxP4pG#@jDN+4Hf^rQddxGS03(9o5+UfC9kj26e@Icm_bECWyCC&P=POLtQN% z$S;4XtR(b~5!``qGy;`o+jm=__%NLo_Wu`OW;TB4!^3;cmHhT6S|&~iq@BMimgj{3ZRH$aVuMAFYUcI{Oshha77AJ$fh3Pi87>|4972ZgBDSh16h zT^cdK=KwPP3Os5+x$;Lw9^v9IOOXrTC}#mwb$;I^xmQd92fz#6USqJGiy(`ii134Y z_-@v!gaFi4-RO4tEUcsNn~FPMj&n3cb*@!odD@YHK{^A|grJzb(k$G`v0*yUA`3p(Pezbk%v&<%vX_ERE{+TZ z@vs~atAH~2WEG4GKE`5^y~%Cx9#y@$P%EE|$rsB~itJ=r`%)$=$}>Ylz`^045oQKJ zpYEd(9WeZ!pqs8fJ-(rqaqGil9`DK8A5-husDHjm#zyL!Kr3L}K&t#e#<~OmJ(&1M z(&iAk9TlT4NyW5HHJu(_hpjX%z~MP_?HuEc`<&6eZhivU3%|RhCnCto!}inuJMU2` z>Z9DMHXRd`*qp2+kMwQ%HEN36cbSM%<44gCsupQLhRNud*JlFUGk-!Hl(SKBz_LeF zMXJ~Jak%`h{_!=r>~jg#Y^dpo3$T@{1LN;Oc`pYai*}aA7}av}aZ7ZUlv&Nq6Z-vJ 
zyQ?uoQnsB_Gu3mzSrR;B*{`j(PTEE#x3E+d`>6ZQ{rPq5;Sr9F=O==Mq^va-5eP|d zRLIW9Iq_03*Nn@-$}z8zCIZH3Xa=l$qIqF;EMNDB?VOI#axQ%@z+1#3^n3fr zcSq1>RO!rMVxSAAviVOY-Q@Up3@N0?wd*}FM^P$?o<`3P`#IeQp>YXY0KhG|r zYsv#wuzI7{M^j$Ga!aI-)5eq~p2?HMg>d>lEIkqcGBR0&Rl|Jy1{vUbWt}}sprqth zvHO`2y!hlSeVQoSE|0VWj(tJ>BO8D`SaV1Ci2ZA0s1SSAOdWVUlkZ*xB&ggpXYKZh zL8O6R_U``3x3yP7^%D`@+28?gmJgvzv6QwH40oYBi2^0Lg(ZzK&>wj;)gT4HsVl?N zE;x72-N-vJc=7z}bS^TV=7M4LgTQ1q1#PQ;%ll|rdq5fmNaXw#KbnIJ48SV(Qb~i5 zqZwtAYL&RX7H$Mfv_wQ!W@;sfAe~m|sO(DLu6M_fe0nuR-gKVa9RC}U`T-?X@<#k+ zf%6f%*T-8a%pTmz_`u>HNw*Hi-moy0;2=eUI2eHZ&gAa1zzEj3*rcM57Ns@jFi+{TPWacW!1H~C$5dy>fNMF!$bLXv`;#0cMW z)s{I=s+!hUd%;6kmTCyg{}sF%x6Fz_cUDHQmH9vV5H`|d@hzV_PY?Vw-|u&Z7@wc- zu_=txs%Q38)E1u#IHfa?EEy1=7w+r%qqJwp+V|wZ3wSl+ z8K)`^h+^V_f)JB3R;;=9)V4m9jZoG3p;6<3*f$+Sk8^jfU?rIXnB5pk)P(f89n`9Y zcTMA;kX_3*r ze>FdDT82~l(SxM8W~Xd;$oV@QndI+GGHLsb`fgMNg>|Hh zl@X=Rp9DnZtYhv!5_N+-q7hhOPvv}|{?;1>SeOsZU~(QJA$mIgid`!{f!NW0&SnL{ z8W{uUa^dTLvR+gN5_qbL*aV11?|z4SIsAXsqexf(A7emWBqTpJ!B&%NK5n#1eIGhD zUX6NS9hVn?A(wlc{w>&<;*oGp>5&4}3kY{yi}I*g(=~VNV?N!PDv0}Kf?$W?*j;^Iaj2fl^h^+7V~@mjfxh&t%@Cf zNuK!6M`cK{OU9L1R-0UYM{j;Lv~MNrU!@KusT{*q)uVTX(!?g@u+$jGS@x}Bd~&?~ zwiozIS^y0);41vnaRQs3blM&*>D!i!xPBij;o5s;YjQgG3ZN&->x^*|g)bDDZ)zJD zWJm86Lv(sginT0q%;<_^$a@kWgaoB_HKeZ5Z*^A#<7F$HzG8UCqjMINGPW#4E<>A%vm*9?`c!f?BavNW`mYt;}6_mAMAs(DI)fqMfG-1$gS5zwARJ zt?%?xufg&^9dh|AebeTykDL4fw-oHG>CQ&Q|9@IucAJi?_;b{}C%cr)4tlR1fW+r+ z#Pw{CP0T)Q=GpTP@BmJKrnSMXiE=8hAUb#yLBfuI8+g4AiSJIp+Sld4^Ro^SsWioL zTG{}@8?`-36y2cq$@b`I&eQm15rbgx%9t-2MRjX0+Y{gii^5JP4qoev+Tz)pBV>ny z*89%L*@tD_6?(*fk=m!TiDb%_O}r04_Mfw`H9?m=Wg8OAaFZ$M-rJ4K;&EUzH87bA zbZ)uEccEH39DKQbWy;=-5Z~r8&W8tsi)king4s#vz5jW-=Qde?4rWI9;rhP>^L@q1x`8-tth|5#58eW#AZ_j+w;*(i2Z_Z_IysP_eA8OB73Egn|?DIE@#<7shs~fJCCF}ITU@2?oftV?0G{IiUpw9tVb2#=wsKS0PA9&K z2byF?oMpwD)?qO23Y*^Hgy4R7M$D?jQBO565-U*qJo8eZlDX2e^@!{U3(9g`sOGU( zhBVbdriVJJ8}rXipbMkOS}Rmio*zoesQxd`Xnq$PpGv9#&&uaM!O`aLHFjihpi<0GF3)c8+@q>=uIxR0@QKdSApI>Sy}M+zK!ZH_&-uE1wRQ z-L>q_!G}lJ%RQ#X1E_X=ys-@(Hz6R!qmv%Tx0i(?aF+8UOE=o84%j}f9<JWd9$oM^1Ecw5SoT<6cj$a}Vzjo^#&P9YK~4O-lx z$D)$i7bAIPW!hTM=eb_<;&1TS{^}#g?`C!#ZATn_e633>3C6R4FtJjvWxwPAj+c&t7k>l%%{tcn&-M7N%CCk;k!iQM9i}^WbYiri+?)rVLu767!EJFa zH->95kEA_Jx$57;BbR_seJie=bN)mSb*~zcDE}F5^h*!-hv8MxeKUDnat6*G-BzHG z2@PRq?4`N_afcOU(98!*A{s^Qb`OoIf%u=~eT5?QuU7vJL%ADAy8!j@tEJs?H|xM# zgI|9{tL@3faevzzsLLC1(CBvVjf?|sCSLUxaD$+pcc0&Ic0z7VY zFY_3W9Z*FetjqR_#7ps)H^67t8Tnr{gn)AemINM_O4BAWY$j?vI6?5}swxgoAG}H* zXxI;ahK^^;JKBnGj{%wI*Tm>&p$n1z?CED*Wm+YuOfXUmlKfKK2sID?5|p$8_%1-0`x ze6!9jzlp0??N9s`utM5{2)od7c_}U)IL)g_heummG0+Uho?p*F5V3N*%jX&voOTp5 zu<=ukh0p+qJ|!i!*$7kdyyd|2agdeq;(>Ev{0t^}pibUCGa5V%Syg{$dTjk-okm{* zu;APGeyi$y`Mu~6vP@58IZgl6oJ1%`p$P&uiNE%ohT$j*@oYHvUV=SB+W4Fg3hXC| z3iE*pW<2e0j+~Go@uWf<$!cb++63c*iRy?tmE*d&_!~MX7(5o2;zZtx zS^PBt-i1oUF>y?C41B8>w1Xm%+<=sy6T6M`T(jHKqX^d=6Z>98EkVDFW$Y$rXxsva z@8aS~to8CpK>7mGR?%7<0R9xj*+!i;FXb1+JV#zNM@g^I{SmT6o$~*dm|*Mxg5@yy zrsJec?CYRunca3)1<=6K?osA`&70Y~z|!lxjGr#Ff0ygI^C-{LA{tXSD|ldK?|O+a zMvZjyioB(pXPZdjkZd7E4+$qFL`oMJK3=#+=LdQ|U)9)JK*!*YG z`uI?8u;gd_}-d?!<5aUL_qqc2^0o(JEwQnMM?@vN?1_<2LLhQ(YqeG$%WaOc< z1YH!wp!KNYdz=X>gKl?OV=C%B=9I=&6~v+p1jL4b1W*0}U>YW-T4QfnymutC4$!GG zGIr9W48ri=|9UkBfAS~;?ZJ2r(!n^5Y8hz5pTJuR`!)pSI+ST&cu)FMm^ge{AcxQbLXsC? 
z!={SZA{-QX<)HL(Y#dm+D#4(t-s?(+E}s8wQO5X{11RU9!K7Uo;n9(00Lc%_&^^UB%J_&;UE^Jo zl5m+5qzH#iyFY*M(+MAqXkbtdqeEi805jSB^7L&7y!3gCcz@1hDfo*i9i)lCxpADm zMWtwK+o885(55#SZ-|g1v8Y9D0(m5*UT4Mea2^(B(Zvl@c1SzY_Qf7;1zrwo6!is( zx7bi+0Shfv_oRAE+2q)o`s)hDZ=sW26J-Ht?hDzT3A}8q6JSG!9n2Nnp4vV)6Vagb zxa6puLpxvuZyN5@4fe#WZwX6vsYwqQ@OIbcBD&e27*1RCpB3^jQrr;0%3HpH?Fd3i z{Qi3~0@xNvVc8X%M2g}^v%Y2M`n4;j`)Sc~gX7B#N z6LiY6yM6sl?;ZU^+e8@`XyYYSpyY&aSt~i&Fv(1CE?L6Zo z3fhFFPLxW?wV|s2x{C1kCiqT1aH7ETu@`T-uXRG-%iPFjhG%U;4j-&8z>!XAZ-4lw zi<_FGO1Jbgnl|sV)fst2G*CWRcBvi_iL)^S@=OvudN7Ou+_-5;G+$q)$Gl75jbJeDcf8Ad!;Y(!Xnk_qh$Qe@DTq3 zU~VqWPjV2lKStKGFdeHsa#HBaY3FJ6vxG~S!j$ex{MzjDgp==B-kbt|czrmOT*zz_ z*uV&6iBO1Xt&~!l18gdOv(nvaDTaGK=||e$)IOJKza%ujvG+WlQK6Sg1h}|-k`7WM@;18`fgvHGhh)$nvF+se zk8>)bjc&5}%HjLa;&^4Bi9$Nuq;<8+h*N@H=wVZ!^ViilA=8(|9)1S-pZ3b`1LZqH zCc}sucFT_!?tJg**aH|IgLHH*i}uO(3A#@gidUH#yy=@k_+pol(~t;JA$WG<@3||; zNXZ!GIm08>!|YT0lzhRb-|M8a0JzN?aK8sS6;WkY3d}a?UVPivXOJUIEe)?v*4hPG zdu9k|yMzkOl$Uk*9xlK7dW89WcoEmFnApR%S{ZKL;Uh5ZCXudLR)GRi$!tKzFdM?= zNOtXJ{a=FtHZh!?2Ng~spGLjs7iUac(Wv>i+Wx<#NR!eY?L0j;q<8{^+9#h|JiXb!ac zkurcxxR^f_Yye-1mEt%Ss+Mr&LXJM zoW(P&wvU*vfc|*{kVmEgC3Gt6=1ZLjGfTZYoDi3~{J#~))A&GJ9IzoX&gB)u4miur z;9Yc7ypY~LN7;{LgAJ3G!2HU+GOFr>X@T9K3VX%M{PyNDL=Qi^^b^oUqY8FcWSC3V zr){TIk@(cFm`P#Fbg(RPP=~H~S${ff5|Y8+9m?mu5(0hpbkyW`rcIyJf-n>!=Z}F( z*>qIo#c^rgM0Pak>_OAEjALoOpid=>wm!C2V^C08$n3>45kSUOjCw!wT(7wpeL4oM zOWeywS4pq0qOXaG|D++coMzrq>t8xSd%%4Gp(x7YgFaB7*6y}E=8xz8x(3N2otAD} zAi;A0%*>W2cdk^w1nLpJ<7s`2E4Y3l@O74v*NG-KC1 zVlfHYs3^olp8chkak(N5!qi-Or&}LxKH2OLV08k?H;k;00D7pj7CL3Fi470GIw7`H zeR~zw83g^covWg>(&tGnA44OqD)i%y@YNx9(|qh2S^254)1l#wp+Wlq=o9+()5IT9 zhqwEcd7u-KpnaSE9?r6|P7~BYRwOfRd5{^73n=9K1Rt(n(XKKJKx7Wl!<{)Wz5r>A zIGZ6_3lE3iz0O|@mFh1{0t#}u|Hn2CmXcR4Ny7K!K$n}qkN{CK!pCigG|quqUx-FQMF!2j%3Ui* z0%B6C4#iau-eC({F-QcEuN1+L#L&x9JNe{c1HB>a=E4WB2BCAc&bfsNP@3&g6+pxN z<%DUu5(6ycp=*ETg=@3h*xm&Q9;o>*a!^;$E;J)9k+k^uJH|FSmHRknEyih|pyHdh zj*R9?_7V2F?<47fD4dn@dJ%2H3VZAa8LJDt> zauifo;1x!;8#HeFEG#fDMq9l9z~D;UGa7OZ7a?tX?C&Dc>P%6Vkq5Yftxc4=<}n_= zof+8qE?`x?Hvi##UuT|SYR%FTG*He8#=}Bj7!TtfmMFMiZlKrg;To;tcLza>-=FCi z%RMf!z-FLH<#tIEU?2$8a|;L3YuPX_W@EkoF_N$x27nindE|3ykka?p0$6b|fESED zF*?FvYy%28oT~pgP+U&W=1SPhc>Fb#M*Y6qUU3g3)PUaz&v1V63N%-hgDVz%Y9xS* zmOf&*UngUccej66eGX6`Xbb0Sq zEIk;geJ>y*Y<@ohEn+&NqWYM6^$`%|1+eTPZw$6)7gPjHL++))_i*8k>?`>|L6)x;QUZ0a< zhK^2k@U7_2di76l_4WwclaiLPs$76?%Ct(nA?ElG9=GDp+qb-u{5s=%8|H0}M&Pg?16b8yD8Mk35}MMUvu|F~w~VI`{#1_NsF0Tz=)CfK%OR{@ z$k*yzH*3-T#FA7|H)@}cc-7Lkv?Xo13*1(m0j)gIqeDR{DUrb1#hsOa`bchsj~)mkPTTV@BUv85vil54mpj#bvEV!El~`~B$#UN< zLB@8YuOs7kqln9cB}l~1!L;p#NY4o?2Y&DL7U!PPd){k+9c~;?1oPqHRed12lNxV`a29$w==gKPpIO z^;wY+nql3Vz(2_I*=;!(-O|hgKbXpIry+5^W;U_xRAARd$r=t$vwXi@^5wA2W6{)S z!Vr=&VYU-YCb)h!{_Y0c!{6|WTdnwrL*<`s3h{-+UAZr|WqcAYj&IXLEn(z)nLupBd!^27Z9Od6{RF?;WCMPkW=$a@DH4k`<&3z~TIZ zEZyb?Z$fc*B4;8nPn|wwgLQSmMu{2_O}~-!E{AJ?BfxKxEDD*LW43t>W#tb%Hz^o6 z=ck}i*wFhK>O0?`(j$w$vZC6w=M}+iSn~)1t)W`~$Er?6Fsj(LQtREyZ$joM#m1+o zL1I>eu;hGz{^h-HCRXetjYe;EIXeqaITFfD!v|!Z31~8mN8_IUuoewoEil4&xD-Z}&Gbx>(?gXof^}w$}}i33To|cGJd_8W`vBQCU{(-g5FrZYOT5_|PSB=SGn9 zy_#YZVT!G0y?YP9mGYdLkD~?ryuB2#)Xtg4BYFNchKiFD^|0C5tp_qG&Qy>DBCn$< zgI|O^h><^%F5B9K#VI@SeOLMtUh_m*RS0PQ5IWcQ(9vHKDQx#&JiX9zHpGdVZf!Yi z|1%hav12gwU_FRy6V@~c!lT0!ebcK|^_?&1poB{1J20Z;QnM!}(#i8d{^J>)i zlY?Tg-hake%sJ71$JemytIiWglG&~i?b;?N4MD@MPIdB@aoSmjbKUwo2HReD#bkQW zxuh;Wi)A1%0%bB+kBrKj-yfcAWgv$wOtYf>mv{mWSI^(HQ^df%ax%k1R=*2X5wQxE zi8ZtlQeOFGL{i<*T+-C|6UKwVL5I;-zhM?aB;J=Z;L?LhX|oYz(WWK4grDC1=LNv7 zV%8f2QzNU-G@fMDkK+WNsR#_JgX!zfiu9DBtTLO#1-EiR(Px2Q?n)7RdihOh6|(6= 
zIb0@km^xW#6wR3!j%;uqGB=17MTduV2yO2y%SPa435=v>T=s4-8|`I@U;9)^&G2}* z4`mdVPmQ$v`9Jl&0M!f9gA&i_{Zj8t`r(Z`YclU~PJLrjKkkBxyY+vQs<_sWE@Mwp zH9E03hZ6#y7D$|~fVrmRzy1bX1Nj}VQm=O_EpxnTs6EeS`UyI0-8xBgc@mK&*y_v7 zwW%uL`k9b*`sR{x)W_Qy8k#9eVaEb)UZR=-gxl#JpM(R|Zo;)+UViIOh59au5RnYPFTy81FakI%R@u;8MN zwDH_}-~lS#TNtzfBbeBo$v%Nwf}}k7XsFhF^Dz`P8wj(Z$gwb^)Wl6&(<7i|4}uRa zD`s&P;NL|4L*uw*4a_RN1Cw~zR4pyQt}<_#lCPRNBestdihsmp@EKd$xCvh8Xp7j# zm8@#K7f z!;3uvvR@Zhb8|o!bK_KBdMDy^4YqGTXGz30Ao%e&HtjQu(cg;?tLva+oHnd(X3sUG zI+X83S1N(iS3%+={!U7d9JEweev`k{w2Vx!V)!y8{HN?z9k)yLe|CPX4Y1lte8(K_ z77Pa`In(5TvE-8|KP!xUX6TFTz+=#G5i}Oz6_p7)3-pbG^CvnnDFhdFb%8r?DrM~x zR9&!=yvag56=6b@xBB$`SyNVJm}}%mQLQQ-YOm`-q-x$jfuqqUBHaFBzzivPB-2~& z2J93wTM)Qkk;UbJPkd2q;%r6*JN$&xv*#3t=OmoJQ^R4F!1aM+Zlm{)Gy5yEihs3x zLY>)%7x>T;BAUM!BV1j2LH4ssx{Kw1k%{38w&}jUHdx8}Ks8l$XT76GQ!V{tZ?W9E z<0M=aW?QD7lprPth~6x&Lh*)<%+2zKHhq`v#E>?!mmF|2Va*}6#tgH_h>!pLZtiV4 zTDBcLADc45}^Yl@qY3G#bJyn}G3Ksc%0Pf?<*fY0p*UJqP9W zPRCXk7)QZf@dM%VB@IDaUSE5re2Q*3{L~qi<`K;RdzgT=B?Qj&L9hqMqr+aVstbW_uv~PW3>42q+k2UR zPU#~flqca1)Jt!Yxds%QZWoLs9&d)R33Y(h)f3H40F3KYHYN15D#%CDP@MbeXpadB z3zHN-voaq0u8+dc8lrO=(a0=C|B7)1m<|$uWLsEx(S--(X!6BN;D;GrOcTN&>lquZ zajqR(aJd|dBV!HY;?Pf~@rPF;PgaK4OrH8)g-J+2AyL3RP2lQMeGFV4^y$PPeBYt% zjh6A(_W5%Z{}ydE^DCpo}GiNKk|2OzWMDq*bdk~_Ni zSs@=gfo~!*^t4jk9cIG8lvCi~3YgfEE|92DkgaCddX*_1n9tK>03b2cx)_uX?o?e|J`Ec= zLfFnhAjo#M|I$}z#&jK{K-WY;A_Bo~4(YK>+r#E_IXz|LophFOPJae6B~W|ROMItid>nEr z?csWb-rRYW^i*8Fx5p%vonZRfh2Z`}qQDew)$gg&`GnWXTLK<3OKADslKVgz){rN2 zt_Q1Hj+iGNx7e5GI`u~!t2n)Gz)Y-7*=TNT zZ`o^n3!Ojn@ScE2-5m9scU8O2hUk?_+cx$gz8pNxa5hRtMhPsFf!-Fn1J(ilWK@8 z;Akf+!CdjG^k+f+XT+)a%ND}ANu94=x>c2$&AS-R|1JeUv9PuBL~<^kK4ZY5P$!>r<#xgRZ7XOc5q_LPG@u%ufp8SWiagdrFz##&a5e!8xwl zKd(}|5)?=wrz~v!6%{&u!QDdLyTtRh3@S1#+ln@2f5WTPjJ*tC9zXB}aPo9#Y zWtg%uRcv4zodg3ZVS+?>yO~W7dagB}zqB-(-~9D5we0^R>b=9U{@?fU+m=m;vMDm7 zWbd8Ij>sla8R51vvW43UQL-}2uI!aPvS&8gviJO5cdyU;`}?Egs18Rx$K!Ec=XqY| z^>`}v-|6t|wzRM`x0m-{qxqB7Ep;T1#Lg%@>33CA^Mv0{+#sR7DJH8foUP8B%6>U0 z%tB+F?d!LTov0<(^$$9oCoQBMCS{ zeU9hUU-ER74ZKfyFjY}T#%XrPwk`7k$CK^n7;VQl_7^^2oqx38M%84P?ETIapRgiA z{lOcignB zAJcqDd>g#9WF$3Nx}(+Xy+|8M?KqI$WWW1z_#vE2PWnCId-)UDx5B6aZ4B$+0q9Ig zo_OFq`jv2bH{P;2Bg5R(l=%f+?~MS?=lb$(t?dh|L@|WT6D@{t>U|Nstw+ClYk$b# z$jLQWz&RC;`a#ysQ<7I3ny-`y_j)}-Vw+~3wp>Kfzu`^q^BTo-Wa!`7EJ5t!zm-iY zD?^C3PRVJznTy!m2~9rNwDjk9MlbpRRj=@$*J~A@oIVrQ)QXz6XX5p8%3=)E)_rI0 z;=teT@;7>DhRfvAotuqwTW}*n59wb1!4sjrk#-W(+OBt4rETFvi!k}=dvL||-o-*tp=9K$)*$cf?L&Xs5io9>U1 z^fJLAa*DIRzsOp)q;zB#8{ZwUReEg}{fTw*N^F3r%a$E+?SXf$z&SF^{OcNiX46^n zf|jmw?)@+ znHR}&d;_|<6ck&g=Ogy*2?8yg1-}=w;zM(LM{9nzi~ug3JF39IQWc+(1(u3>Mr2P? 
zkzYFhcH*02b9Kho<9$qUq^IBUn8>TgGW3a=onE8wuQ_?w>;wkW4G1#hsOwbVv6?&a zd?3~zzp$pyt8*N5_wx6+~>la0VnY#J65sxLfWSGD&E`!8g zNuT>W)e2V9NPTbcaTfxB`EpRLMtW4o(-LM@E`x4zC3(x#e8bawW*=*VvNaL(RNV%& z=js`L@n5e4`ZAtB4qi#O!&8!|AYgHPYValZdb_u`ay%P89{SC3eTYy;a=$SdP zHI{f?iF=}4z{uD(oQ+g5@7ES-AVzXXFa3yexKF#C)38VaR z8ivf?-WI-oV#B9RB3=b-(#@q{l#f3 zx%Js!TOrs^KfCQJsrqAL-kp0gT@FoNabOUJHAM#JG*t+Rfg+=n6mhB(YlW}$Q| zL+-oH=>h9=+W5Gx4=dEE$4g^nz3F_NV~ljP%p&2#3sfCjJ$#J1X6!eQ;OGhQt+^Bb zG;sT<$m)X_g=>4#Z^sPQg z!S&J&^5q?~TKC);M461@3d8$ff2?w67>uF3wB!5kzn#-*2ss7&b1+TynkJlS2R8@k>VDo zd&wLBS$^ub;VG^FW9bmjnu*z;$*~O*TC>)2`Rc)iSEd&&jB^e2Vux?4NMIfO{m`Ff zsnqlG{P*{dH%!SntM0~zL)o=<66J9V2|1C%k^{iLZB8+A_WV`2Rd*|DI)3B}9*bAq z9)PNl_6l#!o|4O|+o2o616v`L;gBH3<;X+&0}Fjh`s|#g(yi-ApXg_RH48z4)SC;c ztM6(QK1QxBImOTR%rwN7y>H+zTR>mc{-Fnv*u{c@o#5Jww61UEPqM${$sFYy#+~|T z;{s&4Ycw0L)G(?vUi|leCbYBxJJo8n;&HH!#dHD@=!+-$q}u0PZhmM`F-BK zHX~K%oT>{yDN6s`E{49IF9J^S`MiRv7(ALxEP7V-DX8OOS|AvI2@zW>7d?@DF7~Kf z$eeA+)srwq1p~gsdvC*=@cUR)Y4zuRZx?e}UkV)cx=M2q+dB{`fYV@I*BA^-4}(AV z-61w+ltKko@IBtVgCCl_U_bi|mRbdXmcH*ePZJ z{iBA7-og4fEX$BV`b(ZH_k1{XY{C9lQZW$nRrxomqWs*I4oK9g3K1){as-rxZKAN( z%MN<}Xft}kcjRt)|4QV&ID^fnp|4F@CvSN2cUHl5D=7)y44-vkV62Ep+!4CPGXW1tdc>90l=8eyNfl7`Rald?`FG@{KU3|+DbG{PUhMF!tY-D3&6SHVxdXvl5 zZmAS$^ZIG#8h+fs!mFE});bYTPA!%23E&pO>FL4^@Q7pK286SmTTwk$P*{)7tSE9R zhg-tv{|~@6U^&1EJ{mJkr#Q^ z>#1L+HOBqHa=5xXqDjGT!(-_K+-H8p4>>Th+yq|x+VS|#`8?8?!^z9@R9H;xy2)1m z6aBL=MAQ=LlwI=3;X_6uc$(71m;Nnn1Bb%(-BeIlIe0SvO;X zg-7|7$x4;-x@ITeOUz1HDy0~y=R7~l-_4hbUkiAoA(HKqk9kWOiC;Z}v63wJeeImB zX#KQJ%|eNZUb|L-j+z0L@u778(78b|Ogg>a+V#_|cj@GUyghQ(6QRNIb!2|ab=r1kvf_YB2qR^y@N!o-X-Y;;0*+fir{1^s z9!U?Hzj}MkUXF2fXg9m_KX_HCd~i!pzZq}50GnD2-_<GAjMHgHL4&GZJx?4?(oG?zbHJ3 zk$ZGTWd7#=lcuIm9h zfS!|!5(!%&DujQXD0iUNBIA#B({;9IBEcs z;QzsksS2mDK9#Aux7cx!y{+)5$JPJC>(tu3)v1iGbNDMbf z1rbfYY3h7?emIKL(cXH9>ru96y%Dfvq;kAM%SzMjJmH;^*_2+pk{ywFb!BVlcnS(= zyy;d9V!G?3AAV5bWlpxmR{v*M-5e-U~^^6+^wW0t-Q9P2gT9yWiST58$gBa3W&C_ILJKVy5+^pGRuN@81> zLTLHDpJ`V~#o(2dIan{B(%Sd{u>?lhiZU%b#NhV9mF*A_i4V{_Wuvm+%{$75_7xl3 z=i&b4ip|s%jsd(1uen0Mp79kM8@SKV!=`O2SQ*KHG8#V|gVtya-IwAcSOqFJa0sZB z1f=##@Oo|g@+rT7m)WFNA+M~A4-yNo{4Gs<94cPwcZ2M^wj^u8?)96=cvcs(NNj=N zogJ)VR_yWdD)CWrZtKDEa`;o@9=ZaMQJ&9lVV{aTKK&*cTd`Zw&|A8Y-hQrzaV5Ky zAXvt;aY|HXEv(};BS~bUnO)6eB=)vn%bdnB_11JSK5WU6f|lHo*zp1N2pz{o+5S7D z8otq))_Mss4YTZqroVp%z^z~c?JFuQR4<>RA8;9@nul$u0?2>hZ=k2DRkDVU5gQfz zOO*{*{j4unc|Y=t1+4ukCv2n|YiM%)%#)z;iQ?)lWhZf^sl-Um^jPQh3vp7D2=tTF zt^Xq??d&y29`z6e^HFNJ%1izIur+)`_~fhJ*}6wXSJ|GDlq}RrUy6{KlW8*$q6aKE z>Ht6qE|YIEaUbTVa1meQhldpXVtFCY{&OZQI~k7uM^T69q2|FDYWG0gq@ z4tQCyPiG5bJpG$fbzLpL=m*yR=xD+gnVtGD&xr8R8r+gT_fm@F==aaI9t#0|HvQB-t{MVn$`#CtFFtwl@Wod^BuRkl1&BWjh9lmc%m*&l#5 zz~q+Xzr^SLO=)!M@}F9C;UsqladWA3{1dkrE^RBTY0D?>s9ye4ue;K>!fSAROs6iz zfTXww6!|N;l*!iZVC>?OvI2$>R8Tde(u)thQr+KLQ3&bQ^KMykk?f887Y5Iy52Z)R zgMA+LLS}XyTbMK*X%=eEa3KYy^UjqZcVR7 zHCowZ{~|3cEMaS4L`TJjpVDvGPqC{AdPBe>uWMrHhL~)f_(f3Z;TgAHA^Pgbb$50o zzR-vXUyVt3ef()460|)WCWTaw!p!ax470+ZBc}{FZr#P{DeHY<%pR>UKTF}9|7%{e zf`iR$_iqy{^VoBr$`^|yUpw1mD#v)l-d;bfkYtZ2R~TivC@QEyi?3VG=a{499Ej~7 z@l1=Oi-$7R3JLR?y=C8=kc=^waJf4tqlo%D$3yI=x#}QeeUeG4FToK-G4y_H*%F?z zNPjM(6}uL{2HR+Qzte$KYx_?+=_PuL=iPn)Yf}j|ho;xP?P;UzodXuM%htNT#&jK7lo{z6HuE=T1R5X>{fAB^t) zS%VP8ypo`h;DGt|djs{F=keARy4t-9uV&it`fhogO0*+AqumM#2NK~yZ`~U-@(&2= zK*{=p%JJgE4bkGU343w7ukV*Z?57`QiuJ0bo{>x z-(cCol$2}OVyT3vJ4=4r!m!CSxG;+^*%veiwQRm9#dKyyufpw1YPV@1uX-|UzWWT0 zgy4Jrnn+24*IT20$UO?hk*+ueT?LkS&7xUhYkG^n_Gh(Ixv45-C-`fXJnhq0uLB7q zUh=PL-|jr)+ixORSN^;naBzW&bSw_KK*XfmKHW;}$#=6*Dr+2%l6nWbHyfGLOSfq} z*SzcoL!|ebF`q@|e#rxS?tU0tT1I}R;;3xYe}su4a#29_O14^CW_X$RAN+UJRP}Qj 
ztH0#o2!}w$tjlVWk%8dSYV4ns3;ipgDGdJp{wAQeip?WQz&dE1+9GKYh}In8QF?^l z!u90jKwa;F;cIz2vkk@k%lnCE-WtA41~8BOG{?0;tfE~HbN=nC)cGnBqYd75C<{V+ zmyhxE7(NG%9Z;OzQPbyYZNNX$B+6cS;R0iW>4-wPrjcbjR{H?5ZA@=j~m zRDmtca9GuY`s&wWjH37R?eu46J#&AuzsYDagirlm`GGl}Qpqhla-PM3_rd!U%yPR7 zTlv9sf263TL$bSt&xgdJ91uSs1}*8tNd|?&YGK0b`iHw_eGi0YVMC}JYS@nE_W;uX zeS+R0_7|<>9Pi`5tR38XJXSDNu*mV`asuo$qnzIzC^k*a*Jc#6iV+~Ym4h=3gxeo~ zJ1Bkm)P9)$e_4Q%Mg=K}S-dBFVzNE7Zoj07R;Eq}f5KIoA3eB;Z@R8IlyZ)QZ(%+} z5>fm~w$g`cf{<^SNLid?AhJZ)SRl^aHKEirCt)q4i;rlwGR8nuV(#LMlKec=#a5|qqVpeKuCE4Og7n4MV3c}2lg#d~WHRo|PjmHZpx5gpg&UiP zt&C~4;eNb%jc)tzzzV5~uXQ?9S10OwgVV!AMNZEnhMCX<%9nr19hJ%NxAJzIFo1v1 z|M%b4F(a10N38$JCD@B99VY(2bKAKp*l$CNByvsK;yzIz2_uPzo}RQ)99a6ogMGf2 zm9A;jJil^Y?p-@CCf?>wmy-h>ArqHCSzzp&Sen^_Duij2;aYceoH zwQOZ(DsU7GeSH11muL3F&J7@2Uc6&+JO(vSwwHtfTU>e_wNuOr=X5vwPO!X+l;rlI z&?vay`kH3vQ3323_xiallfjQ7ume;ABt8Nj94@JO%YNv%F7`I{-rbjL-x>r2L>GS2 zM2u`qg=I$-T{)lFYe>6gzd6m&eI7p+4!#55spp(2`^RL)vdyZ2@5oYvQ4+(I@y528 zt%za)OTIYl7?3<=^ZOQ1TW)q5SH|J67GtAos0#8k0}B#Df}PM@CC@JqXrV8?Ggwjx zL~yyx@VOeXbWsY&XDq@L`2kjHsbBMXd$l_IHyogi<=&!n_%Wz!%Cz=mx*(1=U`!Ww z@oAkxn&aQU+&+n60F6I?$|6k2wWDEUSnzlTHo?bg!Cn}D_$z&27lB|wV+I==wrc4M zDhF*YAI5r*5AAk9#9Tp1gtp2JuxIJR{$-Lne4A3de+BOg{O2#mE@)qU@Bd$oG^@1+>aDnof2Y<51%F>6@NIq%I-Hw6104|q|hKO>q z7vkf&q$^N12bExD5s~imz$UdAH?+^d>a<)xS}Seg6gFvo2XTNkf;DaDPKu@iEp8fh zNy5K5#4wNAbg^|z-<1wRCxgk(~Hp9}Y`zoyM; zP5*3=22IX;sv-~4E;Ik|DrO~%BH8zPLoXoWt<7@0vlwV8LbZhWKKde9?S7vlw)ZHP zuhdOnJ>LdqG0dAU&J(}4wzdP0I!>ubqx-HC3(w64T7%iX^;6S4UYK?1o2Z|ImS^jv z!l-wcxm$%=;LOy6!N0R50-GR`zNt9QcJi%m?mi21LV-?>o%W$;mqV%r73lwuaV&bO zj<^5w<)iglnbIcPp*{5Eiu?C98VfR)p!J9&|FVVO&-WNKSXl6ut&VkRPCBIG=-;$z zqT6w|;hf#WAOTCIRLMg3yF8!6H?J7?*?a?V z;tTG=s77|gr2d7$9l?PPRBsOa4SBj9hKW1-#S7*tF&XC&zy@?V3KFLz`)8#|`^q;p z&qf4A*z;vStodmL*UL>8Wh<(rv+QMy^$^{3pr%Waspb|T2aLViAHGePe- zktht`3!}d?(6h*NgyOh$PK~gw*?S~Gx;V)iu*B&TNKAAk){kv^QmjG``hHavE{uvz zEkF@H!KC}&wg4L#xDRX#TAx|o^NF*~b=*Nqs6;KVHKgGdYtKhViC|j^;snr6N6rWi=ohX;f@MD z-ajdWP=+rqN|0XM*Cy#JVDwQt^DOLoWWxOWVL90UdAE>0eYGN*nqZ`2o6b}Xm?%D{ z=8mLKRRz8+Pv^)m>6vUeiZxA7u6D<~*X;BC1eRHjG+R%{<8SbyBf%786{H#?^{Th+ zF*2En3EKr4@mbssw6(EnKNXZkOQj_UAyXDi{B8DCp)EEiBd1+=_uGGU18T~vfnR|i zAFMcV_F&kQjP5>+b(7PkhHJzQBYBsT|3eXp-T(I5uZL2YnO(*{83sSLv987Be#wym z1%ZYa=27NcvOM|x;bB=}FUe&Wq0@Dxl(x$RggF-rl%{IC0$ z6B$Vc!2YOF!x{1DVEef@0wdiXgIcD|)mR%05NX`EZ^N6obUff4uq_d$f{##?kw#Il zJ`v<^fhVQM-e4%j%{kp&4wU1t`ax2(M&!XTxIyT=Z_`$Ag##n&}uN_kuo2D+X_G{ypoV(*Gzg?oHCV^3$= zhyW|$BR-p+;RE8k1$w-2{Xh*BWsBti2#O;O@~LhgnZS*3d*P-QP{FWtRowz5r62Y( z-n5)IqquYweYDk+Z7aDCI=+5OJ%v)|3O$bQR5|;o;*c*>H-5 zD8H4ab^yM&IlX3#y>?^)x@IB3oIamXR2%8f07K53X>U#bmCB6{iLN@rOPA{VcnmCC z%A{k9g5*DULz9Db-^O##7maEzhAlY(^-G&VHUqBF>(#p54N~N>evO5V_~V)Gf?_G1 z(pIWM39W!^?`xm(EaVwAj8#*oth_Y)@GB51;^Rh*yiMxcXIsgLuDAFYuMM#0pNUcA z{hCJ`AusIZu>}~D{rZ~CM>I98L(G=I7eue4jPRDoqeHq5m!Gsb-?D?Hu3(dY^Q%&V z%KUv-(g0f2>|U=Ke?Qzg=`+qX#-6d1+UkTaOp)C!3JP8#ASJ16+i2-xj14xx%kg## z4%6^B_Fuy<9DU)QCG?j%HlT3{kKQpvXPLnQW57E%jQ@uDk>VuEn?nmpNs$5>9?%QG z!vA5iIfEeLU{!qXDgJLf0#_+IeLcNa7LH5KYhMb038=g_OuceN4$h~&&woP*bRuA6 z(~q;{nX~i8`z`Huy(a5SuybAAD!SH4H8b=&j0@9w-(LNrH^XA+8xi`9ge8;b~89 z9sc<9{OU&=g>_}7v!x&FA@-(dTJ+Eh9{!K_qTU#ll00BBp5Az#3AK>m`&g0F z&LeWAGvYdiBr7_MTeNj|5weo8j9`OqTnv-)?{tavtjArXg>~9ZU7QYXGk((Qe%;iQ zJ)#FliV4p?^VnTGK7J0e3wlb&qo?xCjd~Sh5902|ul~$##5&n`VElkk zG#&_tQufud`|2@WaqPx#=b6QBXnCqWG`EG|%RDb13jPIPcF>5(sz`e>sv;qoiWD2# zIG_GGWo2QR4rT$xW%4Mh3Fis>k6RxbT6l;jye2?YiQic}p>Yep`g~uRK}Ru_ls^CY zX>p;o6C3J5{~c+i_|Abbui%Bib?^7IJoPiulVjI4Z@(YyzHDbnKI;3L)h}9I&u=v 
zT`C=#=Vj$4;-87H`GZsZ%-V)ppN<}$Vto`ja0iyP-}XfGk{{!H+Fhwx@b3ck&w|MQCj{4?T5slmK;RfRk>&Gm2`HYb;EcoCh5d1XMd`k z3&5$&?EYb+X;gII>*vb{%|9nMI7j(|l?$7_lj<2GJTEPds=A1wV@sC_V#=VJON18=E00J!j_CC2z#@-VuaKrObU-EoR+s5fo`m??0iUKRj9s}^Era` zI@Qkb*Ll2n>bWh~LYpo+z*Q=$Yg{&AeJtcbPb_ZutRnCYaH;?ERY5q1f*x+D<)a|q zd#oiV0Vp)bbyX z@B*qo;G=$7=&FVfejPf>yo166ku;&texgFPb$v~0;Kq$exEODBOE8d%)t!Cr>wZiv z72Z3Td~FaiN?1Qw%L`ByV|w_^#VOevL}-V!P9zVDArA{dX0{px!Z11}%BJQK4E7@$ zy)K^q$0{gV4*HunBvPXIx8pOD9g_aZqd;p1qI_o}e8ym_A633kygBz2KQxXQK6%|1 z^f?#s!}XlhE<|`m3-LvP&MAS&)z0)%EJR4bBHfg;>B~6p;De4KA^xRHGJ39uRNe|r z%`D8LZ=fJTA#pkuWb1DK`0*j3(`h+Vv*Gfr)Sn2o`#(Wc1tfooCRUAiI(9E9QKcU6 z0GSJfwn+^uud_{K+w*r5m}ClhT+A#hC2%{12bBb1d=(w4B#EB>%K22wfpEI(@-C!Z zX7!2KS9&Vp16fE~U&I$kUbvrLuzX(;pk&=bZ5EAQg3e?Ey;{7Ff9ss}Wvor=389 zzd3DXSEN;Sv`N3N&FfA&$Wrk<2PYEfOMTx|ZL!nA&J!vqxZTzJ?(r8!0fb3YP9MQX z2o7e;Mcc91CpF|SWmDMUn6=#LwFS$f5C))UXLr>xHL2GM{SQk8eVeZ^ibI|BR$*Rvh`-zCnY8} zN$kq$JCc)^fAx*?7UnPmbtafKrl*UEfAA)7dQ$ltn%H`m$?QI_2uR)rX|vrha74IS zS$i5gVMRUMH~w|)lab6^mHy1cAT53%l}rmH`A+KjA_Xi7?1n%1HaoPp;LH`U%#?^J*ljCam2k0`&fHHu6^T-2>1p#L|w$1U^t&2OjCPqa1KP z{2KZq)L*TRYZ4Lccih^;^33AfLHA1N#@Zh|wYQs8l2gy0$pgnn2pZKQ>I?#E(W{YY zXdQVlV(L;v>XKcp*2N9{3K~SK2XkQe6c!ft^u^FujGefZBTG+vEuKG6m%-`x`{Umz6pVvYXe%6%4Bo9UG)? z<`i29gVT%+AAqTxNexMcuorW7pEWm#0ZF0bJUv)_5*ZbHU5o^0#LBxs{nb}Z*rkwm zcq?o}^@k6?mauLk+$%biFx4SlX-Dp+-{_0GuprG~n~!*HhH?=kOO~EzA57>ejvYu{ zK;W93R=WRA)h&-7#I?&XWkMyJ+~6yEPEqQW^x%I_!$t%4J35zlz<6=Gh;eV| zHmd%5F~fuYLIL)cjfbhT&a2q$1V-?14vzO!Z&6CUnMA)nsdggzkK}g%!`&wmhGPIZ zs)l=lq1f<^(0-lxyS~R@l>qD6>!9BFWcU3>IIX#QVvWeZWUxT$@7wIaUs;m0N3dR! z687)dqi(6ckHpLtV6}H+)hUIivW^-1LUy6S_2mZp`%-se!ep5jE)GSIYC`LiHQqkN z>3wOss7F}uiI|u+D$x4{XeIgzK3XNqX}{o>`nIhFO?FU7&9Da>8A%ysY`tU(l;{-| zTR3(cfBeX$fM6N;Vr-G0X4$9d{k~WSPW`%c6%E7;p%3ddPuDsoyF<4;G>KqquTF#| zl^;4=0Gp{&zk;aISa`Y#5x9dk#Hbb1DPLKXmGj@5 z+m-VsRk6=5irEZXr6d`IdSf)l*si@$Yh4K2j||@A6@uh{+A>O>AwsFG_Z|Rl?c{E8 zaQgW_G{NS=wZH9CAuVYQ-P9l2+OzP+AhsKj-r-)ayZhvuDc0okXN|`bjE86S;hgbh zi0O2xGZlbI3i$z4S-O&i+S^hRSfT*|0ft6@+FRS3K{)dmpRTP=c6WGseq_88N=8B9 z3n7I5)rUQ3)xU7XWbC|qb7j)=BO8UZyJ7;KzH!9_U)Ilq{(AH@KxD|;bBRD3-P^f_ zzlzhry~|zm9ugt%zsZ41JDm&hUaf8!NXx}?NUrnowPd>rBBK*WeFaz1OlmGHRA@8o z4%2HUf#Q;=sMx&wS18&_A(sti<4W9EjKVmHBO&2I!2=>Jh+lI2-fNjLM$W^Z$d=%!orD}pOT<1(2X2>N1li)eAn^|%T)Z?gCH zCGgB4Ay*2!gpiPME5x1$p&jEz-Z`j5V5kr*%H zJ+prkn50PI8DByZ||^giqGlB?3J5f**zH3dlBqQLzJXeO-k{&oqpi4XW`kbH<+ z!UyVG4+$4Y{6#`Q!}a3X7nE9&`>NF@MIP#+C7=_ojQye zE6vTDkiu2Ia_zuJv+$Xd=mYrHYn6x8-a}>DTdf5~_%lH80j>g)UPz5ur+%LQXdru) zg<{6ygU=Xr$riEvG|!kiJaJqZnvlpRM4%M1lu;F^BebJ951a(nBZ( zb^%(}O8>XRdFCzj%YBef9}Tnq*(K)w2S1A{GUGdBtKEysVhI)dpm3RiN%d|cdP-g0 zV*y*yC|!|1Zs1k%8TTXl+?CxaPtUG!kU9AFEbsvIDo;cCFf;$Ikw^K3IUM1SJGmZw zIc1YhYQ)kPcHmott5eTRP0D1JV4?UKmj4Tf2Y$ ziG}O-Y5I@gl}JLVCQ?v3wn|sf6&ez+-l}5mAlV%^5$i^^BT zvYe-z^*NfLO> zg-a^3Ab9SfYO0}gNN$&FfYC`wrljx%V;67JqmazT9aPRc`WXtGomY?7m^Ag6LX&+; zr=50#W7jF7dLeZ~bjFZ(++j>4_JN}(Y$fjQ+wAJ(_)t`(%wfWLHj*B4Vw62&vsr2% z7C+X*a=GIYbd@n<;`J9%t|(>kRG7yRmBvV^(mvltez~WNMDx;H%sZx|8kjf0AnGcq zt=~A`)~)i~Ohz*vAXa+2Y|Pqg=V-xl`dzj{%2A#!tt?>^b;yHN_cm4q`fVm`ReRC@ z%L4oxkBdKC+c4ZHXJ&DOsOau!#VgCY3j42QxK7q1ncW#|QCG-hzLsDvWx12w zSALX^|6AL*pixru3jrZ%OV!}>$kJ;0oS&r}fIaBW0{}D{eYYohWfUJ#|Lf-{Qm5M- zEFN~=msf0AlE@^HF6`VA$DHOxu-2p0`!!z@twXcp-kK>(nq34Sb+GXf?@=m#eC!lC{$q5gl@}kG)k+S!dj`Du{)hyQk0LT zbN;ebViZd#Z9$YupW>rGb!$A?vIWQ8TWZ}(0s>a^bwPgQQ6dK#OT?!AGvGfj2 zWUz0qTb>*eo8VSbt${9jRTnxJ8L!A8=1xTl^MBx$Z0OA0-oTi_+5$q>UgblG`-_

P7K6y{w`QN_k;!52#^DsIU0`{?~*zd0YTBE_n*Fcv}D?gbW5_&?%UB;(MMs6 zV@$^*W-ahiKRr-;D4X7YQ>^(qa7Rd^1_Kk8QT;w)f0`g|7^TrUbHJJqo4e6eF{}Kb z3DQ}_-k(P2He?LLuO`oP*DhbAC7Z=xZWDHYn=GkXD+Hff*1dFJp?=}x5Ju^ucz!jf zHu7CcVZk2Fq$E<^CJ-tWQ1k3X{PQ%;C%QO2?2y|!zrFW^F|AxC{DTl!eog4EX$$^6 z2%)sAx}@f3;ttNw2VRlKL(e$&PLh8&r!&WJRT?G1XKd~L{`oUIqq3Im=}SFVVW{x3 z8~lqie|g5a*UN@LBKE?-Vdw~P`4sL%zdM6n$!^J1eT_ji!yQX zTM={+Gw0p?mKROkKmlLm;CG%hL(Dzhb>g>=xUL$69F?F+2$O&aGGGOV5^~4Y>bfE2 ztlq7%?mnkE{#NvDsNA^E2bH$7E`r zF4;e*t)65-m^Acmyo;o_mZt=2(A&9bxVb5!+2;*b!Qo=f`UA$AnH#q?_Z}PoXT8PC zW4OWJjitxIgo?GqbS0>JhInns)5VY2zElgRl3|fOS}}8*BA79-#(ih^fC^fq;PQFeI&nC zLdEU1ueeovfmy>7IEjr6+>gbbCi9U(aQM66aUo90COe*ZmBUydgkF}_*n1$7HG|hF zk1?>0GzW_T`$fE&G4y`tAdtAZcL!q=d9%A}hs3(u*gz))Vrsw$`vl1L;zr*lqm95T zwX}tx^QtW_0|UMr&t=j*8aw1@`|*WT=&I}ybz81HwG;;2R2XD77MCnmN<8yg)c@MV z(X%eK^1_3}<)(l57PjW{@UPmPliU*4>pD5S|HvCq$qFz8UKkZUL56X48napq^a_v% zVJDa&E-r11go$CCGE6>feyDVZ3`{xxlUr2<=C(eiOJ0A{3f!J_Uq;W4ly~Jq77%9D zAofa0`TGg4WoWV_#QtTj#JuKcgL)+?n=5M>M~++9+Sk~w zvxvI?QSLUTGy%#-ZkNA0J>j5oK89s-!^d><9OXqbNvKk!LqHh%8DSAiuu|x;lS{P zLLZBIwFkz{uf5`Z+zoAj=Oo@NVv0SQ^RHCb4xIlI$9Z3lavv~6?-?&|QsigOySOfM zij()Qvn-19Hf8c(lDxlsAVK(y@HU7qqFx|^V3b)Sf zwZYP^GjcM-u2Ssc@+7Zbbw^{e9}3d}wl;YEOJPh#P(JAbRN3Cc`AkNo)c;0QGStBL z)zHv~p+^qNbLp$ot$OmUnCT_|~>AX3MlTjb1 zv7{HeM}KeS%;u%z5py+OjuNfl^1-ZoSX#&78dGi>Lc$z7gzc+9TeM2Z9zQBgByEq;+7t{7nZ@F3kHru#V zZh^OejLOj#~Y*!Mc}_&X?cec~S4nTEhv zM&{!uFJOcN&T06p1*&hkl>KIroJK_H&;^Xj6Oe)1X0!gpl28=#Oxa&@AV-G}N+6Tl zix&bdTa%qucur4nwSgpoTP^HaV~8Z$?yng#*Sk`ukq#sx#cv}vn-Hu9XMI5G!{-I0 z_KDMjY03wAT~n3r?C}~1xFKiRZkln*@6FT2gzthaY^-e`T+Rx4Ei3*G%e?4_Y7B z+)d*e{3OCWe-f3y%lEJ-6|tOyAXuXDBSc&lxDE5S{~kSeW$08Su=2U4)7M@Kjm+}6 zoP9ecv6a$<3-f8c%P?`+s{QW}hNxF3m-pQNrPdT~T2sD!F4SL3#ycm?F=H9oPP9^St=PyGPZhcX6Ko8dYxx;*Y9ielqA&Qn58-3lQ`Oj&{ z9=F1EVvNbCE|~svR3}^cLnbMulY01Zg?PR000%!gv6rj*!AT125kf^8WTS>(hxA-! zCXz$t>0~JpjH_;i?S?6cThM_QwvEijg9R8jaxjX9_vsmqn;q7U<@X;CxB(YGstm!* z%+G9o=lu}W^NSp^3YK{lga_}5)y9g*E$g4y5~M{PfypoGk9rglL$PrBTRXS*f>04gP_&VsOXiA2I7)` zx;?mc7JP+V5u3BGZnqZstCe7OvUACy8n5$L$K|MoTM$A?gUL_0iR&Pm<3Lfzf&iGY zHHqoJL(Jv)O5NbP)16F8wX-!DFR5tz#7y*9MJ5j#uNt8|YAAiev6mw?PJd2h`#bDn zXve)3P*js|J!06sJ@%XH@NK7QaM8TjMN#6Gvfu%D5`mUFhwh+b?E4a zwT&Z&cXR=SyH3egITy9wBW@y!posp7heHDDJ zCP@8fcwONB&MkKop&o05RQ@l4rV|KiiN_w1)iSEkOyRsiJ8w!`9e`L0)KII04rW(B z`JtTAjy(SNu`|yyQ~mC8jn})5qx`;!c2id0zdT01vn?XLQ&xDWB7bF76~l=OQ5&?T z^v`_*JBWA(PKLQ7RA{0mKbr&J^>N*(c8|!$Qs-tOh$8CWW!OAroP?p^%VbT$+`gKH zHed@rn|(Qg?lp7t#r_?C17QZ37&iuyE>lb74rPB)p%dWNP=iLM{4`S8)5P-wJh zv{)lFaqU`Ln79Saw=#LuvIa*%>^YQHMhg`<=AzcXmrOj)^%;9M2orIwn50Rdrh z=lMD6A#;vf$C?P7Dh2i0oDm!Xgqx$YS`#Zvg|E#7nkRz=GT_m92SX#+qIUx4D)>#% zyrZUUL0|AK2uc7+GKE8bSY#K11yu$`b;U#r>+--4iYH+dk~{xBQCLUFHYz1x&kkx4*Qv76j^lR7fE^F`t$7MRBz zCm20Vde|8zXl`S4jHP$iJaLWVULs)cS?G;ka6R3N*9CFz&23!Y)`bimxNS|}{;57_ zjJ;`P?Lc&{&6z*K>KVk0zhmnADd^e?`1X&niG9#1muNr6n^N09ZV%=^1rI|7@bN)^*x<*!vi+BMl%qOEFIf!Qe!+fN~?jX z7dZChy~H`gsKzPvKNnHfTWt`dtfRZ7CT%gq@ssDnWgRkZ->X@2{hL_}qF=>ML)JJd zs-76bYyqvb#a0O*E=EWzw^RP}0Ug%#y5;8>lszK2F3@HR4n_?*`kOxbmE?CfUKK7` zKqR6CJ{fWkI;H;xaD++M&)HU$gIw&Qr+~PVLnh}&cs(>R(_AO?pC#sB#Du)-g;x&~ zH(Kj1dAYzew;)`0N}G}m!!EZss^pMiK4#?a+#8@l3{C5t7TKWRfBywq#J9P9t~rUH zi!8YkzX;SJvttx%O-k_)@Z?xv_JT);vR9RCr`qnK!>p@R2_sK7kpQP<6Q*0-rVvb2_ch( z4MPN8)9O&7J5d@pdf{66_`(y{7KuWqnq`MnQX%IoW zQ9weayGtYvNOuW`?v(oGyxuoH{=C2IbLN@Zv-jF-t(oV+b_X4Udl6vE0J*9PKgi1v z092i$V>#c`uMZohbs>DfjfWn1>w}xx=23oJ`cTxty1lj+X><8N*pO}iPxUM3Ef6&; z`y)I!>BP=|dGpF~@ClJfmKHto5Y3Vy_YX?~XzX9RfX;EdWu%GA? 
ze^nE~B8}-yEO_w{elt*>u_zlu+z}j&6-3rK(>_dl1sL(drR-4fG6Qk1^}y(t0hJ0s z@LQ0W*$1__N@V4Wx+~ zwFsZjV2%^S{uJp8=I7pKx)z9v?Xmy!~lf_L>NnI(j%cL zHXw7!5{uDG%?o}IIou~W(+Aw#v0-kY=loU_J9GWmH$2cpR;>Ien!cPUsw^M}mO5fl z;j54ETf;y<&vGxKAia9253xtd-C0NnUd6&LkI7gkgC_=Yp$p;71&H8Ke!t9ZBGFFmBNAJ_caUysv_oRAT-0 z(TPGU!BD0ueluv|!I{JMQ^PyBoU7Mn^ae;!yix{7W3Zetawa2z?GzcOj*vH z1g}@=x8{&?dt$~9-QM=(jJ=4b?FEnye$|sbP?rCdB*8LtF9Ca%aKS}Ct?xdcPdvv< zb?{MBM;gL2t+}l+a3h5gxa#GD<21OV=%Fa5Je1%;vkVCaJLaEb%lg4I_`Iyq2} z*X)7TU^)u?{y5B06fnnrQ*85O)Su^Hcr%+$WeEe&9M=O?P69Ove&jlN!(v8_p&hsu zhXQ7LK&XNf*`V%53!h(bY_v?A%A_2z<+GcZu*nNh!20 z{>sn?HjF6{x+>R@QH-akqjQ7Q^)x>H4inG>tP%XMDu3R?7ng#!G7|jqAqx}PFLkfk`BD#O6Lr{?2 zf=ZgPh#_|dH>lt=Co34cQ@D*S=k=oRA;1K{BXnF_gG-srrsi@ACoMq=J)QJK#k;NzeU);N`jwZ12BM6E*@1JSY-a? zA2G#j#Xz}ft^NvpNH5z&`U$h{z|}I2_}(V;Zk|EC^v_m+ppMdmkqMZtGC_?npkLuM zN?HLS8kB-&z+=W5`$tW>-U!V349D8XqJb>JHl8gI1QrVMyyZ`urNE>3A2z3?po^G6 zNnds{xsgf}G$Stsy7_K^`sq&;I&b(2_3gJ%y4Ngu+%yAGAu@8X6mF#D+ec(etwx-F z48YHJ9K=haTLO@9){Pm5qM(O_FHRo3dkYJeE*OghbT0g$=2*NacNLg`c`+B{`(4V2 z9%w5wpm8tase-nnz~S&04~?f_K*!Ko?TnTH!Z5k<%_)E>>2)pLNn{X&cJFhI5n@TG zH8q*+LhGXHKqt%`I25L*d|rBBH@*ljG}T{2XkJvM9D$jqFeaRVFYX}*H;QX6QG@22 z3b@=)fg6|I&EM!Nc)|UkodOW|=5oAYvwL)nVpuY79zBl?gSfTr#Hzd`cRW}uC@9&#i*M^oawEnj=6To_ioB+7{uEW8 zK%qTYFQM89y++ochK@u!$EEapnI1~Dfx&B%%>rVERkIYrnrEL=9@?O=ha7fk(Ou=0 zCn)BB$bEsBN=~$K(Rb92PxRtBOf+XokB#oVh_AK9u)uXy+mcA3_(U!O!KV_H@2odv zXv1|b?L=P4So?1+S@hm?Xv~|T=HY4B<=-sRQ{j%dACC3<-07)zv>db~AC^0){8sT@ zR7c+?Kudq%eHbz%c%UxLVk!e^Oyn;W_x)U?U!m$K6qdWH9lpICm-&rQ|7_;hu|QQ; zMiP{Vm)q-nUL{bmHzv|E;#rLNizf+wcvyNXacw&eDml)JG(7_;M_tvTN%k=e*aQPl zULSgEdDPCSZ@&L{vf*;QSt_8R4LjFBlvpX^8MGOBVg2Q+yeM%scn_YN-T2Y8Zh5gt z&%hP!*z?;KcJgx7dmYW;g@cx#W3=`c_L`e1qg`^d^K3UJT#mb?F1`w_dHCc0>u6%8 zi`SHzpIPXVF?3HM2^hN(p6@Byngi3B4vbU5lDR{G;*K-(^8 zq1tbnSRcf~hb7YFl|hAjl0xk)#+L$FWN{>^n8I9!X<>7Fw0j5f8#id{31Wd0c%o@( z4k|K!3=d+DhchU3`OOtfNL%L~6Jt6LWL`6*a*WpHpp4P_qz?4Oo!RR<)-m2xhkup7 zGYz=&tua%!oFNY2HLY%O7rf!V#lm7z`L>%v@WWU(H!{?bXRR?b z8%yEmM}LYmZC0# z=u=PPqb^7c_R7>mU}$Gr?)5@th?Y*hGJ88u#f(hyt@KODYR8vE#%QL99xC?Y=w-~=D-j&geklWns%A_b58sVB1d=4r+miTEF4%8Ga;z1 z_la-EpHWz+Y;0H@srA(w_!e z=7{IM*OK*uI50~6g9S9(enMv_NIyt9Ct#6TBy0Ppf+)kqaJZ2^ovq^5LLIyAj2HcRy`2dDy>nQ#hwih&Kz~IQ`>W z6WI={MS}~;&YD+y=^r{P&nSPiIvpk~?Ii!o>Sb>|G{<);{ug zZ?tQV(OZxE!FpIxj@IQC#4xe>OG87Nd6nf+1_oc%MHk*ecHCz~ssO}s#I1JEHHs%^ z_n#^*3h=z|vpmbvKBDnZIpnXN+%{`Ah2!LC9Us#-74ui;h{T3(5-Q~1IT0Bu3q~@$ zf1`#L#$FJ9)qJHWYQz6yvhhKbXh2Halm`CQt`tz5lHPSGB{#=W*fCq

w86XVatYU#(xjzsrh< zXxcz(t9+dc!ZX!#fK>~7oe_oTnadvgvoS87iM=lo1Noy2v`Hx^VxCLu1Mi_sAl!ui ztPa@UUD>A?D)iLWLSJ8mfN+N&VY0M%Q_SOl70{rKT@s8z)L?T1op>I{g!jT9ry-C$4OFv&-3v_j{hcu)zO=mlohEQ)BdL=mg+`y@xume+JYXt-1FvxVZT zDyv9-2|Xkj_EyFX*FBouIaR2tmij=}r4lG@?OB)o;6hpqN=6WBghc-hrSRfndkW&ALMaaM(lLY_8#*!XNmaQ2R^2pM+LGy zVwB-!%z8tEv}BisHk!c&g(Xqpg-YfdsfdbQDjcL+H1&J6;yFvBG_g21t-2$B&W=g6 zu(^h#B%~2ng~VG(kd)qTn#vl!Z6T5JR)%K|?l&A_O->KZDIat5@imD&txz+QLQ44c zD!}_pGlts9s53mLe}%)7&2aW>R#(ffUmzsZ9!jQdmnc}oF)oK&`!ncOruy^nIH>gV zfZzOACYooi)!_G5SVr7E9?GcSt5l?zDB`ot@lk+o@+8xWC5GpHO!I?WwOLI%`|6|mHTtdul+zSGt`nKksF;BWs3L8%MYePBe zlF~KsVXUJbB>&8&oHcpaK{U)2?jvgN5p(DU9KGt&&SBO2A2 zD9}lm&3NZR_;4r&<%B%-cruI56cjB%E#x{}jh4p^2iegrP!hT}FaWij{Hd9BcbDt< zjyzz(3X@(1*NyU5yvP3U!bc$s*|J5|2D`cfj%+fDq)q&fVKD&Fur(etWtq)MNQkXf zeFmjpxOFs{tNb1^Z`apOnt|%}N3~i90G^7|Mc*=Vg>H2=u>#tqgS27!#^mas@f}59 zKmqf7b9F)kRe|F#%Igw?tPsSJZWC%<&!R-~;2@f6H55`>pUDS+v&m_6&p3KY7#xdoSg1tU8-ts40!`=tPE4DxS^s ztH_B#78!+EAFQTquBj`f!?NrOrgwZq-?tJ5;U30OxejvF<)^NQ%tEljGoi=7&|IFR zHRwcJUw2mdVXZqOFlcm!`~@&008Q=oSVnodtb}N2`xNwy6tK6x)KZR{8~fhVn*N~h zb~C)y1~$&~Gz_+MtBG~3P1D-R$-^R%g9Z_jfx$oLs-5&b3m67*U04WKnYJaE;{Kh(5MmQ<{EIxd ziH{%W|AZ*|HApq;D|iM7nmq#rtzLxWC;@EtGG?ZHud4$QBEMdENMKZ1T)V84@C*$N z314j9UpiR2G7zukeGp3&Vc4IfvYC7MOO&GBgGCQLxEohO3FNe^QD{J~jJA=?x6{*h z1N~+=Se;pHX}+G+FJossvX5A+R%Pg3H|v|NV8|YU72velaBIgjCAhYo7#eGYT{{;T zl>K%}1wj|k2{E2}x~2=Ow8fGayS6jFvD145o_6%ru=zZNr`7jar6d^4tno{W8$(Zh zk!srE8{urz_R_$oO^C$73L{VT@c)dGq0JmxKQ(s;Nwvp?YxqI8b$_zRxgs+hqwV5~ zBPaxxu2Da!tW6T&i;aHE%ho?MV78I}v;_6zn#)cMblB(c>hlb zEcaJ3m|uDNEaKE*a)@U>io&Z`gr35Gim4~W z`#wVhYtw=@->z@`YH~E5TRH$U!ji)^cX3eebwEK9oIMRD>||l33xx98>0*n>)N{UC zqV~r8iB{Z`@=1EGL=>oK(P0%}y77qJh7t?(9Qd)0y~i!cXZbQazbw7*@D(h7js$&6 zJKtA$OIvk6fLd4dJBr4zb3Vb`7>yj`B_uskaN9du9f3h-9P`4&kQIDE7=uUiZpr-> zhbbN3U}_o%mWRuDG?cjFs}+wq!Q1dF`+flKY@aRs1g(wX{S~jL02!+;Vd~^t@2jgp zy@sDrd4)*vI-Kuc1nc2p*SUzZqW1J*%W^^YRB{5+lN=O5FLHu}cA1|S zR1$R|P~VnZ8B1`kIfc9-xM2<|2I%)bQe&;|5&o7DD^2T2q=?c}qozq3axqzWClHoP ztqsQ4AQiK1PLJrDNZ}E1d?dGx?1adxb%_R`(+`M;iD``@Skjw-$rcQ6+*)$V;b(*C zN*$q*#@AI9NW*5vN{7QA?chQ4C+k<%V!xsM*0E9_vos|xpL6k+;yE|XYfzLhG*@P5 zT%lK@^jBqL0I*xxya61+LN;pK?Mb>6{x}3;#S=qg4-tGdVgz~NI z^O*%@wB^&^iCZoo2x6Xvf0!lt0a{AIb%MK0otoY}hQp_M>Tr{H0G2R zgTFzIS;&skv0Iz10oz&WO*tvf%MC>F{rAF+XPe4;lQVu?RH7?gZ&|8PO!iv+{XpQ@ zEmejov9}O!hTnsXOO$c{jYN}CZTTAcdc~kyJ~=!Wpx8rD-~s-%TmMe?3$w0NRKEaU zW0iCJ&raIi`_FQkrBXqcMlSq=;7W*H8%HC&Q+m$XW!(U5X}WO3$Y+0fommoI1}d_; z+xiOu?A=k&;28jX!1+escQTsAW8S^xHri3+Nix{q9x^l903Zdhso!B|Y{!uY$h;-Q~7tcpyc4LY2 z+zwEQ7pf2G_g17YtxLX^{05EBimGAO<8So8D5$M#X5=HO0@7K;P!VHdNwJEQHOZ17 zKS28lD&*SN*2#CO&e6jNHIvD=zL#U!7xUS@oU7R47Z6K508xYV0a?bs>|BbP;Xf8@ zXV5fRvLbB#X2sn=OXmd$zq+CiBZCT1(5SqyW(3T|LiPt_9S|~6Q$}?qhGTQ>o@L}O zq-WpgqnGqNOc+*FP*_c$yUPF@bgz3r20^hds`RXwDn4<=vTTm6DJjZ++wbw>7yz_= zMt9*^mO?2I6c3zzuG-LC2*^XKwVbc}MqXEaWn`Sc|4qxf;8cjOG4FImaZB#3XIxjb z;tLosI5|_<_@J)nn8#54mDgR%z1#WR1=3Qbli+J4KCr_Mg-1f%YX+&*+X3R%!iHJ5 z{2F6Sb;6Q5IngB9#^*ZK_*}%#NQ`-;+MiSIWA&oD%36Bm9QaAp15}+^UoAXMr_dbF8P%FErYO8+~+S#vOHqY}#szGKx0iiiYDOzGZgU=a? 
zBc_Q@_e&B`hz_ZoW5cdXdL?MGh~MgtEo)iOL1cJ6y@~nk@o_b|ER)JYr1j_U^14^f ziAz%XBP+mY+*ouDl|W8Y!P~NUoeLBR+A|=N^nJl`fZfy-2J@Y3Hi*P}c(1H&d$k%O z6!&>Qf}rq&Ehniu@WveWJ|jiWD4=y1+g{3Uxq;^x6*L|WD)PwR{Uj9k$ff5kcV3TV z9<5E+dz5>0D|W&ox3W{RNGk8#?s?@pmw34xQ3~LE)Im80@J7Epy@xwKBv4G)T)#AF{S_3b}Jds=4Jh)Gcvi3 z^U@4a$j1zm#HQf+In1@BH-*8t0nLn@DC0xSAEWShrLmdCA5(J?nz+(En6;>uc<)< z2YMnkFgH1WcD>X$Cizxy@9i^UGA~95N-C<(Ik)HcugL;yy zF8@;UT4jCzv8koZ6|bge6#Mk|+#1jz{zQ;@!dAoHO_;KuQihfB99~dOI5Re$we(61=T6l~8Q@DAQ~$>V(G2h6<|xacPc0K^O#s zg)9Ubr{!Op6y)r`57gDyb~@^vonO=^2sv`?FvXZ#|4dAJe8k-BVZzn)=S2`hsg59S zlU%l@5~NjCeI2 zrL_E_-Me@aT~sEx`yC2v)-~g*eH;twmfsVM2V@~Cn+6k>`gZn_BCanH4w@U(J}x5= zV20JZZvFC_b*cG`$bVh>&Hg|&*JFq<9`w3!Fy%QxPYzXs6;w5sy|G0LorIHaaJmJM z5Jg7Moj!c{0NBy#a^J7Y-hg6lK0F<3yeF8${ujGdyH9bFb(XWg5!NC!pDjD@68`&W z`J^Z7_8g}jeaGUl)`<;d7D)#W`@74?>iU3=&w+>*E+ZWw8D%H*SxgU$NOiPVhFP=( zP$Sx01ukRiwyRicbFgjz#cz8w1)~CpAg;PpL*HNvJsj#aiCEZT*Ro#&XD04o`%Tjg z{!OB;vtIy*0SLdo%cgz6$7WTUDq-Nwdt}_bjkdS&_hnc~Yz0Wnz%%ztf}H2=z?5cA z1#uv1hBnBkzS_7_X$a~Vz3 zRc4R_uxSK7z{x7Dv!DgMs@k@DO^khGz^r0LWmgoeo7k=teZjwcY;yfGn-}&Y^0a^O zO-B_};OL4Lg$A-;eI3@_;3}c72iUf6x8|{N5msTkdjk8~ZO3jIbLan*_tD00({!Uy zL=mdklA{@*iw&`5x=DEVH;e;m=4Ebv zoKJ>4%fe#{0Ij}je=4U(rsuTLb&x!NRo;(;i?(5zla#*%hlAXqYxCjQn;qSnifREl zJ$O|HSkx_lfmQrpNI8;>sD0Y%h#$AMJ603aMs>XVwskOuo-RQQ8Rfu zY^*|E8li^2?eG7b%iC`ad|b*w1*jIrxjZVnwa3)+e$)Fx|tEli)eFi z9E%B^q%MsF{a(Z~d(M7p5mk0=kzy7wH2mYFsuIO~>+7&N_eoRB0}>dL=kf^jbMQ#_ zo9q*+z8QUL5|n_*GfN@GJ|!fY?mV18`d6T!??h>-g92fwyLD~*2DCH!mYE$5WCrbk zxOTr5W!56TB=FPO8Yp(CmF=f+jNsDq7!)Snc?a zfW`9O@KhAOy;q$j#S9a^xM%xO?i4b;KRFISvE1FcXX$|maiC9X4xmUlk;@sDdck6h z6KYGd0N-%fB*P6c2Aei%=8!A=H9QWNz13Bght7}U>QJ~#JH!@i23(^RC13@BDqN3X8vH5R4NiDBq4(FIEzO0J0+x(_I~pD*SMlI!VSM}lfg<9}q{ z^iDh8JE8kK4f%dA7TMqQobRDVSeO0f3pU{9npO8Odg(4p+he_&5PytL7Cdt$>K4m> zo89|vdGGQ({T~d~L|VQ#%>dJD`U&(Lof;Da0Mo2P;#3-l{eq9?xd~Zc{*mEy;t?$` zocUfD>Oaxj;`*uj^v40MplVlCSIu&e(7Q-Aa&03Z%RV#$9G1b4zfcVM=l;&b*(%z`tyeDUp@w= z+6w1btqTeoSRaHq7p*sV86t!l52V4BamGe`h1|@fq;6AIQhuNUMgl%~?#k&E-f7<) zRvq1IYHiPM+fiM?#{lXs_mFXIwO+SqB&Z_9bcx{=eborg;4Nb`ow&Iw!K?`y>U$j6 zVkl|gsNDeIn$h*Z1I@5M(jXUv$i=zs_ zI*9p5<#6Vgk3qnJnBTPI9s8m$4~jkHpXgfR1=w!Tgm~?NM`69ray6i z)~@8W=4ZLpY}(FbhuhKp%AzQifHby_CgUKeq?8FbH89~gz}B2i(CE;57A&lH-Tdlu z*)03X@BNsAjGYXH5oruj`$f0nGu@m+R|uvox?XTBnN*Doep?idsM){KQIBmj*a0hd z^!HX5!QX~&h!afPjXCy%49wUBnehqm!m~&2P6}_@i7X2kouX7g|48H4Iz60ni8P#T zyOYLL4ua0hVap7z(cIcKmZ?}gTib6QE3e8f$2IPz#0>ge?|0AKH-X$7BqE1h2I)0( z)qjJMH;fe~nVFW*0eFhJTPkPsHf*R7%-0Qrt7Zu$RK>;KaT>#aRtMUs;_*t0?8fr) zZ{IZQE|!0$OMB!CvC#{2PN-CXR+q34^nPr`l0_wq z=+BH>_JF;EQoy(K6rv2&8$``epGcB&hq(g4GofYs4?fiBdpk{^ZQI*br((IMt}?^T zW&3j1SM*kSYacXQ{8X`(rLA%9zMkK!S||6m_&YZaPwbb4Jg`~L(G?(Y+kiO?OL{W- zcv_^bu#jnbePvZ;4@Xp=$*463Sc`0&b3*_^t1tUL@+Dchu|DdBt+2Yc+4HWE$`{fw zRMZ!LD;^L970KUDEjXyayn?CxdXF`xk!R6I;e7pjwSv?dmycQOL@2$hv06FVGnr5%!RMh{a`V7fO6pTNZ^1lueTk)F;+T)+bSp%I#=}wr;plz?_+FKe=gbuv1 zKVG7lG_zc>1JF&Fjp96#QY_qk0q6bgaUEX#71TZKf4I1I<+;6jkEj~xOkSR6=L$03 zs`niSTaA4P)40H7~$xGX@EL^j2bw~@4|iQXw7Yl^#Cj-KiuzqMTwhj5+*Cm&*0_)L73$h5K&< zkQ`%P2X`?YcpUV|KeBX~!sF&SUhMF1zS&|sf1Bn7UBVRm%p-v%-cgVK70s@k=iAq^ zHz4}~)_?lyIh}pu?yMxBWuGt$_5E^=PGMh5TVfsEza?!f{NyxBqRmg?XG32V;Wo}~ zLc^9rFipxu42L?!e)s+;`82Ct?PE#B9hNMvM^i-h&iYjV8Sf}6O#kVn#Hbh@&U0Ns z*bch$;=80k6~;RC@TIdKeU^28WU`6T3%h^POmY|9v|&ap<8{u}*S9OYR8_SBK+EAX z4)+3~a#mSw{b~fDeu!VD{s3Ir=0}Hv875GlDr`D}L&Yv~ zRV)fA?X$LnGDdndr3Ed4btkEX#ebG`RI>Vp(&N-S_2zOMYTLk z2VlM}%N%>VM)wWN#+LNJFssDkYI|}gAtwQv1>#L2wPmDc|F(4-GODmVxwH&McA%B_ zWwQbs#i6;{E1(dgUhpS6dhWYwiYgrx2uif({e2DLp(@sjxmB3>cv%d%i@754;}~TifpSY~m>)rC`fH?7>3W(T4@9Re 
zicRVK_R`A}OXGQU^c&0{9dZ|4Y;#CAIQ6vmW%9&k)tw$n`8D@=VUm73FSeq=H@!tb$+AZelQaqucc)z4?bcH5V=ZH5Li%OmcYma8%NSCi`ocZ$& znZaqNN9M_QKeEtXrlsb{w~h-6pQ@8oMDHSI{jc3}CL}=$T={AUY;WHx0!5z0H{bZN zvzd6x5*=#C9Cou~IU8}{T%w2;qucl6Q9d%5Wg$yiBk&^uPXlQMXgnScOT+ZO=a-EN z)u8OQT%VgS_oQ6r5d{uk5i=7bPUZa^nY`pY1;+%vsN!$hSiuKRC8ME|HPucT-;*sTvw@gHrVaSY)3uw>ERCdU?;y`r2y+l9ksO_>>iIr+($bg$Ws z(ehY9$tGlEH3u5rl~tc|3DY7h`W@;6TR6$HbZrfko z(4N;1@s&Hr_hHM3%~m7=&E;z|w5jkDhsmaD9q$0jkyofSIjrvLPqQc&aXCm*(ESC! zfaSm!zoDg5QsK zGcpy*3XBHuVVM|a(esU_E||UM)AT9B`7>oO!`_v6&)Q=IXLonk-%i&g?-Uh0`$SFn z6oL}2znVV&Kzp_TECZlU8B_U>esmVneso;yE=(~Jbji+0AQIT2`tF$+yBO`mO$1BZ z{B|vq=X!^=dZ>rU97zz?QwP8SwQi?BJ~#uj0&xyk1u0_T)vFf+f*Bx|>d5YcThY0& zbr7-J&9Ekq7(moO&-cx*lUb?$0%ru4rHA$oaaR`nFM*o#4}cD63t+e;F2d=$OlyMh zqs40w=rLewAUPtIVoX}3PhxkWdy2Gm6qa9;H%|YrrGp@D0#9X#(LM8Kj@(N<}AhrT)cJx;xJPN^JK%brEr+ZWnd}j%OzXh-c zI5qT1*p1I?FsX$+z4>{P#(i75y&S z9rFC{xin!n=5imhA5mz(fq@-fd`(;&BC~wvg$$|_9>TVU6CH2`xnKGs28gH~G8#R{ zA;--`%Y4`NHSIbmt6iUbCnxexb~>tX1oRtYMTVdd)ACfz<6d0;cz>R~rYWL(Kz15j zNB6@mNKHnskpyjp-xG2|P=0jRW03EkVx`E?!CiAe;_>hLSiKIiBi!w-UQsj*!Xxe!1&!Uqa;Z=3iG2(iyk?exGj z^1XT)>1Hd@Ub;^(ec4EMnIOEtrcuPKsWM1F=;F0e?gC6MO*3zbs5v<)k`1#tinrs% zcVa}#fU_Ek59z-+nB;}%YM=IBx7U7ao%E6HOQRHRiw_Py~WPRw|*CP}3_hU4n^28kSAi<=0NTax5$@JdrszLSJgU1G22e`NBeRMPSV}pn36~@K{MVHlw7A~!qRww zD~ysW*Pw-CBdDXT_^DdMGVD*PcOfjWIS*5$M+3niisn$JyA3nQI8VmXS}ZKPBNv#r zHO^)@Hz+18^HA*CrbEQeQnhuiMA=e`Yh=>+m zjLk@?VKy3=SpbGBzgpFFs^Tev%qYI73h%~;1`70}Y_F~&sZf<&dUB{OjX2byg`Yk44EgFgW|ub#+kU(_RuDgN=Nr+q+g-W zV`57l+z#6ylI9N~YN@Lp;E)s}1x8##!4@O$;atu1Sw@K&j!`k`AaMUHkP#kmtbYHm zt8I2TMJ=Tj`4i;y(!eo#?$^HvZUVZ`vJdstHP_FR5ZOInEGCR_@v%eqwyLMtVsaKV z%Wr1Be+w{f`c$ro5F-U28fO{|H2`mO@7SPQREhk89Ezwo8d9*?vZN+RwkQzR?!GCk zU$l#G_ip%{nBZPL!~Hrf%Z$`%_t{VEWq+3G?oTJ;w@;br!*%1o=C4On7z(z}eFAZo#xh#Zg4{d_RtGO#(T;f8rfEUg|kTutIr%S~I5A!Pu1;D)PwQ*K`FZlR=UhQCCDI)`aK zyy4M8Z4Lu7Zbf^eV4!6fAAoF4g1B!Tv`8g5$B; z&mAN8LSb`RH+NU$6VZ)d0Kv1&=ypU*V2#H`OcCZR_PO@eq9$H5T5}kpN~E!!yA?Yj zm)onuEhP*0Vx|K{1uqqS#)Sgn13UdJt&jomD)y`Lx z{t65-eKk+GQF5vI5DVzI_p^U116%>{u53@OJ?LVdy`en?mSterSJtXkTbN-MS%8`r zS(Ejy{(eku>7?6HhN)Rv{Ef@AR7n=AlaB5v9W|!67+-{1GpZpE;}Y3SEgw0{ko7zo zCo7mH!lo*-Ep_S>0(svO@|9&(!8wwCP~XUEUIaQ7(t*smJjDB8K{>>oLt4o_ysHh5_^zTOf%ai~%D)pv`C zc;WXv5Hapu{=%p%74sgND9U1oRf=&84RViv7l+{c7|5UH`-^5>YEQmK$5SD$GO_q> zTbLbcHsz=;l#XtXpegd%=;ZKV`-zeX&*b7gvDuAhY=)$&Bfz6TUyIUZ))qK&aYR~j zUJdKMCJyX%L>9%B)K>>~D5)Cv-7oV>fI$e~w)rw`QT=N{yNYMk71quQ7n4-uI`t#1A6svT@Yi$rg`ofo_E~-$=Mbkx4tMdP7F| zW1`bv$l3RJJ!I`@tP;r@aY1O0hQY?oSZH>dxd;-LGtMWzSvVoeg}h3fn<|;!@k8o8 z!&v5uv7u5WDSDxg8N|i8FV+@TQB0278p(qdeb!D!g%fKe9aK~ue^q5&yH9yI@y!GI zQ$>cE2dJdpIHPA$(%nsiCgaWVk^agRRUf}z-4I;wuBb1iaI*mpC0eb>fhSQ5hmjNL zW}Ie6uYEoX{F+f?bzBc64iq#)#+atjR<PrM=;E{X&9l>|1w5e<(?=Xtx5Oq zzYe$&+$1!2EZ`Jew=u56^QLnrF|8jEW?B5Ov1;0SLieWkM5>T&7R|6IkR<>8mvG(> zsWm&g`tpzYV*NcvU)VBHjMn|*Oh1Vuc9h(G0oupd)ZxRJIF04xd6pQMA~Mqd{u6{7 zsnB%=to~}`8Q@G5b2yH0N-(h4JR3(*1$RLf^Totn$(dSl+)z9|G*J0Id`kT9-zIQQ zXMI$xw_2N{2o6xm$P^sir`4``_5qpm-#;kbEyx_0Gsk%d zR|xh;@6~v$uz)EZq`#|!hLd@8(Tu>f_Q%R?-;qNQ!8yL1f}l-E3XHCAwr8~6Rixzj zzrS6PVgJYqob19XdO13iEB==Uego9yF=p0YixR{Sq^>a7m5?uFm#Q#nj z!Nm+|Fk%vDIe_YLX$4bmGU}`X9wryv6?Zsntt-r}676}8DSpeGSYWz|P8^|UkJQVS zcxHTCwKUWp@t?mR1hwyS-}6fp>+X3EebQgBa@{Py|{HC?hQ^o;u^aXZ!EdI$og*$09u{ zhI@;nl37e)=X>BRiqKglH5?!zNQFNKPTFsw*|{EdtN19nz^_fd=f>6c2QBXQ;e-8M zx47vrwTgc2wshlf_uJZBhyPu$gl_O?+{|6j0f32jRis^8s9RjEYVkF+O{ByO<6j?F z2R`3NprZvj#xSi9Lj$s=+NAFsh^QJVhZoac{rP3wyP`dCLs~;?7Z>4wf-pfk385L| z)WenV|C1&gz~{+=CzIG^+W7g?`($_CTQuA^hj;k&O`mX03DKU_)j-D=33ZeKv z3mOrD+~YI9^JcmbP#6$~Ir5d|yV=KW#+pZ>#@(5We}EOph3tqYY$&<^ooPBk?rf{b3=?9% 
zD9g;>=g9vJ4QXkXS}cAd9Y`_ZppjBLZ6F)a!{Sv@{@*auN+k9wXfKHXMi0?IVEUiu zN?2%xDEAd>C0*`zTsmH{5(h-deL>XtpXU?yW10x#yZF+H5~9>1CI(FzW=H?PV1$tT zXHp3gq!Ymn&wjKhd41}5!eLAzJ(lB$+W-5EAvP1_5xnU@wH1bU42Qf<)J~X~Vm;yi ze=M)7ds}GuXZ9lNA7c7Jgt6wgjt!*2#{A#W{4SO>VRItwaELD*QAbq#f2;9h${cb5 z5kvH85+*C%4$ORGKYQB>VvRPJIezffFS@d`n`~S@~9kn{T z0h%8kKlSCirZIthnoD#Yyk>Ob20==K6o&tOae_!Fhsi^Z2mEO0NFsKAEk4UwCd`MT zm_jmfdH-+N-J)Zye|XUNn|g9^tkw@4?T-!)%rfUN4kN-&2r diff --git a/assets/crewai_logo.png b/assets/crewai_logo.png index 086ead55243f3b403749ea1b45a69c0d43035237..bc44909d0566b2c0466b7be688cad684b7801c8d 100644 GIT binary patch literal 14642 zcmXwg1z1#V*Y!}+-AFf5(%s!9-5pAIcb9ZG(%mH>DN@qiB`|dNe|Vqop9_==&dj-E z-)pbE_BxTuic-i31PCAy2w6s2TonX@a0h;Ng8KmcTjrJU0Q?8Y&Hqc1L*-X)n66KS%AlUR4nOd7nyBLtz00+Z1C-){*~K1{ ziLKu3c4x=oyPkM?sg`SYD3BWjZHfLWhC3qg>XwH*7YvBpj{R7%U~-Vl_4jI_$3}4L zixf_`za?F2trpqZ4Sc|eQ(pc5?iWOo1cHVrPC$OT1xvc#@ah*bnLnp!ZsKO;4PV>) z%5^>PgJ=RK2R<@7w$|0vZ4MifV%%~Fb>Vui^Y-3A(t%QOakq@mcDBd$L9Bxx8UDMf zA3mbcIF%_qerHQ@hKbXCo}sJGHp^p11oW^wEL3SD}lpG03vy!UHRZnxhY zuD{EP&V~3_t^mh?|5jX*CqV1>&Y=S=4wDufl{Hg3H}Pmg?XTTy_N5nnT&8VlcdQjY z!Qo+ngNy@NU`czB!v?ZWVb7cXd;4Ps^d@S=4>aY=M$0%NsI#VzWra+`e1_TmN@6q_ z^Upbvm{q`38S^?rftMe#r2JyM?>U6pVrg+{!69`iwJ~gfE{$I35WZT1;HOZpF@Nwf zl^-8^FQMEjBLf4&^_A~s;cI=KGO8>|3MBCH>5$_lPF@5AmhwMN&t9BBz((NMfBG*P zL`jlJqpaUfF5=5!YsN+f2I@_b^uGaDBT&bcp8B6`7^3%#|Nl#cD4B0hmK1Wt#|im9 zY>an@jl!g{Ok8zt(8yQB{r`(LsF^7>oFN&$>e468%#_vStVhEiBG)whFBn8gxDs*o z{lSIA?Cy}0-+J({aFxFA*5DgQ{%^8^Ly5SSot@>d*0FJ0q5myc8oGe5Z$(_=|K7jN z8q{Lq`kVrPnt7GlrmLl;rAOU$HOO<+eSr_({bKOn3NT@=zXqMIm&HT=V!5#KMd`8HfA%nCm|8HD= zN(PJ5wzI*ChHX~&qEa2+=}Z}eZ`VrOt$C0dO)>8qWuW*ScgAwOnE2Xp(hy0%xU`m} zxgcrwrJO=a`na4E^nbV8qqy9je}0$^MsdHx>yWnn$5wLlR-0Dw8A6n*q!gkOBkBFa z(6=EUV?Wv#Q=>l3Wgm7-zu5735-{iRE365<{-HnjMl z9;V?k{L#-yYV0!bL6@d)2LrD?viohOptzyelaPPWEx@^yhO7kf!@v#JU*R$o6SwW` zTUQ{sm7(8)D~AhSe%=jcxk1w6*yAZ~=c48l3Zw6a>LCw<*au&ycEW&|WVlDioeJIP z#wYTBYy^wIU=Ex5o>J~dCi{Rv-vzzcZ=a8Z5)OY;XLUdzUBV+b>rz9BPP$|C>Vp2{ z8!kc^@rD>&QwB(x9&iPlqn+wLPWJU}SN0Qv;ALxeGYcNpy&oUzPm7eWLj_=ZQ_~D_ z5WPvDCx=MG{y5PTb|bKw5J!rm!9k-TeL#nU{yQ}lI59C$vCWx>?t;ztr-0lK3lq6HT04XGd*w>kl;Z{yYy1e>rbX+ZgCY1SmcjcPoYhnqZ9k>q3L zen%9txx5lto9fL>bIY99kT+(Zv+JQ?!o3FfGKo+6tOyI@hu_rSX&u~^0MQSA^sDpX z@dMzM155Q!@-=&LJV7YMnu_}HC>#L0&|xiSrlLx^YXHx7Hr>N?M zZEfk)mlt@-Fd7gZkiE3+Ii>wX+~<{VvWvH|{`asKW6jBSAi09oe;OhV3JOK~x-j~n zbx<9|bM+P>oG2aN_BPxhR_rtK1DfQwYsUswv)w!PG4uy~X+rp1@ylRHC(`HK2ej2p z8*bQw9g4RmR`|X%Z^Y|XXPh;VAEa|k4z#fl(P=M{T-rAHjX*c_k7#@y)ClI?eGF@0 zgqN;vmNjm5MTMxgHa^&`)5Dn!Ixx``BV>rzYO%Y(ixDyqBna=$>V0sbX%J}Nspqk| z6+Y<4B8(?mJ+%43QbUX+z-Z3fXf<&pkTluciBBnNV@d-9Q{&9N z>A>yqlt?u#NpAt_UCGx?Z1T1Z5}ytJMf&U~1l)o=0Zx=`OEA(K6*M~NjuN!zFWm3@ zTSSugA-}UaKu*O7)@c178LX%-L-GpUYaD2+Gl5?UdK!XLq}`O<)|W~A6o~~u{Gq3_ z?r62V9MLA4D7CHgeACz0Aw5uE;+yW(0X@F@B*Z;IzzE)|C;Gyscl}N>QY5tTMyn;) zsqR&P%~yTY!y+lu(NSi(H%SR&*fHEYB)C|%Es?7SA1gb%eT(wO-{a$<3&*iSy(}oA z9=``r$AM6TnfiO~(gqdJf1Y2mg+m6@%h+hK6>7Uu-1!@|Xz_2W?NZRo(`~_VynX`* z!HlZf#ZC5c3PC$@Af@twq$03bj{P49J?<8!`?@k^0_#AO{_Fg!I~US?62f)6Bq0q{ z8qgwU+jq;$%LsXsTviidF#Ut2Zo4#LJUzvbCR9QQf*QV(V7V+RB{ zpv!>?>im;u5|VM=(eJ)1M#kv_k(4^9{ir8kDOl|apo2xeXll}EL67uWG!~SUe4wJD z+8!S!g)`s^A+h@?j$FGHnYZ=8sQdTnNBx-;^*4C5s)j=&b{8FP?C9A9iu2f!r25kY z|0K-EE6{0}m%p1Jbb)-`=l508^jqr7W@i;e2(VYo5q2u5V&hq6%uj0RM!KIvXICdt(mw09uhh5#bBDko1@50h*Hi{c(Cj=7I2<5#JDI; z8Pw6it8b?g^>n2NCYGUTL+U$A+*G1TikGD@t}c!v4D_dCNT!~imL&;@ZNoxif$t?j z^EPNkcp5|cXklJFyg_n$s{!EBs;48z#cSuhFQnEr zWOPbmCQBzPjUV4`e^Zn^;`|uP=7mubxXW%bDH-YV8Dc}RIgGP`n)JW#h)aY 
zH+c@1Nqz|naSbW`TjHyvZW@K}wUx)u8?7cYI#=kbK8}0%fKarUC?w&}y>`x>*sVJjy?;HAcdloTp&c}q(g;CN?dW`05Rb#!FuGN=U$ve8JXO2zfoKEFa2$V!Uk z@wg%wto@Yg$gqIe6(*Mju(9neTxfTB$r?CI{a+VMJWclb>U+~D=GU)VI5>9q_e~aR z^mk8q;55!5}Fp89kyf!3!Wa$4TdpsM+W02w$B@xv4x zx!Y*C2P@&I&YdL~;;M66ivzKbi$gTULj!4O)v%Al+HBZ21xDK*t6V&~>VLU+ ztN2Qe%;9Cmu)!k9si_qiUJ()ou%REpW~7*hT|Kw z=xqEak4SGG7X-eHx@nv8MmT85vZ^YxsE9=joerb!PYV&lJ0dbN@N){qKb-Lq%oy&O z;QG7n?MsI_%v)Es+&Xs>u$s=KCF?@x*EIu+QwdtA9Z2e(l^`>T+wmJWJ z;;jsY;#{QEl@yPRfyL&3ShWEC8QSnoYP?tje|0fDPKCOpfRyQWTOwp-_lN-I4cFPf z6|ivpL&rlBK2)@%e+T1yc{7>M<3UNeGi``ljFcXD4`a!$a{ynE( zCvL+t4AO^Su=r=Z7{p?NV>Rk_|7=qdp8#z4jcRLXH4Tj+^MSef`4X>F@o(b*^kZSy z$7qU_br~?Ls;Oa53Vi+?fNS6Zp(OY~d6S;Q=Z@hh5H2@=LD|b6tsgV+_NEOcE@mfd zVG=oE)7H=k<2&oZTrluvl#Ib6%=s8A>Gb*H67_SDwNt zS_DMIj35@s!el$vtx03+dnLy**jkN$ns|%;60M zPVv&y7kY4p>F{Rv$intKyblf^y@KM=)=^VExBDyPP5b-Y6}Wg@U1dXR5OE!cpGZbd zlc2?2Abx-GHlWzfg(7<8j~@=MsQC1AVOE|NOVrxBoJpwGz?f!%ks(?CnI;;z<>Sq9 zy#tv5NfQCEAV7-xySywbKn)}O^m^OoEm>W+a^eQ8#H#Cpe%Pr>q%v&?Ffd?ik63^T z&dBpn-^!$|7Xbm`d{#|K<*qS8vZ(E_B+KkhMvo0N+w33SA2>85se|;Cf2FRWp~`nu zRmISCqp>PLb^Fs&IAXgU0R-Su`ji+949sGiBZaE!b)*__vaGGGpMUBY8)hU$V)InE z%5peC=JB{8b!Yh^TbM6y!*_yyE%+AmReZ9>jSa(BV^PEEuYpAtbXUNv@KSqCOewu^ zsEvw3kTAaU`YD6$CXCm|$;oN+nmuzk%Lv}>upu@fPk*{Q<6-~@Y#vwD)}}Fa$>fu~ z6CTj7;Tp-s97P%(9UXaGH{ttJO20jkYd@ZVRKwTA4k2b{)GSWhXt zURf2E_u+TC*H&;sz-_etzF~#iPK-`qUY-kCbY5Cr9m8_FBsv6!@$aAC)5=C+e!g%@ zU1_|m$d!VkS_lz{y0p~T9K3p1q-pAV2~vRrcRt4mU1y#`;bBTNc_xWyK$1Tv=a217w(qgs;T%$}3Vj{P36 zgTV}J0<`Un5k{F)Tk7iSQ`4k?c6I7WLHt@y2?iLMm9=%k%~MwB*uWO)*Xl( zSGPgw0$+bs^t%Ti`Bo_r5eQD(<>EgF%tIE9EC+Vtm{Xb}4hcA9sr1x%NPW1+y?YMa zgnHc+ARxd8OBAxOLVabu2;5frZNLA%-0xgkFgguEV&B;Pg8eg5@Dt3PLbt{Y$F$TB zhvR&~Qy!%?_p5B4?N-?4Y|Z$#G_ho1Sp{B*+`jd{e*+$_4vJb@G}Od{hIb061PR1V zO)1hIK=U`Uva)r~2a0MiYFJ4E5xG6D*b*=>QAwphMwIgP{dssr;k5XT^C*Z72grn( zpM-kKtSZDQ$(Juv^u}Ud3i0uG#zSAk zx~&;!m7Y2(65T?8&B#2pP5C1sYr{QOw@O8C$q{N~jeSQLky_K*(((y`f!R1-vWS+E zQK3%Ks^5Y~nw6Fc7^HmkCYi@i12HC&W;1hhnX5y}zrF=&51zFPSy)_1(W|R^CDAgn z08DgCOk(^d`my)a>2|`pdus$=x7%^yiZDJtJ~r|UoFj##QX8XKj6_DTS4N^=XvGfe zaNHF3B-3zY?$sB6p^U`Xu<|u2QYEWmu7hAmCJB3B&!4sqk^yvH@x^gsZ0RXyf%svzZy`j=LPvi_NBaR}DfP%E`0(LF*;a%Z zaejGu9o$w9A= z$c^v>EH$_9Rw1|>s7{JsZx5Ps4Q==G+Ju;a_oR7UMX^JW(t`s8EA=BYGYM*tg$QDS zVgseT?(5*8OXg83YY+i_dTDJAw&|G*Z^od7M;xh__u&=!^lj!hcckM?XBsm+ojB&b z=D5js(F3>@xrN2z-_NE9t|vw2-*Z@r~S zQcOH}1PcH$Z;#}|#aLuqTZ(rvUzB?{_we2`v<5f%J1?$bqE3~w*!y`Gwb5O0@NC0y zM}1k?+?>LLXb3Qc!P3?Fc}acaavGw~fi1FgF$n>H00Jb(!a6z4V1e3OFKq|RtJ653{&lKGkx7nZhC8PL405{h zzNBngNiw;~a;`51HBYNfs8RSD-s}W}Ky`vW`4J(s&gfTANE`!3+kEQJMHTt6`esHCV zIf*$AxqM{7S^uU(){wL`*h%G!FSTN2Kd(?h(MCTN?MeW};taxW0vrSI;e~}k9UFU2 z_zg^Z#2^jAHiN>cvO=wv=H`OBI%Rh|=jJk*(x>~Kup4}xy7$vzqTVBUhG_s6bHzW)fC6%5u$|m zC&TFfGE#1t*=n}Al6FUJarc?vx7gP5tNfZ$vt6=OXPEQ2ubSQ@46T?-Z~j0Wt4Vvh+aV((S4mQt=_aQ&po9BzpiMhFX7-NqWdj($Tj#E1le0}!WG*TK;v@mJ!Gao<~Gy|0@ z;07C=5kp1g4V&F3|-y`R!7KNKiS&u^pmvkmE;+T--E*l6l{%G$HbuoI+UrvwE-jvtu( zx@>LSv~7Nye0xT9C5hCutd{fz*AKU?qmqt6{KlsNdNo@nONmjkvSWLAR7FEn{c5Y) z?Pk+b@k7_*T$y%hNTkfC5`T@FKQ?Cq`WqX0fDvU^%zmN_j#*;DTwuE%Q%(1+na)X| zu(0bK-A_B3#04V8QH_UI(OzF!&t-G2pcoK(GQU$fhHZgkp{ zJ-x$>l{7z?2Z&U^IYu)O?&=)Mo=lVkc1-v(O!#6<_(pYmxlmI!Uo=c8iBmV`t98|% zxNZ#O6g?6Tzt&b(s>Kj*osgTn*zJ1p`<~cTxpl79bV5Z-7JYF%;K`^M-slFVcdGG} z!Dd-@myneetxPLkU1z58uV~dd8E8zcIQcYd@J*Isb1-FS6Vliv)jP<!Sd&A0DY@O2O$K5kQ+rwtgSyJ+BQ#))lqTChUq2YR!xK~^ii>> zIx^ERYZdOgbt3Ef*?d}!Rym%*W=q!aro~d_7_o9OPF;0P=_B!^{;uluhNqBSVg`L) zMO{3%VW&aMI7)>XDPG3oMey;X0=#cAqn2#T+yq;E2SJ+r{vwMEV&HBwFe9y1XVQx3 zKVDXzS|>IPf-E{oiX+-5!cGG@_e*oVPkza~^0^w`-rjsyUUsb?->Eoz4^Tk6gi;@g 
zRQ9aJWQ#MT$!lof%>4s6!Vd8nDqKOl6Jjcpd*us9sD$cnX4k*N<5D+jI=nB6lXY?* zq_OUNF3Hj0lxb9R6ic^8q^mXf^1)IX&OQx&B$btw>TkYXnwo&%XE8^fi_Q~{LX^06 z>ee|mJ#E4*W<`po95cTis^QW_mn}%eg1*_0_K;p>OoAT5=Jh8~DtL z4Z%>;oJon}@a(zFZWo8ZrbB6r%`>653CK%xT(N|iYr^>mW93hEqnyqJ-{eyqL8i!hu% zTr|=TEE0&s&ncci_*C@a?NM)fqzV)j%ugzA{zS_A4g@41K)ATM6&4jirKF@ph?Bqb zc8LH@6&yMMMbz(UmHh5HH_8R)87gyHl;z;rL`jr#+Q4a+AQ0Z-yBFXw#wO$f0d;l7 zHMJvOY7CGX+V$e|hiqW)OSp}GV0F_>Dc{4?e#K~&M-hi{)0SY2(J{9vl(G^hM+dZE zwy)yTHO4Kq`U(i4W=Sb2ppx#qq?k@={$_1faT{1=q;7gCyjtNV%eI zG=xwjr8t;i5eBWg0I$59oSX&<)W*TJgDdNa{@fUoB@^5P)xh%t3*dxhGTh>`Sva&a zRpPZ^u_cY$E;UHZTzGnVmV^j)1N8HTXV1LZUB9hNi_{b-Yg*@Oq2~um(&gi{X|!K& z=@t$mHl|1I4u!w9LmBC!N)AS75}yHueP5p_J3Bk2{O765U!7}qYBZ`T)UQ^Q6bRz{ z_1Q5K;;6u&fO0{?*0$Qln3<05R|q_2n_iwel5P}!C^Jx_xe-_-PXOTJVY4H7WN8Q?RszRvp-*gd{^D-w<)2f0d|bCbV zxOe?feLqLndNMG}C0AK2qC54NzHf;GoDg7B7MeK;s=iKTM2;B=Vqy6=?yN;F>%G08 z)1P++dd1JN{8v?7)yhye-6KnS&K+p8$um#^gP67ZZsYwiD0@K9cDwb_NkI)SSvPF% zcTAY@k#>aHbr*n&v)JXbj*Cm0@nXYyuMC~>mOKJC^+4nU z5ej&HnRB#gq0lf!p?hZr=;FZNU-{5LwA&M-@0&b{we%kRjNcQTv_jheI5U1upIWVv zdD2yriu-O6xVvu;%M|&<-w;U6E&0-HgUE3;BwwjNLX-qFQI|KoNx0TMiokxV#N$XL zHG1lmX1LT4OzOP|p`zR_|GtwgWaIatlpULmH!VZhhAEkv7STO?`$SLAf&(>c>anM7 z{LlqdgO2fWD&Drjcsb{P6j0%u5!t&_6aH#fAZ|_dl8)x6&#l1`5fOz6*5Wn}=*RL6 z9b<CIH^A>Qn#zc25PaHyfPJUXX!d_4shu{-k8EX@gg<<|;d2>20ta3fD@m(N z1Y%5kXB$WsLoM{}ru!3_*oLavLiWUdOD$@*ZTtHccG&6vDLhD_SvJ3bQh48nV)4Wc z=s=|JW8mZE>sCP5E{ytas~!u0_S;PjJhnnTbS>84nMvC8wDOnw6gz9uGw?Z2Pf6#^ zj;^)gscxFaODc^hLF#c$;BTg4KMK+p*+y+pLq(%b({WpDqkDRWcOWW9G&D8U5Sh!U z(BSgx>ONcdp}=7Wj$Q8noT;g)DgEeJBPyANex@+hn>2kZ4*o+h{NA%wQhC=-_m2Zk z-9?C&za{aqf$qx-?S@Hn1*_@zV4s1fWeSKg1RTzzRD1--o+rD9P&`SM<2W)i-Ui3f zD($gJfR`3FHGN^xufHeMY22{YjsLj7yt7ntk9;{L||Z`$k62PKnNe1 z!{4ktj|Dw|`1P$w6_9_2d~eE$-x-YUTs79R6`8A9-(A%Nb1C}VwjKL7=8OaagW84U zf-Y4%$8x?8y^3&O;aEeSDu#W(5(hYOBHOyk%$aD^;9Tlg(zz)1Z#netTu@ejk0O|_ z(oRY~u0x0ex?)|=m!qpvsLWmGefbh(pJ8jPnBe|YSV#LB;QW~B2x zXzS_4ygc0%6&HtOIB6l&Ki{2g)E>L5@!`t^Tez@uayl_W_XWA;l$21P-U03ejAyVR zcJY*;MYw)LVqxs$rf!ycoJbsd);RS?6zkc%tdtZO0AMq3{<(E--FPW{B!h;8*f-k) z>{svBt99|At`!Id$U2M&c>et5po7Sa=n_!D$?U>HQFk{nP*eaVWJ6`5X9?`-(Ge@y zZ*+82uFmA@@{(ObJBP=4=-fu-Pyi%)WV>!xz-Bq2?A(Nk6xqyu_+2yU+p$yrAvFzP z^C86P_G={TvG8%lqW?5h#(gfIVgocZE^gA8}N(>U#;gg0RxC)#ot%sXUhBjq@w+deR#~ezmGW_|oWfo#FE97FTXU~>KQmAZ@x3{k8>>n}#AOl( zoDR(=^(F)3qT^Ajh`a-gn3LH92qITcFJa+JUgY$8Ot?6VI>i+TnD)u6^s>|{oe))O z+#2lU(S}RY4!N|KED9n5L52eH1|}g&BVG!RR*B3ITTxS-Nb(V zdS*m+c6h8*!B@n2jqmmNsAAvOS6GWcVKVgrAGAHOk!9Br$vftAT;1BXD+wr+fI}vD zyIJO2E6@q*-@?1}IDkRKQ63ZkTFqAe%w#ZE)u8n`hesZe586Io+k}PhOh)uN^B#rF z*w~B-wG#m0H8Hohx5bQ{M<*wPTs;rtB0wAOZ=KdT<^Iirq0+yQ;_sQA#iozHF84g1 z&W)E$&$6;@by_4@nhPe+Ef%!R;)sG5>~k$1Z;G$4Z=ALNx)gW9s!Q!P`cC9<$Pse- zq7^nBcaf%gbBQNa*tKSLs5Sty;An+-)_Bzh0>h7=KXa^`?H@gZI`?Ut;Q363 zlb|tgKz8BE+!D6gv!D>XB~vsTWl$0mn z`*QC8YKdijK?lv298o%)^&3$`F%_s$`;(;75M>ME3-!jJK(p05C5p9zegM zyUQN{xLB_LtEYrdbrrK=ebBNUAeq;8Bqk6K^eHOqj%DIbX8-Fg5TdI6+?f$E zD3IIoT|JcvBQn3HW&->~-~<%N8P?AJkN1-U_8T93G1Qs!MBYa<;)ohzMKtKc`O66H zc%DuH6^XG}t0nV#aLWOY&kd~1KLJ(;_w`M%qZayUOfUpyp_3R4A_0r=%)s@>ZE4i{`M?1m=ZtS>L~euMD?m^m#2!)(*J z&CBBrxAT4s-5ucVz+yBOu3J~i;WhswKs&XvQpxR!04IW+*z*If*WhwrDpCXzKD*li zeKLDfGiwVFmrgz2C~OE{4iRzdH6^b-DO_RkV`rCETyf?|d_~v%KRI4=&Ho*wmDXfr zPr#K`U#F+1f3sL)ldV*_IvB_7dBJ+FcB9x8GyAEz+L-A18U23iQuhNS1Qs@S0nnIO zI7nmEwcut~{Zp=1T2{%sr`2SoG7a+zba1u0JnBCgr-e{#Ad_KvVK3ELji|=-BpRge zjMp??SmFEx7R+>XWTuSQOYedoa59ejHjzqB5K(ePDT^r?3eEdo)AjKGmb&=pKttZ60dvr_mWwT|T~Z!9p5(lyl3=QPb1Y zWcr)4OG}Zi$MXOYnpMsWao~;_jK+2Tng&RbeT5Ry?EZFBg%UC!*B)Q|fH6xsV)IBW 
zW|`CqL7{PR_}4#p?zgXld802;gFAfAy-o9c4|lEAhp&hk(IU`B;U7NgTV_yQ?s^F0;wq>=+m&V8U&fHD}z2YElz9Lz2g;nh;tg;C2Ogcsq3tBWzFew2# zI)E%a_NPvzlHPv!AiZXift@%V)vDVcN^Wzl>B&70^}zHpPq!6n*3i-1tlpTSxE7w{ty7?b$)Mly<%*n&%6M{oTs_AtA22hyuY1ze{JhoauRnizdAB;MalnG$ zl1dXz_d;?UK=qa+pjXFv`Ro+!*#UCjFS7hlrb>n+3tj9$Z@~!gm##Vf12D{EUp(BV zHTpZx<#Grl#V-Gu&r~c|$7`|9EI>QSNZ~p$soUJFfxgNn>TYaiB-i#(`*BTY+d66N z?X4T&AYuvcW=caOjt{3(p0dIMT2z4LO+$}m<$PE2;?Li$Z_Svg5Q|~yR4xB!HaXg+ z<^j9$t=hSFc|_!*bTu}z<&v?Y!TodUd^c6OqP}vIU6lYW0CV#nN6x!@d&U66Nf-3x zb>;X6K%g6Vv4A!kFtq@!vFW{By+#XUyPg*}fT${%UwL_Z*L&aD%@v}+ZN(*S!ll+> zzCw~B|K&WA!vQ44IvXcWyPWR@I=f2O*~7PSldezGY{PNC2I_+x^Go|>AAn=P6NExNY6Tt+QbwsS2GXx5G83h;F^VnmYSAywQ) z%W5P;ENd2_VBdhYli>J?HA-jT0RvIz8dIkg4PY|^t|N$bl40Wq$jO1O&cMjhHeTdC z9W8AyW9ur$bTYHTw%5mF`q=D0ejkcZe23xhPO(*U$5 z1L^fLY!6{H&(@XL#6%3S(@bSrtat+!J`$eT@;Cy2SR2xdKdI0IUtE;O_#%dY@9~mA}VMm9Tmf4zKn*$lp ztZuXV!vlA6Lwh3@EQ5CgapZfhBLm?O{o8{et_bGmZ@f_9#A`8`Rlo?fS(1dT1D$Ks zy0X*nWpaa_t!No!6)s8!03so;3+<~HZ36o3=r1CQ;ZL9}QZKEKx%c?F8x0akS=9Ld z-?jHh4M8?=B?5Ov$VS>UUKy|(Q5jR0M2!A8EWVh*kYxD9}SH9I#)KH+vdCPHB9v9`fv$LzMb*GFKFUT#zncN?oB*OH6rWl1OzvK-2^z9UuEc3__mQ`cvnTapb{& zAL_7VT=pu^-GwDL0ofU_V1%h>dmojl{5v^j^U1{mjlxdapn|mG1`y=ZD3VBJxRBsb zr-zU~)}3wsD%QEALrd-Sf16NkhlNBVjv@(q15;u~7NjXT{X7~Ieh0>*$fT}W&UV>7w?ejX@I3E8uO+Osi9b%!)k zrUTe4HF<1tp^dpHcIFA;hz^knCwV|*E!{|IlIFpyBLCn1p5|G2JyoRAw3}y5F~Ggc z!;OuCI-T_HroO<(cWBpZ8{SFC0f&FK)`=sVMrPbr^dRW<-9`pD8{pjhf+!Is3N_p< z(0Q@k(-XAuA{cV|_JZj8FCCKf>d5hT9#{M=MFv3)JCX-_UoeOCF2t|Gjf+3@+-~=9 zf&ve)*=HwnvyO2+mk7l_O16)A7EB+jGTJ!&1)XV8wx0Y59~V%Qp**lK5@18zKA{X3 zKO6v68$hgs0Jd75wSym#ddT)Wa;-lCI4klL5L z!P<`K<;1gkK2~mmwP)k83ZDYmsaMbyWDzp%#mZ|7rGgLz(X z$WDcQj__c>Fl*@JN2i^pnnH`c>i~# zzCX4Z@5g0D!G9Cjt-H7kZ;^kw|(UIMtH+se-#Ax^}iPxk^LqCEX}OcN1pnCaw7fcSAC!B}-J32+WD_#04J%7za( z5eA>{fW^1_rJz5N2;(G&$E~4>4tC*%2~DFlBK6*pY8CL{eM_NpBo?hXPijln@S;`Y zG!wSMWixqWHF+x$^U#VHZJUI`Tdna(Bza1scpN_Rh#Nc|2P%@&s-Vr&4)kR`QD3x} zW+)KnCcRPH1sc!*WkMDyr6>FvV%la_pFMlt&I7Vb{Pu*_ZwBX@!3uw=0!vgH9kr{pdm9sXtK; zi^xvG2Q+d2us+ANHgmwp<;8CgP*vmJFJegZnT=afl;2}tmjD(_iji^!3{{xP| BZl?eM literal 99658 zcmY(q19&DuvnU)}n`~^`wl=o?#@I zHR1BIV(>6nFhD>+@Dk#G6oG(%0YC5|6vWRJTOkMICj)j=6cYlfn!-8#DFhj-OPI*W z08#zGP(a|o$Uva~LHRj>fU$tU{|g2Jk_5*7Usw^C;(u^JfPg~Hfx!L;NAoBD&msPk zeyIN^2h9cke;ji`{x3E#AQ$xi!a#@rA!~>{(f!Gw?Zh=4fq-C9{*!=#(ldWr46I?U ztnQ>PBh6)KYfYzbWNTnd=Vop9A1WXoH?AMh+So~-z|Gpq#*xd7m*~GRxPIXORMQg? 
z{1=LoB`=Y>j68v`t%ETE8yy230}&q#0RaJzgOLfB;vdofA^$1y5}7$U*>Ta+ySlp4 zxiZt)I+)Tka&mIgGceIJG12~D&^o%?IO)65+Bg#bHyDqCua$?d*ybP)NJ_xI6p-zJ3aMeu ziVZtK@oR8fsah#G<0^#cc4SD(^@ii76QARo6PE8jpy+I+5D1VMU|BMggh7&`4`8L( zHo8(X5EC*I!JB+Tn9|Jn-J#vNsvFhFKT!^@zyO^RNfQ%84MWz^Il2r1M^N~m>Fi~L_AVIWd>y2-Ixg;?cslanuia5ptGIl6Y2UKjo zDSK(A-`JOI!#*l8_oLSeQ|pbo&$~og8B2WRoTNKRJ-QUY{y7B%>Fo}m3UwkH+M_Rt zG_)G0->4!?y%co~rv84^8Ui=`QeHmjb)jHA#(=x#gG{5&!1a+Au3AgsQ>T~|&QnX9 z-!2e$1 z-l=W+R!i}^%FJE&TTh4-caV0aX|$$bW}Z)p&$byK8L2VfG_a1WuX9>+HqsehbN8p_ zrFnJWeaEhO;!Kr=<{-V%O#p(`Zffd(R2zXX)38&Kubxk;5ULVpuLA}_bqnapA@R4B zjA_VGj~(&;b|mOAWxMtt-!g(_O?0eqOr{?IZZO%`{m{5R4>4+Uk!|hH625YkDLQVr z)wG>#?KuB=1}CDPIb3=R4WIszkT{6(eMRW6{BK?Ev^t1YRaGVuNlm&-gWwmnO0n_# zY$owzzNl`iROs>TRB}Aqfl~9>)85}^d-YfubgAAixy(3-<*sqV9(JQjf71RMODZB8 z80OobesB(GcTId&8DZZY())n~M-sIN__VaH+b1O#a!zwCYXgbzD?&Ty;pXa z@v_f5>&n|ql``APLLVdn!T#;i8Fv)KHJgq*G$};3k*uJ+pt_;CplUFc%^v8=;{N6S z#-cbk7ayMnZA&lb=I)lOyjn*A`1d;kjUJ&}z_97i*hYpu07?n^V=()Gv_s94?ive8 z9`LP{Ps8)tVPVX&&#KBICNbfO6N<`hr5qnZ7`D`_3@M69QCE{P0V$t2*iMF z$CsgVP00(gOnTw5w}Z9h_rE{o8zldu=y@D7(!C@hO)MC>(?E7E-$uIhcoevAvQ{V- z0{^4i7N*1P`fE~0K3`oSiB=7v>dHkbfJGl2GWSM#Q+}sBusc5Is1uP1e+)?N+nvtT zi>+YMu`H%4%ZRq<&{WOKio3?eNyNYl2W{d5Ixv*S0H?%LZ8Y>q=bX4h8~Ek@y)W89 zT0&;{klKEw>k;nAyB@gRJHJNPVXWa4bu3|_AOFV2 zMe%Ob1Pw`%DG?!bq6385^o@m9A7wD zvvKm#(ZX>f9>vSqi{btj{+`;I-jg5%VdHYZv`#=Rknkv8D&AJ(HjB#x!!Q(~AJKAY zCs}ZEwpB?ZDAhi+8=so)9_yt_di)O=u3TY>`_3FHw##*AL@RT%{#``piPAS-2a7(> zzlExCmIq-&%fZE4gvl67zwS7u>k98yOs}k-b2rPW!A)Y|ashjd;MT1_DIk_Gokj4q z^>CIcbb~wrNAZmracS8RD#ye(vezyr8x3eoWvrYPNHwH`ztn@%=tjilgsmjeVgOi<=f!T;~3~Sf$JLAk%Dj?dvVFC=n?vqtC zNjo-Ve39U7k1sg?URaPwwdNP=8KW7mFSc88Wh|H2h*i7Ze2}mf;Qg9y$?CMniRO8e zhKTT+u)?~ebCO5fn3=KY%u9Bz9Aa#Vack8&YmGfYhTz`TqAWBR45_%D&P+A2T!FT$ zDelGoLK<%Gv?1}T#iO`6E}q+&98HV&fH2Z|GJZeJC?|ZxPt7*MYt>6 zXCIC1pZeFQ?(H0328%BC?VPG|uxcucDuN^s%Mra!*lY7(hmWlM zPJ)H?_au9dP(C(;zQyfPTZ*!nSXL1Ga=?Yc@c$g` z#;KzpQex?oBL;qq+AKa5qlGnJj+(i&-dj{Fc$1dOgxU>|kw&Q&huIBzWuBoyNw)W(fiL}Kg1Gfz15Or;9||1H2aG7! z)sORJ2oS1U94r*$7fh4W^D%xKbq=Jt7@|6wkdTGm5J9f_4dEFZ-PzNMX`MPbJrD%u z#qC0KcH@7A&w@i5=xxp~&75W4XAEp(;=!dzpDo%eimOnu#Va1h1^ch?hE5aYbu0)Q zi4MaaS;W;=aAJ1zrfLJ}l}c6E;DA!ub=zMTfay<1-}UEm5*?BME9jJ`4H(SmsRroO zyehUO^1ySv0(l_J*o7XM3_}j>glZ|61t5%eVp4Z_^Q1JL;Rly9jUO~q<0T%$4H_%N zySuCP8#7gni24voHwfbU)cLLm$xVkRqHnOAWW!5nN418NY5t6m)&y4)^&6Ksr!2x? 
[... base85-encoded GIT binary patch data omitted ...]
z{W*>4zC-GUykrH0ZyoZ*Abb+wh?bk2pDhBju5ea#S|Br=wnG)hF%{_PyM!w~Z zy&;2Kz9PQ3bPO(!&T!K~fq-^ymD6eXW-_~)&lB=PEJ4rv?B~)bHUounUHyr(BbwOC6o>b4b-Amoj^ zzTu5=rH=lTxjJzD-4K+Cpr;U@<9c`xx7>Bb57TXkDuhFD+wZz7>VFV@CR8;Y;)r)f zF(ze2Mb>K--)=$7CE>o9rSj3p>aA-zAJg=A)Q>va=Ne>x^nv@+-yp)jz5hXjq&MRF z?K~nhA+)8es+k*S)++A`#Mi3#$GYp)CW z@1ATZc0$!oIlH*z1X(msrHveaFkX&3#Zcmovhe5F)0sosd*b>IA69IN(#Azo@TfrM zd?lz7S7pEp*fjHI;&$7#3O~ar6W$2yTPH&ptSS`aq(X{)$v_mjI8>QV04@XpkK;l(>HQuiy(1|!XUpcIZXGiGl3M;Lk*pFby`fc1N_GXrR9oeO~ zZbDJ+c=eBM-~^QQ`ou(~?L}aI{40R6WzN@m=1;g0ILlUDneZOO#AhBb>SNEGJ}o`x zMD@hu=|1j+`v>0(a%M=@Q@HjxE2d~O8p;PX+<+4kXW?ezGDtp-Vkx>Cr>k~_D;+X? zcxm{0i7`RUNN9-aR2 z*kkD%U;0uy7!G>HjOoQ{CL>`be6~BGY8ovVI>^d!8LVMLf-LG9p%TY$?mM++Bhv}- zCMM%=BZ@nZy{bm6(dDsj3Yp2GE4F;BS34+|t^A}@V?#$j-v7em1e{0_eZeyjr#wKs z9P*yqZcV@XXq--@Ult5x*5IRVIypF};X3!tKnoOEj13qrdan*@W0kBCba`5YXG#EM4tr(j=NcPYs=_@BD5`Y zI9kj_mT)6g^(W$YWJ%nCNohD@r9q?E?4*BGvFih;i@4PY}~GxaWl|r6+@5FLM-m23+IQ;QIsSU#4KRJxy4Oa@&Xge7%hg*+$;T8I2 z0*5+g&z_woA?6szap%=*)<*k~Z0I|8g81!q^wFsoI?w|UKMkgTy1VjXB|2~RtaRE= z{i6~+SRJU79D6oqQ~?X4U`bJe5cQLe@Lu?Yck#ze0mU9_z|3be^I?Af@fW|q-pLs; zsgx&?2wY#T{&w0z=Ei(@{U;ONN=9YQ_viS%M}MF^8x*sqgR256*+2dG7&V-Auf-Z`F(DR4sFV)hZB*J1 zJ>vaFAC^W!Y*qBnVB-rSOop{Aoma1Bhna6L)1j+YtPBI|y&)71f_7*-D3ZuvI9*00 z{7qCu&LigMq<^mA30G|!fB!pxGL$Drxu0+0oLdCTZDTk{1MlLILjTKnage7<^JV+ zz*=OPYnVvPJAh|<2C-JWGMGk9{2=sv&xmVe{rnw%xh^&F_*{S@nU$;}ixLS5?ls0IPJ>HZ{G*(Nb7R*<%4c!z%VdPMy$fnvsuO+zmZkFIpov$@ zUIGK;J}cuhHzi4gk8(i&W{erjazPfD?g8!Z#r@% zWx>-XO-@rDe=N;;=;2sqSTmb(InE&>L3TJxj(hDqJPoGt`wbe*)juPGBkc#l?AB8f zUI(@L=HG@Wm!&R7{kL-XnmA4_7SCb{Um?AVd?C)io#+ba88U8HiGMk*9H^9L_Nw=+jZ+ieiP3?a@yocs!hMo( zuMpSq$?K*EJK?Ih$onwk<4(aTeaT>*&Ch3szy*F3l&bK}geCIEtK4Ve(R7@72)T`_ zVK!6WB39{dZTS4jcDnnPzox^-9vkQ6dvh9d6$fF~;;O2S1}0n;m`F^|4Lb?6rwx@O zuVjVt;#6!f5b(p;G&u#CZdblVLlEA)Dxg=!iiQ|c&E*%%s5Nl9((Nfc5mb!*r%52mgLA$5g zJf`Xu@ZNvqt*!0U1FeAuxR;s`#ZS}DP<}5pcDdnmP!Uv@Ifq%ie~>tTH2~>U37xc8QB zO8gsn+Lnf~+HytLho z-nAc3w*GN%Xq! 
zCAGJ5(?;h$ebO2x?qh;fP$JP4!-&vs~!KzPz3#87uHUl~N|y~x*3=2mj= zc@!Gpa&y0kug~C)r3|wu`#(inX(J@EP>n&QbDdp1TI_5p=lj7*_*`%ix?Z{xTd_{2hE- zZMi{OTOBH8bb_=EY_t7&oP|3E*B@7gOjIOfy4+UzSq|IKdu})T-u#T0(<+C5jvr21 z*HW%0@(0RRT|)D2Ux{0p01xK(J8&oC-0v%c#KruKlgr&$nJr_+O~cxUg5{92yvH9I zD2MWax8WpOhvCZd0`oL)^D-TujpfUyz)PPwf5T?n(%-t?hI6C#Rz1i+%E#{@p!zij zD)EUPHnt-we#3Y<2saY97j7qloi;KW6>Ty(LoeK zC2}1ES(_XWu2~m_ID~K~fAuTe33+tfa~S6jOcz%}Lvo+m7JtK?LcYrgSH(~Kwkm82 zX&TLl0##=L-tEwfa7UIR*!T?&2rC^#v=rD^8`##j!2^6sWO21}pdvs7x1)^^$XfBQ zeb}Lf6Uz0iWKFiDL}zO}K{o z+q81Ic@4;fIsf)K0rx|k#7+jnw2WuDB>Zx30est;m$%?-v;A<2BWgiB62~$=`F(S3 zBu?Fg`vdt&+ycn2IPNZY&``HgyXjZ<9yuFxN^Ggx=L894O;nL4ue;NV4O%lKS_(Eq9tqN-$$6MaQdHw z+XvUX@|(wI;T1wWh;zoffh^`4gZyIf^4rPe{}fcpE@7!BpTb`}9fp$tkHR@%e9HMa zD4Pl*t%SdjR($?KPQa706RUtrKS)jEw?e?L+aaN`MVqNn<##a`8Hs(7WK| zO6{kHK>T?b_jlZNxLe5Ii6I6TkJ`4ey{d%8Z~}QduJ^!=#+k40-Eeu(^4cKe_IZM@ zE@AKVNg1kyjkHapS=Qn*q%-jO#|83 zD-9`Yd zBf@jzKe^&QHwBS?BLP<6{~YcX+>Mk$q^<(xa4-(UIY1>e5|v@N7|=YYSEbss`H1FXU9Vvm3U)22sh#+y0a_w;k9wRh!|VpPi}}UN{RyK z4z;s_Na5!omgpT;c}j#uQ2kp1axF7)64-lj50lpd@E8&s&4qxjjhzomLazva3# zE%yJ7P?b97evjwYKs3_L13LP@(i#%2^6wTm(w3IHTt3RQNsha=khOWYq`k`dZ|%0J zE$_bp1_Js0`uEmopIm0rI-B<^acyxDXbEK-T;M9-dEGqFN992Fw;a2KJktm{>@N`R z*E}Wce*2d(S@lY10k#yVfEAMqu3ehQQ4Pcm#|_0P2GGx$P;iD^g>wmJV=ur-Y^UL- z`XtR6JnI4ZNysI%^|%-Ot((sB7i2U!mCpXYyC;6E+!f3yW0+qvkxpM!fER|XG@cukq+ z8Q0?6Ak9uczZs?$9>(KCxbCI&t9fO=f7SC}Lb#}``N(1Fgx~^r%G&4m_~4&X_?QLp z@@cpR!{h%S_)iP`rv?5$X#vK6Ril9S_EhV5+$p%jaITV@0 Date: Tue, 11 Mar 2025 10:20:42 -0400 Subject: [PATCH 299/391] Adding tooling to use Amazon Bedrock Knowledge Base as a knowledge retreiver --- .../aws/bedrock/knowledge_base/README.md | 159 +++++++++++++++ .../bedrock/knowledge_base/retriever_tool.py | 188 ++++++++++++++++++ 2 files changed, 347 insertions(+) create mode 100644 src/crewai_tools/aws/bedrock/knowledge_base/README.md create mode 100644 src/crewai_tools/aws/bedrock/knowledge_base/retriever_tool.py diff --git a/src/crewai_tools/aws/bedrock/knowledge_base/README.md b/src/crewai_tools/aws/bedrock/knowledge_base/README.md new file mode 100644 index 000000000..6da54f848 --- /dev/null +++ b/src/crewai_tools/aws/bedrock/knowledge_base/README.md @@ -0,0 +1,159 @@ +# BedrockKBRetrieverTool + +The `BedrockKBRetrieverTool` enables CrewAI agents to retrieve information from Amazon Bedrock Knowledge Bases using natural language queries. 
+ +## Installation + +```bash +pip install 'crewai[tools]' +``` + +## Requirements + +- AWS credentials configured (either through environment variables or AWS CLI) +- `boto3` and `python-dotenv` packages +- Access to Amazon Bedrock Knowledge Base + +## Usage + +Here's how to use the tool with a CrewAI agent: + +```python +from crewai import Agent, Task, Crew +from crewai_tools.aws.bedrock.knowledge_base.retriever_tool import BedrockKBRetrieverTool + +# Initialize the tool +kb_tool = BedrockKBRetrieverTool( + knowledge_base_id="your-kb-id", + number_of_results=5 +) + +# Create a CrewAI agent that uses the tool +researcher = Agent( + role='Knowledge Base Researcher', + goal='Find information about company policies', + backstory='I am a researcher specialized in retrieving and analyzing company documentation.', + tools=[kb_tool], + verbose=True +) + +# Create a task for the agent +research_task = Task( + description="Find our company's remote work policy and summarize the key points.", + agent=researcher +) + +# Create a crew with the agent +crew = Crew( + agents=[researcher], + tasks=[research_task], + verbose=2 +) + +# Run the crew +result = crew.kickoff() +print(result) +``` + +## Tool Arguments + +| Argument | Type | Required | Default | Description | +|----------|------|----------|---------|-------------| +| knowledge_base_id | str | Yes | None | The unique identifier of the knowledge base (0-10 alphanumeric characters) | +| number_of_results | int | No | 5 | Maximum number of results to return | +| retrieval_configuration | dict | No | None | Custom configurations for the knowledge base query | +| guardrail_configuration | dict | No | None | Content filtering settings | +| next_token | str | No | None | Token for pagination | + +## Environment Variables + +```bash +BEDROCK_KB_ID=your-knowledge-base-id # Alternative to passing knowledge_base_id +AWS_REGION=your-aws-region # Defaults to us-east-1 +AWS_ACCESS_KEY_ID=your-access-key # Required for AWS authentication +AWS_SECRET_ACCESS_KEY=your-secret-key # Required for AWS authentication +``` + +## Response Format + +The tool returns results in JSON format: + +```json +{ + "results": [ + { + "content": "Retrieved text content", + "content_type": "text", + "source_type": "S3", + "source_uri": "s3://bucket/document.pdf", + "score": 0.95, + "metadata": { + "additional": "metadata" + } + } + ], + "nextToken": "pagination-token", + "guardrailAction": "NONE" +} +``` + +## Advanced Usage + +### Custom Retrieval Configuration + +```python +kb_tool = BedrockKBRetrieverTool( + knowledge_base_id="your-kb-id", + retrieval_configuration={ + "vectorSearchConfiguration": { + "numberOfResults": 10, + "overrideSearchType": "HYBRID" + } + } +) + +policy_expert = Agent( + role='Policy Expert', + goal='Analyze company policies in detail', + backstory='I am an expert in corporate policy analysis with deep knowledge of regulatory requirements.', + tools=[kb_tool] +) +``` + +## Supported Data Sources + +- Amazon S3 +- Confluence +- Salesforce +- SharePoint +- Web pages +- Custom document locations +- Amazon Kendra +- SQL databases + +## Use Cases + +### Enterprise Knowledge Integration +- Enable CrewAI agents to access your organization's proprietary knowledge without exposing sensitive data +- Allow agents to make decisions based on your company's specific policies, procedures, and documentation +- Create agents that can answer questions based on your internal documentation while maintaining data security + +### Specialized Domain Knowledge +- Connect CrewAI 
agents to domain-specific knowledge bases (legal, medical, technical) without retraining models +- Leverage existing knowledge repositories that are already maintained in your AWS environment +- Combine CrewAI's reasoning with domain-specific information from your knowledge bases + +### Data-Driven Decision Making +- Ground CrewAI agent responses in your actual company data rather than general knowledge +- Ensure agents provide recommendations based on your specific business context and documentation +- Reduce hallucinations by retrieving factual information from your knowledge bases + +### Scalable Information Access +- Access terabytes of organizational knowledge without embedding it all into your models +- Dynamically query only the relevant information needed for specific tasks +- Leverage AWS's scalable infrastructure to handle large knowledge bases efficiently + +### Compliance and Governance +- Ensure CrewAI agents provide responses that align with your company's approved documentation +- Create auditable trails of information sources used by your agents +- Maintain control over what information sources your agents can access \ No newline at end of file diff --git a/src/crewai_tools/aws/bedrock/knowledge_base/retriever_tool.py b/src/crewai_tools/aws/bedrock/knowledge_base/retriever_tool.py new file mode 100644 index 000000000..c01e83cff --- /dev/null +++ b/src/crewai_tools/aws/bedrock/knowledge_base/retriever_tool.py @@ -0,0 +1,188 @@ +from typing import Type, Optional, List, Dict, Any +import os +import json +from dotenv import load_dotenv + +from crewai.tools import BaseTool +from pydantic import BaseModel, Field +import boto3 +from botocore.exceptions import ClientError + +# Load environment variables from .env file +load_dotenv() + + +class BedrockKBRetrieverToolInput(BaseModel): + """Input schema for BedrockKBRetrieverTool.""" + query: str = Field(..., description="The query to retrieve information from the knowledge base") + + +class BedrockKBRetrieverTool(BaseTool): + name: str = "Bedrock Knowledge Base Retriever Tool" + description: str = "Retrieves information from an Amazon Bedrock Knowledge Base given a query" + args_schema: Type[BaseModel] = BedrockKBRetrieverToolInput + knowledge_base_id: str = None + number_of_results: Optional[int] = 5 + retrieval_configuration: Optional[Dict[str, Any]] = None + guardrail_configuration: Optional[Dict[str, Any]] = None + next_token: Optional[str] = None + + def __init__( + self, + knowledge_base_id: str = None, + number_of_results: Optional[int] = 5, + retrieval_configuration: Optional[Dict[str, Any]] = None, + guardrail_configuration: Optional[Dict[str, Any]] = None, + next_token: Optional[str] = None, + **kwargs + ): + """Initialize the BedrockKBRetrieverTool with knowledge base configuration. + + Args: + knowledge_base_id (str): The unique identifier of the knowledge base to query (length: 0-10, pattern: ^[0-9a-zA-Z]+$) + number_of_results (Optional[int], optional): The maximum number of results to return. Defaults to 5. + retrieval_configuration (Optional[Dict[str, Any]], optional): Configurations for the knowledge base query and retrieval process. Defaults to None. + guardrail_configuration (Optional[Dict[str, Any]], optional): Guardrail settings. Defaults to None. + next_token (Optional[str], optional): Token for retrieving the next batch of results. Defaults to None. 
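+
+        Raises:
+            ValueError: If the provided parameters fail validation.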
+ """ + super().__init__(**kwargs) + + # Get knowledge_base_id from environment variable if not provided + self.knowledge_base_id = knowledge_base_id or os.getenv('BEDROCK_KB_ID') + self.number_of_results = number_of_results + + # Initialize retrieval_configuration with number_of_results if provided + if retrieval_configuration is None and number_of_results is not None: + self.retrieval_configuration = { + "vectorSearchConfiguration": { + "numberOfResults": number_of_results + } + } + else: + self.retrieval_configuration = retrieval_configuration + + self.guardrail_configuration = guardrail_configuration + self.next_token = next_token + + # Validate parameters + self._validate_parameters() + + # Update the description to include the knowledge base details + self.description = f"Retrieves information from Amazon Bedrock Knowledge Base '{self.knowledge_base_id}' given a query" + + def _validate_parameters(self): + """Validate the parameters according to AWS API requirements.""" + # Validate knowledge_base_id + if not self.knowledge_base_id or len(self.knowledge_base_id) > 10 or not all(c.isalnum() for c in self.knowledge_base_id): + raise ValueError("knowledge_base_id must be 0-10 alphanumeric characters") + + # Validate next_token if provided + if self.next_token and (len(self.next_token) < 1 or len(self.next_token) > 2048 or ' ' in self.next_token): + raise ValueError("next_token must be 1-2048 characters and match pattern ^\\S*$") + + def _run(self, query: str) -> str: + try: + # Initialize the Bedrock Agent Runtime client + bedrock_agent_runtime = boto3.client( + 'bedrock-agent-runtime', + region_name=os.getenv('AWS_REGION', os.getenv('AWS_DEFAULT_REGION', 'us-east-1')), + # AWS SDK will automatically use AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY from environment + ) + + # Prepare the request parameters + retrieve_params = { + 'knowledgeBaseId': self.knowledge_base_id, + 'retrievalQuery': { + 'text': query + } + } + + # Add optional parameters if provided + if self.retrieval_configuration: + retrieve_params['retrievalConfiguration'] = self.retrieval_configuration + + if self.guardrail_configuration: + retrieve_params['guardrailConfiguration'] = self.guardrail_configuration + + if self.next_token: + retrieve_params['nextToken'] = self.next_token + + # Make the retrieve API call + response = bedrock_agent_runtime.retrieve(**retrieve_params) + + # Process the response + results = [] + for result in response.get('retrievalResults', []): + # Extract content + content_obj = result.get('content', {}) + content = content_obj.get('text', '') + content_type = content_obj.get('type', 'text') + + # Extract location information + location = result.get('location', {}) + location_type = location.get('type', 'unknown') + source_uri = None + + # Map for location types and their URI fields + location_mapping = { + 's3Location': {'field': 'uri', 'type': 'S3'}, + 'confluenceLocation': {'field': 'url', 'type': 'Confluence'}, + 'salesforceLocation': {'field': 'url', 'type': 'Salesforce'}, + 'sharePointLocation': {'field': 'url', 'type': 'SharePoint'}, + 'webLocation': {'field': 'url', 'type': 'Web'}, + 'customDocumentLocation': {'field': 'id', 'type': 'CustomDocument'}, + 'kendraDocumentLocation': {'field': 'uri', 'type': 'KendraDocument'}, + 'sqlLocation': {'field': 'query', 'type': 'SQL'} + } + + # Extract the URI based on location type + for loc_key, config in location_mapping.items(): + if loc_key in location: + source_uri = location[loc_key].get(config['field']) + if not location_type or location_type 
== 'unknown':
+                            location_type = config['type']
+                        break
+
+                # Include score if available
+                score = result.get('score')
+
+                # Include metadata if available
+                metadata = result.get('metadata')
+
+                # Create a well-formed JSON object for each result
+                result_object = {
+                    'content': content,
+                    'content_type': content_type,
+                    'source_type': location_type,
+                    'source_uri': source_uri
+                }
+
+                # Add score if available
+                if score is not None:
+                    result_object['score'] = score
+
+                # Add metadata if available
+                if metadata:
+                    result_object['metadata'] = metadata
+
+                # Add the JSON object to results
+                results.append(result_object)
+
+            # Include nextToken in the response if available
+            response_object = {}
+            if results:
+                response_object["results"] = results
+            else:
+                response_object["message"] = "No results found for the given query."
+
+            if "nextToken" in response:
+                response_object["nextToken"] = response["nextToken"]
+
+            if "guardrailAction" in response:
+                response_object["guardrailAction"] = response["guardrailAction"]
+
+            # Return the results as a JSON string
+            return json.dumps(response_object, indent=2)
+
+        except ClientError as e:
+            return f"Error retrieving from Bedrock Knowledge Base: {str(e)}"
\ No newline at end of file

From d47adfc34ae9e9b40b27b5744de0f0879609bd53 Mon Sep 17 00:00:00 2001
From: Raju Rangan
Date: Tue, 11 Mar 2025 10:21:30 -0400
Subject: [PATCH 300/391] Adding tooling to use Amazon Bedrock Agents as
 external agents, enabling distributed agentic capabilities

---
 src/crewai_tools/aws/bedrock/agents/README.md | 181 ++++++++++++++++++
 .../aws/bedrock/agents/invoke_agent_tool.py   | 140 ++++++++++++++
 2 files changed, 321 insertions(+)
 create mode 100644 src/crewai_tools/aws/bedrock/agents/README.md
 create mode 100644 src/crewai_tools/aws/bedrock/agents/invoke_agent_tool.py

diff --git a/src/crewai_tools/aws/bedrock/agents/README.md b/src/crewai_tools/aws/bedrock/agents/README.md
new file mode 100644
index 000000000..7aa43b65d
--- /dev/null
+++ b/src/crewai_tools/aws/bedrock/agents/README.md
@@ -0,0 +1,181 @@
+# BedrockInvokeAgentTool
+
+The `BedrockInvokeAgentTool` enables CrewAI agents to invoke Amazon Bedrock Agents and leverage their capabilities within your workflows.
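+
+Under the hood, the tool wraps the Bedrock Agent Runtime `InvokeAgent` API and concatenates the streamed completion chunks into a single response. For orientation, a minimal sketch of the equivalent raw `boto3` call (the agent and alias IDs below are placeholders):
+
+```python
+import boto3
+
+# Assumes AWS credentials are already configured in the environment
+client = boto3.client("bedrock-agent-runtime", region_name="us-west-2")
+response = client.invoke_agent(
+    agentId="AGENT12345",       # placeholder agent ID
+    agentAliasId="ALIAS12345",  # placeholder alias ID
+    sessionId="demo-session",
+    inputText="What are the current EC2 service quotas in us-west-2?",
+)
+# The completion is streamed back as chunk events of raw bytes
+completion = "".join(
+    event["chunk"]["bytes"].decode("utf-8")
+    for event in response["completion"]
+    if "chunk" in event
+)
+print(completion)
+```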
+ +## Installation + +```bash +pip install 'crewai[tools]' +``` + +## Requirements + +- AWS credentials configured (either through environment variables or AWS CLI) +- `boto3` and `python-dotenv` packages +- Access to Amazon Bedrock Agents + +## Usage + +Here's how to use the tool with a CrewAI agent: + +```python +from crewai import Agent, Task, Crew +from crewai_tools.aws.bedrock.agents.invoke_agent_tool import BedrockInvokeAgentTool + +# Initialize the tool +agent_tool = BedrockInvokeAgentTool( + agent_id="your-agent-id", + agent_alias_id="your-agent-alias-id" +) + +# Create a CrewAI agent that uses the tool +aws_expert = Agent( + role='AWS Service Expert', + goal='Help users understand AWS services and quotas', + backstory='I am an expert in AWS services and can provide detailed information about them.', + tools=[agent_tool], + verbose=True +) + +# Create a task for the agent +quota_task = Task( + description="Find out the current service quotas for EC2 in us-west-2 and explain any recent changes.", + agent=aws_expert +) + +# Create a crew with the agent +crew = Crew( + agents=[aws_expert], + tasks=[quota_task], + verbose=2 +) + +# Run the crew +result = crew.kickoff() +print(result) +``` + +## Tool Arguments + +| Argument | Type | Required | Default | Description | +|----------|------|----------|---------|-------------| +| agent_id | str | Yes | None | The unique identifier of the Bedrock agent | +| agent_alias_id | str | Yes | None | The unique identifier of the agent alias | +| session_id | str | No | timestamp | The unique identifier of the session | +| enable_trace | bool | No | False | Whether to enable trace for debugging | +| end_session | bool | No | False | Whether to end the session after invocation | +| description | str | No | None | Custom description for the tool | + +## Environment Variables + +```bash +BEDROCK_AGENT_ID=your-agent-id # Alternative to passing agent_id +BEDROCK_AGENT_ALIAS_ID=your-agent-alias-id # Alternative to passing agent_alias_id +AWS_REGION=your-aws-region # Defaults to us-west-2 +AWS_ACCESS_KEY_ID=your-access-key # Required for AWS authentication +AWS_SECRET_ACCESS_KEY=your-secret-key # Required for AWS authentication +``` + +## Advanced Usage + +### Multi-Agent Workflow with Session Management + +```python +from crewai import Agent, Task, Crew, Process +from crewai_tools.aws.bedrock.agents.invoke_agent_tool import BedrockInvokeAgentTool + +# Initialize tools with session management +initial_tool = BedrockInvokeAgentTool( + agent_id="your-agent-id", + agent_alias_id="your-agent-alias-id", + session_id="custom-session-id" +) + +followup_tool = BedrockInvokeAgentTool( + agent_id="your-agent-id", + agent_alias_id="your-agent-alias-id", + session_id="custom-session-id" +) + +final_tool = BedrockInvokeAgentTool( + agent_id="your-agent-id", + agent_alias_id="your-agent-alias-id", + session_id="custom-session-id", + end_session=True +) + +# Create agents for different stages +researcher = Agent( + role='AWS Service Researcher', + goal='Gather information about AWS services', + backstory='I am specialized in finding detailed AWS service information.', + tools=[initial_tool] +) + +analyst = Agent( + role='Service Compatibility Analyst', + goal='Analyze service compatibility and requirements', + backstory='I analyze AWS services for compatibility and integration possibilities.', + tools=[followup_tool] +) + +summarizer = Agent( + role='Technical Documentation Writer', + goal='Create clear technical summaries', + backstory='I specialize in creating clear, 
concise technical documentation.', + tools=[final_tool] +) + +# Create tasks +research_task = Task( + description="Find all available AWS services in us-west-2 region.", + agent=researcher +) + +analysis_task = Task( + description="Analyze which services support IPv6 and their implementation requirements.", + agent=analyst +) + +summary_task = Task( + description="Create a summary of IPv6-compatible services and their key features.", + agent=summarizer +) + +# Create a crew with the agents and tasks +crew = Crew( + agents=[researcher, analyst, summarizer], + tasks=[research_task, analysis_task, summary_task], + process=Process.sequential, + verbose=2 +) + +# Run the crew +result = crew.kickoff() +``` + +## Use Cases + +### Hybrid Multi-Agent Collaborations +- Create workflows where CrewAI agents collaborate with managed Bedrock agents running as services in AWS +- Enable scenarios where sensitive data processing happens within your AWS environment while other agents operate externally +- Bridge on-premises CrewAI agents with cloud-based Bedrock agents for distributed intelligence workflows + +### Data Sovereignty and Compliance +- Keep data-sensitive agentic workflows within your AWS environment while allowing external CrewAI agents to orchestrate tasks +- Maintain compliance with data residency requirements by processing sensitive information only within your AWS account +- Enable secure multi-agent collaborations where some agents cannot access your organization's private data + +### Seamless AWS Service Integration +- Access any AWS service through Amazon Bedrock Actions without writing complex integration code +- Enable CrewAI agents to interact with AWS services through natural language requests +- Leverage pre-built Bedrock agent capabilities to interact with AWS services like Bedrock Knowledge Bases, Lambda, and more + +### Scalable Hybrid Agent Architectures +- Offload computationally intensive tasks to managed Bedrock agents while lightweight tasks run in CrewAI +- Scale agent processing by distributing workloads between local CrewAI agents and cloud-based Bedrock agents + +### Cross-Organizational Agent Collaboration +- Enable secure collaboration between your organization's CrewAI agents and partner organizations' Bedrock agents +- Create workflows where external expertise from Bedrock agents can be incorporated without exposing sensitive data +- Build agent ecosystems that span organizational boundaries while maintaining security and data control \ No newline at end of file diff --git a/src/crewai_tools/aws/bedrock/agents/invoke_agent_tool.py b/src/crewai_tools/aws/bedrock/agents/invoke_agent_tool.py new file mode 100644 index 000000000..41ecad75b --- /dev/null +++ b/src/crewai_tools/aws/bedrock/agents/invoke_agent_tool.py @@ -0,0 +1,140 @@ +from typing import Type, Optional, Dict, Any +import os +import json +import uuid +import time +from datetime import datetime, timezone +from dotenv import load_dotenv + +from crewai.tools import BaseTool +from pydantic import BaseModel, Field +import boto3 +from botocore.exceptions import ClientError + +# Load environment variables from .env file +load_dotenv() + + +class BedrockInvokeAgentToolInput(BaseModel): + """Input schema for BedrockInvokeAgentTool.""" + query: str = Field(..., description="The query to send to the agent") + + +class BedrockInvokeAgentTool(BaseTool): + name: str = "Bedrock Agent Invoke Tool" + description: str = "An agent responsible for policy analysis." 
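+    # Note: the default description above is only a sample; pass `description=`
+    # to the constructor so the LLM knows what your specific Bedrock agent does.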
+    args_schema: Type[BaseModel] = BedrockInvokeAgentToolInput
+    agent_id: str = None
+    agent_alias_id: str = None
+    session_id: str = None
+    enable_trace: bool = False
+    end_session: bool = False
+
+    def __init__(
+        self,
+        agent_id: str = None,
+        agent_alias_id: str = None,
+        session_id: str = None,
+        enable_trace: bool = False,
+        end_session: bool = False,
+        description: Optional[str] = None,
+        **kwargs
+    ):
+        """Initialize the BedrockInvokeAgentTool with agent configuration.
+
+        Args:
+            agent_id (str): The unique identifier of the Bedrock agent
+            agent_alias_id (str): The unique identifier of the agent alias
+            session_id (str): The unique identifier of the session
+            enable_trace (bool): Whether to enable trace for the agent invocation
+            end_session (bool): Whether to end the session with the agent
+            description (Optional[str]): Custom description for the tool
+        """
+        super().__init__(**kwargs)
+
+        # Get values from environment variables if not provided
+        self.agent_id = agent_id or os.getenv('BEDROCK_AGENT_ID')
+        self.agent_alias_id = agent_alias_id or os.getenv('BEDROCK_AGENT_ALIAS_ID')
+        self.session_id = session_id or str(int(time.time()))  # Use timestamp as session ID if not provided
+        self.enable_trace = enable_trace
+        self.end_session = end_session
+
+        # Update the description if provided
+        if description:
+            self.description = description
+
+    def _run(self, query: str) -> str:
+        try:
+            # Initialize the Bedrock Agent Runtime client
+            bedrock_agent = boto3.client(
+                "bedrock-agent-runtime",
+                region_name=os.getenv('AWS_REGION', os.getenv('AWS_DEFAULT_REGION', 'us-west-2'))
+            )
+
+            # Format the prompt with current time
+            current_utc = datetime.now(timezone.utc)
+            prompt = f"""
+The current time is: {current_utc}
+
+Below is the user's query or task. 
Complete it and answer it concisely and to the point:
+{query}
+"""
+
+            # Invoke the agent
+            response = bedrock_agent.invoke_agent(
+                agentId=self.agent_id,
+                agentAliasId=self.agent_alias_id,
+                sessionId=self.session_id,
+                inputText=prompt,
+                enableTrace=self.enable_trace,
+                endSession=self.end_session
+            )
+
+            # Process the response
+            completion = ""
+
+            # Check if response contains a completion field
+            if 'completion' in response:
+                # Process streaming response format
+                for event in response.get('completion', []):
+                    if 'chunk' in event and 'bytes' in event['chunk']:
+                        chunk_bytes = event['chunk']['bytes']
+                        if isinstance(chunk_bytes, (bytes, bytearray)):
+                            completion += chunk_bytes.decode('utf-8')
+                        else:
+                            completion += str(chunk_bytes)
+
+            # If no completion found in streaming format, try direct format
+            if not completion and 'chunk' in response and 'bytes' in response['chunk']:
+                chunk_bytes = response['chunk']['bytes']
+                if isinstance(chunk_bytes, (bytes, bytearray)):
+                    completion = chunk_bytes.decode('utf-8')
+                else:
+                    completion = str(chunk_bytes)
+
+            # If still no completion, return debug info
+            if not completion:
+                debug_info = {
+                    "error": "Could not extract completion from response",
+                    "response_keys": list(response.keys())
+                }
+
+                # Add more debug info
+                if 'chunk' in response:
+                    debug_info["chunk_keys"] = list(response['chunk'].keys())
+
+                return json.dumps(debug_info, indent=2)
+
+            return completion
+
+        except ClientError as e:
+            error_code = "Unknown"
+            error_message = str(e)
+
+            # Try to extract error code if available
+            if hasattr(e, 'response') and 'Error' in e.response and 'Code' in e.response['Error']:
+                error_code = e.response['Error']['Code']
+
+            return f"Error invoking Bedrock Agent ({error_code}): {error_message}"
+        except Exception as e:
+            return f"Error: {str(e)}"
\ No newline at end of file

From 292adef7baac9b44d7cc6cb8b0f3b9ec2eb7310f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jakub=20Kopeck=C3=BD?=
Date: Tue, 11 Mar 2025 18:02:15 +0100
Subject: [PATCH 301/391] Update README.md

---
 src/crewai_tools/tools/apify_actors_tool/README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/crewai_tools/tools/apify_actors_tool/README.md b/src/crewai_tools/tools/apify_actors_tool/README.md
index 7e465037c..c00891deb 100644
--- a/src/crewai_tools/tools/apify_actors_tool/README.md
+++ b/src/crewai_tools/tools/apify_actors_tool/README.md
@@ -91,5 +91,6 @@ The `ApifyActorsTool` requires these inputs to work:
 ## Resources

 - **[Apify](https://apify.com/)**: Explore the Apify platform.
+- **[How to build an AI agent on Apify](https://blog.apify.com/how-to-build-an-ai-agent/)**: A complete step-by-step guide to creating, publishing, and monetizing AI agents on the Apify platform.
 - **[RAG Web Browser Actor](https://apify.com/apify/rag-web-browser)**: A popular Actor for web search for LLMs.
 - **[CrewAI Integration Guide](https://docs.apify.com/platform/integrations/crewai)**: Follow the official guide for integrating Apify and CrewAI.
From db309ca1ae5f26fcffd297b5c2112ee348d136fd Mon Sep 17 00:00:00 2001 From: Raju Rangan Date: Tue, 11 Mar 2025 16:46:12 -0400 Subject: [PATCH 302/391] - Add custom exceptions for better error handling - Add parameter validation for Bedrock tools - Improve response processing and debug information - Maintain backward compatibility with existing implementations --- .../aws/bedrock/agents/invoke_agent_tool.py | 42 +++- src/crewai_tools/aws/bedrock/exceptions.py | 17 ++ .../bedrock/knowledge_base/retriever_tool.py | 204 +++++++++++------- 3 files changed, 184 insertions(+), 79 deletions(-) create mode 100644 src/crewai_tools/aws/bedrock/exceptions.py diff --git a/src/crewai_tools/aws/bedrock/agents/invoke_agent_tool.py b/src/crewai_tools/aws/bedrock/agents/invoke_agent_tool.py index 41ecad75b..6c43480c0 100644 --- a/src/crewai_tools/aws/bedrock/agents/invoke_agent_tool.py +++ b/src/crewai_tools/aws/bedrock/agents/invoke_agent_tool.py @@ -11,6 +11,9 @@ from pydantic import BaseModel, Field import boto3 from botocore.exceptions import ClientError +# Import custom exceptions +from ..exceptions import BedrockAgentError, BedrockValidationError + # Load environment variables from .env file load_dotenv() @@ -62,6 +65,31 @@ class BedrockInvokeAgentTool(BaseTool): # Update the description if provided if description: self.description = description + + # Validate parameters + self._validate_parameters() + + def _validate_parameters(self): + """Validate the parameters according to AWS API requirements.""" + try: + # Validate agent_id + if not self.agent_id: + raise BedrockValidationError("agent_id cannot be empty") + if not isinstance(self.agent_id, str): + raise BedrockValidationError("agent_id must be a string") + + # Validate agent_alias_id + if not self.agent_alias_id: + raise BedrockValidationError("agent_alias_id cannot be empty") + if not isinstance(self.agent_alias_id, str): + raise BedrockValidationError("agent_alias_id must be a string") + + # Validate session_id if provided + if self.session_id and not isinstance(self.session_id, str): + raise BedrockValidationError("session_id must be a string") + + except BedrockValidationError as e: + raise BedrockValidationError(f"Parameter validation failed: {str(e)}") def _run(self, query: str) -> str: try: @@ -123,7 +151,7 @@ Below is the users query or task. Complete it and answer it consicely and to the if 'chunk' in response: debug_info["chunk_keys"] = list(response['chunk'].keys()) - return json.dumps(debug_info, indent=2) + raise BedrockAgentError(f"Failed to extract completion: {json.dumps(debug_info, indent=2)}") return completion @@ -132,9 +160,13 @@ Below is the users query or task. 
Complete it and answer it consicely and to the error_message = str(e) # Try to extract error code if available - if hasattr(e, 'response') and 'Error' in e.response and 'Code' in e.response['Error']: - error_code = e.response['Error']['Code'] + if hasattr(e, 'response') and 'Error' in e.response: + error_code = e.response['Error'].get('Code', 'Unknown') + error_message = e.response['Error'].get('Message', str(e)) - return f"Error invoking Bedrock Agent ({error_code}): {error_message}" + raise BedrockAgentError(f"Error ({error_code}): {error_message}") + except BedrockAgentError: + # Re-raise BedrockAgentError exceptions + raise except Exception as e: - return f"Error: {str(e)}" \ No newline at end of file + raise BedrockAgentError(f"Unexpected error: {str(e)}") \ No newline at end of file diff --git a/src/crewai_tools/aws/bedrock/exceptions.py b/src/crewai_tools/aws/bedrock/exceptions.py new file mode 100644 index 000000000..d1aa2623c --- /dev/null +++ b/src/crewai_tools/aws/bedrock/exceptions.py @@ -0,0 +1,17 @@ +"""Custom exceptions for AWS Bedrock integration.""" + +class BedrockError(Exception): + """Base exception for Bedrock-related errors.""" + pass + +class BedrockAgentError(BedrockError): + """Exception raised for errors in the Bedrock Agent operations.""" + pass + +class BedrockKnowledgeBaseError(BedrockError): + """Exception raised for errors in the Bedrock Knowledge Base operations.""" + pass + +class BedrockValidationError(BedrockError): + """Exception raised for validation errors in Bedrock operations.""" + pass \ No newline at end of file diff --git a/src/crewai_tools/aws/bedrock/knowledge_base/retriever_tool.py b/src/crewai_tools/aws/bedrock/knowledge_base/retriever_tool.py index c01e83cff..55a15b621 100644 --- a/src/crewai_tools/aws/bedrock/knowledge_base/retriever_tool.py +++ b/src/crewai_tools/aws/bedrock/knowledge_base/retriever_tool.py @@ -8,6 +8,9 @@ from pydantic import BaseModel, Field import boto3 from botocore.exceptions import ClientError +# Import custom exceptions +from ..exceptions import BedrockKnowledgeBaseError, BedrockValidationError + # Load environment variables from .env file load_dotenv() @@ -39,7 +42,7 @@ class BedrockKBRetrieverTool(BaseTool): """Initialize the BedrockKBRetrieverTool with knowledge base configuration. Args: - knowledge_base_id (str): The unique identifier of the knowledge base to query (length: 0-10, pattern: ^[0-9a-zA-Z]+$) + knowledge_base_id (str): The unique identifier of the knowledge base to query number_of_results (Optional[int], optional): The maximum number of results to return. Defaults to 5. retrieval_configuration (Optional[Dict[str, Any]], optional): Configurations for the knowledge base query and retrieval process. Defaults to None. guardrail_configuration (Optional[Dict[str, Any]], optional): Guardrail settings. Defaults to None. 
@@ -50,19 +53,14 @@ class BedrockKBRetrieverTool(BaseTool): # Get knowledge_base_id from environment variable if not provided self.knowledge_base_id = knowledge_base_id or os.getenv('BEDROCK_KB_ID') self.number_of_results = number_of_results - - # Initialize retrieval_configuration with number_of_results if provided - if retrieval_configuration is None and number_of_results is not None: - self.retrieval_configuration = { - "vectorSearchConfiguration": { - "numberOfResults": number_of_results - } - } - else: - self.retrieval_configuration = retrieval_configuration - self.guardrail_configuration = guardrail_configuration self.next_token = next_token + + # Initialize retrieval_configuration with provided parameters or use the one provided + if retrieval_configuration is None: + self.retrieval_configuration = self._build_retrieval_configuration() + else: + self.retrieval_configuration = retrieval_configuration # Validate parameters self._validate_parameters() @@ -70,15 +68,115 @@ class BedrockKBRetrieverTool(BaseTool): # Update the description to include the knowledge base details self.description = f"Retrieves information from Amazon Bedrock Knowledge Base '{self.knowledge_base_id}' given a query" + def _build_retrieval_configuration(self) -> Dict[str, Any]: + """Build the retrieval configuration based on provided parameters. + + Returns: + Dict[str, Any]: The constructed retrieval configuration + """ + vector_search_config = {} + + # Add number of results if provided + if self.number_of_results is not None: + vector_search_config["numberOfResults"] = self.number_of_results + + return {"vectorSearchConfiguration": vector_search_config} + def _validate_parameters(self): """Validate the parameters according to AWS API requirements.""" - # Validate knowledge_base_id - if not self.knowledge_base_id or len(self.knowledge_base_id) > 10 or not all(c.isalnum() for c in self.knowledge_base_id): - raise ValueError("knowledge_base_id must be 0-10 alphanumeric characters") + try: + # Validate knowledge_base_id + if not self.knowledge_base_id: + raise BedrockValidationError("knowledge_base_id cannot be empty") + if not isinstance(self.knowledge_base_id, str): + raise BedrockValidationError("knowledge_base_id must be a string") + if len(self.knowledge_base_id) > 10: + raise BedrockValidationError("knowledge_base_id must be 10 characters or less") + if not all(c.isalnum() for c in self.knowledge_base_id): + raise BedrockValidationError("knowledge_base_id must contain only alphanumeric characters") + + # Validate next_token if provided + if self.next_token: + if not isinstance(self.next_token, str): + raise BedrockValidationError("next_token must be a string") + if len(self.next_token) < 1 or len(self.next_token) > 2048: + raise BedrockValidationError("next_token must be between 1 and 2048 characters") + if ' ' in self.next_token: + raise BedrockValidationError("next_token cannot contain spaces") + + # Validate number_of_results if provided + if self.number_of_results is not None: + if not isinstance(self.number_of_results, int): + raise BedrockValidationError("number_of_results must be an integer") + if self.number_of_results < 1: + raise BedrockValidationError("number_of_results must be greater than 0") + + except BedrockValidationError as e: + raise BedrockValidationError(f"Parameter validation failed: {str(e)}") + + def _process_retrieval_result(self, result: Dict[str, Any]) -> Dict[str, Any]: + """Process a single retrieval result from Bedrock Knowledge Base. 
- # Validate next_token if provided - if self.next_token and (len(self.next_token) < 1 or len(self.next_token) > 2048 or ' ' in self.next_token): - raise ValueError("next_token must be 1-2048 characters and match pattern ^\\S*$") + Args: + result (Dict[str, Any]): Raw result from Bedrock Knowledge Base + + Returns: + Dict[str, Any]: Processed result with standardized format + """ + # Extract content + content_obj = result.get('content', {}) + content = content_obj.get('text', '') + content_type = content_obj.get('type', 'text') + + # Extract location information + location = result.get('location', {}) + location_type = location.get('type', 'unknown') + source_uri = None + + # Map for location types and their URI fields + location_mapping = { + 's3Location': {'field': 'uri', 'type': 'S3'}, + 'confluenceLocation': {'field': 'url', 'type': 'Confluence'}, + 'salesforceLocation': {'field': 'url', 'type': 'Salesforce'}, + 'sharePointLocation': {'field': 'url', 'type': 'SharePoint'}, + 'webLocation': {'field': 'url', 'type': 'Web'}, + 'customDocumentLocation': {'field': 'id', 'type': 'CustomDocument'}, + 'kendraDocumentLocation': {'field': 'uri', 'type': 'KendraDocument'}, + 'sqlLocation': {'field': 'query', 'type': 'SQL'} + } + + # Extract the URI based on location type + for loc_key, config in location_mapping.items(): + if loc_key in location: + source_uri = location[loc_key].get(config['field']) + if not location_type or location_type == 'unknown': + location_type = config['type'] + break + + # Create result object + result_object = { + 'content': content, + 'content_type': content_type, + 'source_type': location_type, + 'source_uri': source_uri + } + + # Add optional fields if available + if 'score' in result: + result_object['score'] = result['score'] + + if 'metadata' in result: + result_object['metadata'] = result['metadata'] + + # Handle byte content if present + if 'byteContent' in content_obj: + result_object['byte_content'] = content_obj['byteContent'] + + # Handle row content if present + if 'row' in content_obj: + result_object['row_content'] = content_obj['row'] + + return result_object def _run(self, query: str) -> str: try: @@ -113,62 +211,10 @@ class BedrockKBRetrieverTool(BaseTool): # Process the response results = [] for result in response.get('retrievalResults', []): - # Extract content - content_obj = result.get('content', {}) - content = content_obj.get('text', '') - content_type = content_obj.get('type', 'text') - - # Extract location information - location = result.get('location', {}) - location_type = location.get('type', 'unknown') - source_uri = None - - # Map for location types and their URI fields - location_mapping = { - 's3Location': {'field': 'uri', 'type': 'S3'}, - 'confluenceLocation': {'field': 'url', 'type': 'Confluence'}, - 'salesforceLocation': {'field': 'url', 'type': 'Salesforce'}, - 'sharePointLocation': {'field': 'url', 'type': 'SharePoint'}, - 'webLocation': {'field': 'url', 'type': 'Web'}, - 'customDocumentLocation': {'field': 'id', 'type': 'CustomDocument'}, - 'kendraDocumentLocation': {'field': 'uri', 'type': 'KendraDocument'}, - 'sqlLocation': {'field': 'query', 'type': 'SQL'} - } - - # Extract the URI based on location type - for loc_key, config in location_mapping.items(): - if loc_key in location: - source_uri = location[loc_key].get(config['field']) - if not location_type or location_type == 'unknown': - location_type = config['type'] - break - - # Include score if available - score = result.get('score') - - # Include metadata if available - 
metadata = result.get('metadata') - - # Create a well-formed JSON object for each result - result_object = { - 'content': content, - 'content_type': content_type, - 'source_type': location_type, - 'source_uri': source_uri - } - - # Add score if available - if score is not None: - result_object['score'] = score - - # Add metadata if available - if metadata: - result_object['metadata'] = metadata - - # Add the JSON object to results - results.append(result_object) + processed_result = self._process_retrieval_result(result) + results.append(processed_result) - # Include nextToken in the response if available + # Build the response object response_object = {} if results: response_object["results"] = results @@ -185,4 +231,14 @@ class BedrockKBRetrieverTool(BaseTool): return json.dumps(response_object, indent=2) except ClientError as e: - return f"Error retrieving from Bedrock Knowledge Base: {str(e)}" \ No newline at end of file + error_code = "Unknown" + error_message = str(e) + + # Try to extract error code if available + if hasattr(e, 'response') and 'Error' in e.response: + error_code = e.response['Error'].get('Code', 'Unknown') + error_message = e.response['Error'].get('Message', str(e)) + + raise BedrockKnowledgeBaseError(f"Error ({error_code}): {error_message}") + except Exception as e: + raise BedrockKnowledgeBaseError(f"Unexpected error: {str(e)}") \ No newline at end of file From 0c3140e7588ceb150216deb70f4e98f03352de3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Fri, 14 Mar 2025 07:39:52 -0700 Subject: [PATCH 303/391] new version --- src/crewai_tools/__init__.py | 1 + src/crewai_tools/tools/__init__.py | 1 + .../tools/databricks_query_tool/README.md | 66 ++ .../tools/databricks_query_tool/__init__.py | 0 .../databricks_query_tool.py | 736 ++++++++++++++++++ 5 files changed, 804 insertions(+) create mode 100644 src/crewai_tools/tools/databricks_query_tool/README.md create mode 100644 src/crewai_tools/tools/databricks_query_tool/__init__.py create mode 100644 src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 4d2ea7e16..79b0272b9 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -7,6 +7,7 @@ from .tools import ( ComposioTool, CSVSearchTool, DallETool, + DatabricksQueryTool, DirectoryReadTool, DirectorySearchTool, DOCXSearchTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 4a9786fe6..25992c179 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -6,6 +6,7 @@ from .code_interpreter_tool.code_interpreter_tool import CodeInterpreterTool from .composio_tool.composio_tool import ComposioTool from .csv_search_tool.csv_search_tool import CSVSearchTool from .dalle_tool.dalle_tool import DallETool +from .databricks_query_tool.databricks_query_tool import DatabricksQueryTool from .directory_read_tool.directory_read_tool import DirectoryReadTool from .directory_search_tool.directory_search_tool import DirectorySearchTool from .docx_search_tool.docx_search_tool import DOCXSearchTool diff --git a/src/crewai_tools/tools/databricks_query_tool/README.md b/src/crewai_tools/tools/databricks_query_tool/README.md new file mode 100644 index 000000000..b5f4880c6 --- /dev/null +++ b/src/crewai_tools/tools/databricks_query_tool/README.md @@ -0,0 +1,66 @@ +# Databricks Query Tool + +## Description + +This tool allows AI agents to execute SQL queries against Databricks workspace 
tables and retrieve the results. It provides a simple interface for querying data from Databricks tables using SQL, making it easy for agents to access and analyze data stored in Databricks.
+
+## Installation
+
+Install the crewai_tools package along with the Databricks SDK:
+
+```shell
+pip install 'crewai[tools]' 'databricks-sdk'
+```
+
+## Authentication
+
+The tool requires Databricks authentication credentials. You can provide these in two ways:
+
+1. **Using Databricks CLI profile**:
+   - Set the `DATABRICKS_CONFIG_PROFILE` environment variable to your profile name.
+
+2. **Using direct credentials**:
+   - Set both `DATABRICKS_HOST` and `DATABRICKS_TOKEN` environment variables.
+
+Example:
+```shell
+export DATABRICKS_HOST="https://your-workspace.cloud.databricks.com"
+export DATABRICKS_TOKEN="dapi1234567890abcdef"
+```
+
+## Usage
+
+```python
+from crewai_tools import DatabricksQueryTool
+
+# Basic usage
+databricks_tool = DatabricksQueryTool()
+
+# With default parameters for catalog, schema, and warehouse
+databricks_tool = DatabricksQueryTool(
+    default_catalog="my_catalog",
+    default_schema="my_schema",
+    default_warehouse_id="warehouse_id"
+)
+
+# Example in a CrewAI agent
+@agent
+def data_analyst(self) -> Agent:
+    return Agent(
+        config=self.agents_config["data_analyst"],
+        allow_delegation=False,
+        tools=[databricks_tool]
+    )
+```
+
+## Parameters
+
+When executing queries, you can provide the following parameters:
+
+- `query` (required): SQL query to execute against the Databricks workspace
+- `catalog` (optional): Databricks catalog name
+- `schema` (optional): Databricks schema name
+- `warehouse_id` (optional): Databricks SQL warehouse ID
+- `row_limit` (optional): Maximum number of rows to return (default: 1000)
+
+If not provided, the tool will use the default values set during initialization. 
\ No newline at end of file diff --git a/src/crewai_tools/tools/databricks_query_tool/__init__.py b/src/crewai_tools/tools/databricks_query_tool/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py b/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py new file mode 100644 index 000000000..74dd48fdf --- /dev/null +++ b/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py @@ -0,0 +1,736 @@ +import os +from typing import Any, Dict, List, Optional, Type, Union + +from crewai.tools import BaseTool +from databricks.sdk import WorkspaceClient +from pydantic import BaseModel, Field, model_validator + + +class DatabricksQueryToolSchema(BaseModel): + """Input schema for DatabricksQueryTool.""" + + query: str = Field( + ..., description="SQL query to execute against the Databricks workspace table" + ) + catalog: Optional[str] = Field( + None, description="Databricks catalog name (optional, defaults to configured catalog)" + ) + schema: Optional[str] = Field( + None, description="Databricks schema name (optional, defaults to configured schema)" + ) + warehouse_id: Optional[str] = Field( + None, description="Databricks SQL warehouse ID (optional, defaults to configured warehouse)" + ) + row_limit: Optional[int] = Field( + 1000, description="Maximum number of rows to return (default: 1000)" + ) + + @model_validator(mode='after') + def validate_input(self) -> 'DatabricksQueryToolSchema': + """Validate the input parameters.""" + # Ensure the query is not empty + if not self.query or not self.query.strip(): + raise ValueError("Query cannot be empty") + + # Add a LIMIT clause to the query if row_limit is provided and query doesn't have one + if self.row_limit and "limit" not in self.query.lower(): + self.query = f"{self.query.rstrip(';')} LIMIT {self.row_limit};" + + return self + + +class DatabricksQueryTool(BaseTool): + """ + A tool for querying Databricks workspace tables using SQL. + + This tool executes SQL queries against Databricks tables and returns the results. + It requires Databricks authentication credentials to be set as environment variables. + + Authentication can be provided via: + - Databricks CLI profile: Set DATABRICKS_CONFIG_PROFILE environment variable + - Direct credentials: Set DATABRICKS_HOST and DATABRICKS_TOKEN environment variables + + Example: + >>> tool = DatabricksQueryTool() + >>> results = tool.run(query="SELECT * FROM my_table LIMIT 10") + """ + + name: str = "Databricks SQL Query" + description: str = ( + "Execute SQL queries against Databricks workspace tables and return the results." + " Provide a 'query' parameter with the SQL query to execute." + ) + args_schema: Type[BaseModel] = DatabricksQueryToolSchema + + # Optional default parameters + default_catalog: Optional[str] = None + default_schema: Optional[str] = None + default_warehouse_id: Optional[str] = None + + _workspace_client: Optional[WorkspaceClient] = None + + def __init__( + self, + default_catalog: Optional[str] = None, + default_schema: Optional[str] = None, + default_warehouse_id: Optional[str] = None, + **kwargs: Any, + ) -> None: + """ + Initialize the DatabricksQueryTool. + + Args: + default_catalog (Optional[str]): Default catalog to use for queries. + default_schema (Optional[str]): Default schema to use for queries. + default_warehouse_id (Optional[str]): Default SQL warehouse ID to use. + **kwargs: Additional keyword arguments passed to BaseTool. 
+ """ + super().__init__(**kwargs) + self.default_catalog = default_catalog + self.default_schema = default_schema + self.default_warehouse_id = default_warehouse_id + + # Validate that Databricks credentials are available + self._validate_credentials() + + def _validate_credentials(self) -> None: + """Validate that Databricks credentials are available.""" + has_profile = "DATABRICKS_CONFIG_PROFILE" in os.environ + has_direct_auth = "DATABRICKS_HOST" in os.environ and "DATABRICKS_TOKEN" in os.environ + + if not (has_profile or has_direct_auth): + raise ValueError( + "Databricks authentication credentials are required. " + "Set either DATABRICKS_CONFIG_PROFILE or both DATABRICKS_HOST and DATABRICKS_TOKEN environment variables." + ) + + @property + def workspace_client(self) -> WorkspaceClient: + """Get or create a Databricks WorkspaceClient instance.""" + if self._workspace_client is None: + self._workspace_client = WorkspaceClient() + return self._workspace_client + + def _format_results(self, results: List[Dict[str, Any]]) -> str: + """Format query results as a readable string.""" + if not results: + return "Query returned no results." + + # Get column names from the first row + if not results[0]: + return "Query returned empty rows with no columns." + + columns = list(results[0].keys()) + + # If we have rows but they're all empty, handle that case + if not columns: + return "Query returned rows but with no column data." + + # Calculate column widths based on data + col_widths = {col: len(col) for col in columns} + for row in results: + for col in columns: + # Convert value to string and get its length + # Handle None values gracefully + value_str = str(row[col]) if row[col] is not None else "NULL" + col_widths[col] = max(col_widths[col], len(value_str)) + + # Create header row + header = " | ".join(f"{col:{col_widths[col]}}" for col in columns) + separator = "-+-".join("-" * col_widths[col] for col in columns) + + # Format data rows + data_rows = [] + for row in results: + # Handle None values by displaying "NULL" + row_values = {col: str(row[col]) if row[col] is not None else "NULL" for col in columns} + data_row = " | ".join(f"{row_values[col]:{col_widths[col]}}" for col in columns) + data_rows.append(data_row) + + # Add row count information + result_info = f"({len(results)} row{'s' if len(results) != 1 else ''} returned)" + + # Combine all parts + return f"{header}\n{separator}\n" + "\n".join(data_rows) + f"\n\n{result_info}" + + def _run( + self, + **kwargs: Any, + ) -> str: + """ + Execute a SQL query against Databricks and return the results. 
+ + Args: + query (str): SQL query to execute + catalog (Optional[str]): Databricks catalog name + schema (Optional[str]): Databricks schema name + warehouse_id (Optional[str]): SQL warehouse ID + row_limit (Optional[int]): Maximum number of rows to return + + Returns: + str: Formatted query results + """ + try: + # Get parameters with fallbacks to default values + query = kwargs.get("query") + catalog = kwargs.get("catalog") or self.default_catalog + schema = kwargs.get("schema") or self.default_schema + warehouse_id = kwargs.get("warehouse_id") or self.default_warehouse_id + row_limit = kwargs.get("row_limit", 1000) + + # Validate schema and query + validated_input = DatabricksQueryToolSchema( + query=query, + catalog=catalog, + schema=schema, + warehouse_id=warehouse_id, + row_limit=row_limit + ) + + # Extract validated parameters + query = validated_input.query + catalog = validated_input.catalog + schema = validated_input.schema + warehouse_id = validated_input.warehouse_id + + # Setup SQL context with catalog/schema if provided + context = {} + if catalog: + context["catalog"] = catalog + if schema: + context["schema"] = schema + + # Execute query + statement = self.workspace_client.statement_execution + + try: + # Execute the statement + execution = statement.execute_statement( + warehouse_id=warehouse_id, + statement=query, + **context + ) + + statement_id = execution.statement_id + except Exception as execute_error: + # Handle immediate execution errors + return f"Error starting query execution: {str(execute_error)}" + + # Poll for results with better error handling + import time + result = None + timeout = 300 # 5 minutes timeout + start_time = time.time() + poll_count = 0 + previous_state = None # Track previous state to detect changes + + print(f"Starting to poll for statement ID: {statement_id}") + + while time.time() - start_time < timeout: + poll_count += 1 + try: + # Get statement status + result = statement.get_statement(statement_id) + + # Debug info + if poll_count % 5 == 0: # Log every 5th poll + print(f"Poll #{poll_count}: State={result.status.state if hasattr(result, 'status') else 'Unknown'}") + + # Check if finished - be very explicit about state checking + if hasattr(result, 'status') and hasattr(result.status, 'state'): + state_value = str(result.status.state) # Convert to string to handle both string and enum + + # Track state changes for debugging + if previous_state != state_value: + print(f"State changed from {previous_state} to {state_value}") + previous_state = state_value + + # Check if state indicates completion + if "SUCCEEDED" in state_value: + print(f"Query succeeded after {poll_count} polls") + break + elif "FAILED" in state_value: + # Extract error message with more robust handling + error_info = "No detailed error info" + try: + # First try direct access to error.message + if hasattr(result.status, 'error') and result.status.error: + if hasattr(result.status.error, 'message'): + error_info = result.status.error.message + # Some APIs may have a different structure + elif hasattr(result.status.error, 'error_message'): + error_info = result.status.error.error_message + # Last resort, try to convert the whole error object to string + else: + error_info = str(result.status.error) + except Exception as err_extract_error: + # If all else fails, try to get any info we can + error_info = f"Error details unavailable: {str(err_extract_error)}" + + # Print error for debugging + print(f"Query failed after {poll_count} polls: {error_info}") + + # Output full 
status object for debugging + print(f"Full status object: {dir(result.status)}") + if hasattr(result.status, 'error'): + print(f"Error object details: {dir(result.status.error)}") + + # Return immediately on first FAILED state detection + print(f"Exiting polling loop after detecting FAILED state") + return f"Query execution failed: {error_info}" + elif "CANCELED" in state_value: + print(f"Query was canceled after {poll_count} polls") + return "Query was canceled" + else: + # Print state for debugging if not recognized + if poll_count % 5 == 0: + print(f"Current state: {state_value}") + else: + print(f"Warning: Result structure does not contain expected status attributes") + + except Exception as poll_error: + print(f"Error during polling (attempt #{poll_count}): {str(poll_error)}") + # Don't immediately fail - try again a few times + if poll_count > 3: + return f"Error checking query status: {str(poll_error)}" + + # Wait before polling again + time.sleep(2) + + # Check if we timed out + if result is None: + return "Query returned no result (likely timed out or failed)" + + if not hasattr(result, 'status') or not hasattr(result.status, 'state'): + return "Query completed but returned an invalid result structure" + + # Convert state to string for comparison + state_value = str(result.status.state) + if not any(state in state_value for state in ["SUCCEEDED", "FAILED", "CANCELED"]): + return f"Query timed out after 5 minutes (last state: {state_value})" + + # Get results - adapt this based on the actual structure of the result object + chunk_results = [] + + # Debug info - print the result structure to help debug + print(f"Result structure: {dir(result)}") + if hasattr(result, 'manifest'): + print(f"Manifest structure: {dir(result.manifest)}") + if hasattr(result, 'result'): + print(f"Result data structure: {dir(result.result)}") + + # Check if we have results and a schema in a very defensive way + has_schema = (hasattr(result, 'manifest') and result.manifest is not None and + hasattr(result.manifest, 'schema') and result.manifest.schema is not None) + has_result = (hasattr(result, 'result') and result.result is not None) + + if has_schema and has_result: + try: + # Get schema for column names + columns = [col.name for col in result.manifest.schema.columns] + + # Debug info for schema + print(f"Schema columns: {columns}") + print(f"Number of columns in schema: {len(columns)}") + print(f"Type of result.result: {type(result.result)}") + + # Keep track of all dynamic columns we create + all_columns = set(columns) + + # Dump the raw structure of result data to help troubleshoot + if hasattr(result.result, 'data_array'): + print(f"data_array structure: {type(result.result.data_array)}") + if result.result.data_array and len(result.result.data_array) > 0: + print(f"First chunk type: {type(result.result.data_array[0])}") + if len(result.result.data_array[0]) > 0: + print(f"First row type: {type(result.result.data_array[0][0])}") + print(f"First row value: {result.result.data_array[0][0]}") + + # IMPROVED DETECTION LOGIC: Check if we're possibly dealing with rows where each item + # contains a single value or character (which could indicate incorrect row structure) + is_likely_incorrect_row_structure = False + sample_size = min(20, len(result.result.data_array[0])) + + if sample_size > 0: + single_char_count = 0 + single_digit_count = 0 + total_items = 0 + + for i in range(sample_size): + val = result.result.data_array[0][i] + total_items += 1 + if isinstance(val, str) and len(val) == 1 and not 
val.isdigit(): + single_char_count += 1 + elif isinstance(val, str) and len(val) == 1 and val.isdigit(): + single_digit_count += 1 + + # If a significant portion of the first values are single characters or digits, + # this likely indicates data is being incorrectly structured + if total_items > 0 and (single_char_count + single_digit_count) / total_items > 0.5: + print(f"Detected potential incorrect row structure: {single_char_count} single chars, {single_digit_count} digits out of {total_items} total items") + is_likely_incorrect_row_structure = True + + # Additional check: if many rows have just 1 item when we expect multiple columns + rows_with_single_item = sum(1 for row in result.result.data_array[:sample_size] if isinstance(row, list) and len(row) == 1) + if rows_with_single_item > sample_size * 0.5 and len(columns) > 1: + print(f"Many rows ({rows_with_single_item}/{sample_size}) have only a single value when expecting {len(columns)} columns") + is_likely_incorrect_row_structure = True + + # Check if we're getting primarily single characters or the data structure seems off, + # we should use special handling + if is_likely_incorrect_row_structure: + print("Data appears to be malformed - will use special row reconstruction") + needs_special_string_handling = True + else: + needs_special_string_handling = False + + # Process results differently based on detection + if needs_special_string_handling: + # We're dealing with data where the rows may be incorrectly structured + print("Using row reconstruction processing mode") + + # Collect all values into a flat list + all_values = [] + if hasattr(result.result, 'data_array') and result.result.data_array: + # Flatten all values into a single list + for chunk in result.result.data_array: + for item in chunk: + if isinstance(item, (list, tuple)): + all_values.extend(item) + else: + all_values.append(item) + + # Print what we gathered + print(f"Collected {len(all_values)} total values") + if len(all_values) > 0: + print(f"Sample values: {all_values[:20]}") + + # Get the expected column count from schema + expected_column_count = len(columns) + print(f"Expected columns per row: {expected_column_count}") + + # Try to reconstruct rows using pattern recognition + reconstructed_rows = [] + + # PATTERN RECOGNITION APPROACH + # Look for likely indicators of row boundaries in the data + # For Netflix data, we expect IDs as numbers, titles as text strings, etc. 
+ + # Use regex pattern to identify ID columns that likely start a new row + import re + id_pattern = re.compile(r'^\d{5,9}$') # Netflix IDs are often 5-9 digits + id_indices = [] + + for i, val in enumerate(all_values): + if isinstance(val, str) and id_pattern.match(val): + # This value looks like an ID, might be the start of a row + if i < len(all_values) - 1: + next_few_values = all_values[i+1:i+5] + # If following values look like they could be part of a title + if any(isinstance(v, str) and len(v) > 1 for v in next_few_values): + id_indices.append(i) + print(f"Found potential row start at index {i}: {val}") + + if id_indices: + print(f"Identified {len(id_indices)} potential row boundaries") + + # If we found potential row starts, use them to extract rows + for i in range(len(id_indices)): + start_idx = id_indices[i] + end_idx = id_indices[i+1] if i+1 < len(id_indices) else len(all_values) + + # Extract values for this row + row_values = all_values[start_idx:end_idx] + + # Special handling for Netflix title data + # Titles might be split into individual characters + if 'Title' in columns and len(row_values) > expected_column_count: + print(f"Row has {len(row_values)} values, likely contains split strings") + + # Try to reconstruct by looking for patterns + # We know ID is first, then Title (which may be split) + # Then other fields like Genre, etc. + + # Take first value as ID + row_dict = {columns[0]: row_values[0]} + + # Look for Genre or other non-title fields to determine where title ends + title_end_idx = 1 + for j in range(2, min(100, len(row_values))): + val = row_values[j] + # Check for common genres or non-title markers + if isinstance(val, str) and val in ['Comedy', 'Drama', 'Action', 'Horror', 'Thriller', 'Documentary']: + # Likely found the Genre field + title_end_idx = j + break + + # Reconstruct title from individual characters + if title_end_idx > 1: + title_chars = row_values[1:title_end_idx] + # Check if they're individual characters + if all(isinstance(c, str) and len(c) == 1 for c in title_chars): + title = ''.join(title_chars) + row_dict['Title'] = title + print(f"Reconstructed title: {title}") + + # Assign remaining values to columns + remaining_values = row_values[title_end_idx:] + for j, col_name in enumerate(columns[2:], 2): + if j-2 < len(remaining_values): + row_dict[col_name] = remaining_values[j-2] + else: + row_dict[col_name] = None + else: + # Fallback: simple mapping + for j, col_name in enumerate(columns): + if j < len(row_values): + row_dict[col_name] = row_values[j] + else: + row_dict[col_name] = None + else: + # Standard mapping + row_dict = {} + for j, col_name in enumerate(columns): + if j < len(row_values): + row_dict[col_name] = row_values[j] + else: + row_dict[col_name] = None + + reconstructed_rows.append(row_dict) + else: + # If pattern recognition didn't work, try more sophisticated reconstruction + print("Pattern recognition did not find row boundaries, trying alternative methods") + + # More intelligent chunking - try to detect where columns like Title might be split + try: + title_idx = columns.index('Title') if 'Title' in columns else -1 + + if title_idx >= 0: + print("Attempting title reconstruction method") + # Try to detect if title is split across multiple values + i = 0 + while i < len(all_values): + # Check if this could be an ID (start of a row) + if isinstance(all_values[i], str) and id_pattern.match(all_values[i]): + row_dict = {columns[0]: all_values[i]} + i += 1 + + # Try to reconstruct title if it appears to be split + 
title_chars = [] + while (i < len(all_values) and + isinstance(all_values[i], str) and + len(all_values[i]) <= 1 and + len(title_chars) < 100): # Cap title length + title_chars.append(all_values[i]) + i += 1 + + if title_chars: + row_dict[columns[title_idx]] = ''.join(title_chars) + print(f"Reconstructed title by joining characters: {row_dict[columns[title_idx]]}") + + # Add remaining fields + for j in range(title_idx + 1, len(columns)): + if i < len(all_values): + row_dict[columns[j]] = all_values[i] + i += 1 + else: + row_dict[columns[j]] = None + + reconstructed_rows.append(row_dict) + else: + i += 1 + except Exception as e: + print(f"Error during title reconstruction: {e}") + + # If we still don't have rows, use simple chunking as fallback + if not reconstructed_rows: + print("Falling back to basic chunking approach") + chunks = [all_values[i:i+expected_column_count] for i in range(0, len(all_values), expected_column_count)] + + for chunk in chunks: + # Skip chunks that seem to be partial/incomplete rows + if len(chunk) < expected_column_count * 0.75: # Allow for some missing values + continue + + row_dict = {} + + # Map values to column names + for i, col in enumerate(columns): + if i < len(chunk): + row_dict[col] = chunk[i] + else: + row_dict[col] = None + + reconstructed_rows.append(row_dict) + + # Apply post-processing to fix known issues + if reconstructed_rows and 'Title' in columns: + print("Applying post-processing to improve data quality") + for row in reconstructed_rows: + # Fix titles that might still have issues + if isinstance(row.get('Title'), str) and len(row.get('Title')) <= 1: + # This is likely still a fragmented title - mark as potentially incomplete + row['Title'] = f"[INCOMPLETE] {row.get('Title')}" + print(f"Found potentially incomplete title: {row.get('Title')}") + + # Ensure we respect the row limit + if row_limit and len(reconstructed_rows) > row_limit: + reconstructed_rows = reconstructed_rows[:row_limit] + print(f"Limited to {row_limit} rows as requested") + + print(f"Successfully reconstructed {len(reconstructed_rows)} rows") + chunk_results = reconstructed_rows + else: + # Process normal result structure as before + print("Using standard processing mode") + + # Check different result structures + if hasattr(result.result, 'data_array') and result.result.data_array: + # Check if data appears to be malformed within chunks + for chunk_idx, chunk in enumerate(result.result.data_array): + print(f"Processing chunk {chunk_idx} with {len(chunk)} values") + + # Check if chunk might actually contain individual columns of a single row + # This is another way data might be malformed - check the first few values + if len(chunk) > 0 and len(columns) > 1: + # If there seems to be a mismatch between chunk structure and expected columns + first_few_values = chunk[:min(5, len(chunk))] + if all(isinstance(val, (str, int, float)) and not isinstance(val, (list, dict)) for val in first_few_values): + if len(chunk) > len(columns) * 3: # Heuristic: if chunk has way more items than columns + print("Chunk appears to contain individual values rather than rows - switching to row reconstruction") + + # This chunk might actually be values of multiple rows - try to reconstruct + values = chunk # All values in this chunk + reconstructed_rows = [] + + # Try to create rows based on expected column count + for i in range(0, len(values), len(columns)): + if i + len(columns) <= len(values): # Ensure we have enough values + row_values = values[i:i+len(columns)] + row_dict = {col: val for col, 
val in zip(columns, row_values)} + reconstructed_rows.append(row_dict) + + if reconstructed_rows: + print(f"Reconstructed {len(reconstructed_rows)} rows from chunk") + chunk_results.extend(reconstructed_rows) + continue # Skip normal processing for this chunk + + # Special case: when chunk contains exactly the right number of values for a single row + # This handles the case where instead of a list of rows, we just got all values in a flat list + if all(isinstance(val, (str, int, float)) and not isinstance(val, (list, dict)) for val in chunk): + if len(chunk) == len(columns) or (len(chunk) > 0 and len(chunk) % len(columns) == 0): + print(f"Chunk appears to contain flat values - treating as rows with {len(columns)} columns each") + + # Process flat list of values as rows + for i in range(0, len(chunk), len(columns)): + row_values = chunk[i:i+len(columns)] + if len(row_values) == len(columns): # Only process complete rows + row_dict = {col: val for col, val in zip(columns, row_values)} + chunk_results.append(row_dict) + print(f"Created row from flat values: {row_dict}") + + # Skip regular row processing for this chunk + continue + + # Normal processing for typical row structure + for row_idx, row in enumerate(chunk): + # Ensure row is actually a collection of values + if not isinstance(row, (list, tuple, dict)): + print(f"Row {row_idx} is not a collection: {row} ({type(row)})") + # This might be a single value; skip it or handle specially + continue + + # Debug info for this row + if isinstance(row, (list, tuple)): + print(f"Row {row_idx} has {len(row)} values") + elif isinstance(row, dict): + print(f"Row {row_idx} already has column mapping: {list(row.keys())}") + + # Convert each row to a dictionary with column names as keys + row_dict = {} + + # Handle dict rows directly + if isinstance(row, dict): + # Use the existing column mapping + row_dict = dict(row) + elif isinstance(row, (list, tuple)): + # Map list of values to columns + for i, val in enumerate(row): + if i < len(columns): # Only process if we have a matching column + row_dict[columns[i]] = val + else: + # Extra values without column names + dynamic_col = f"Column_{i}" + row_dict[dynamic_col] = val + all_columns.add(dynamic_col) + + # If we have fewer values than columns, set missing values to None + for col in columns: + if col not in row_dict: + row_dict[col] = None + + chunk_results.append(row_dict) + + elif hasattr(result.result, 'data') and result.result.data: + # Alternative data structure + print(f"Processing data with {len(result.result.data)} rows") + + for row_idx, row in enumerate(result.result.data): + # Debug info + print(f"Row {row_idx} has {len(row)} values") + + # Safely create dictionary matching column names to values + row_dict = {} + for i, val in enumerate(row): + if i < len(columns): # Only process if we have a matching column + row_dict[columns[i]] = val + else: + # Extra values without column names + dynamic_col = f"Column_{i}" + row_dict[dynamic_col] = val + all_columns.add(dynamic_col) + + # If we have fewer values than columns, set missing values to None + for i, col in enumerate(columns): + if i >= len(row): + row_dict[col] = None + + chunk_results.append(row_dict) + + # After processing all rows, ensure all rows have all columns + print(f"All columns detected: {all_columns}") + normalized_results = [] + for row in chunk_results: + # Create a new row with all columns, defaulting to None for missing ones + normalized_row = {col: row.get(col, None) for col in all_columns} + 
normalized_results.append(normalized_row) + + # Replace the original results with normalized ones + chunk_results = normalized_results + + # Print the processed results for debugging + print(f"Processed {len(chunk_results)} rows") + for i, row in enumerate(chunk_results[:3]): # Show only first 3 rows to avoid log spam + print(f"Row {i}: {row}") + + except Exception as results_error: + # Enhanced error message with more context + import traceback + error_details = traceback.format_exc() + print(f"Error processing results: {error_details}") + return f"Error processing query results: {str(results_error)}\n\nDetails:\n{error_details}" + + # If we have no results but the query succeeded (e.g., for DDL statements) + if not chunk_results and hasattr(result, 'status'): + state_value = str(result.status.state) + if "SUCCEEDED" in state_value: + return "Query executed successfully (no results to display)" + + # Format and return results + return self._format_results(chunk_results) + + except Exception as e: + # Include more details in the error message to help with debugging + import traceback + error_details = traceback.format_exc() + return f"Error executing Databricks query: {str(e)}\n\nDetails:\n{error_details}" \ No newline at end of file From 9e68cbbb3d60ed9d5290befcaa0ddca06c0c66c6 Mon Sep 17 00:00:00 2001 From: Vini Brasil Date: Mon, 17 Mar 2025 15:13:28 -0300 Subject: [PATCH 304/391] Conditionally import Databricks library (#243) Databricks is an optional dependency, but the tool package is imported by default, leading to ImportError exceptions. Related: crewAIInc/crewAI#2390 --- .../databricks_query_tool.py | 21 ++++++++++++------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py b/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py index 74dd48fdf..1761f56c0 100644 --- a/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py +++ b/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py @@ -1,10 +1,11 @@ import os -from typing import Any, Dict, List, Optional, Type, Union +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union from crewai.tools import BaseTool -from databricks.sdk import WorkspaceClient from pydantic import BaseModel, Field, model_validator +if TYPE_CHECKING: + from databricks.sdk import WorkspaceClient class DatabricksQueryToolSchema(BaseModel): """Input schema for DatabricksQueryTool.""" @@ -67,7 +68,7 @@ class DatabricksQueryTool(BaseTool): default_schema: Optional[str] = None default_warehouse_id: Optional[str] = None - _workspace_client: Optional[WorkspaceClient] = None + _workspace_client: Optional["WorkspaceClient"] = None def __init__( self, @@ -89,8 +90,6 @@ class DatabricksQueryTool(BaseTool): self.default_catalog = default_catalog self.default_schema = default_schema self.default_warehouse_id = default_warehouse_id - - # Validate that Databricks credentials are available self._validate_credentials() def _validate_credentials(self) -> None: @@ -105,10 +104,16 @@ class DatabricksQueryTool(BaseTool): ) @property - def workspace_client(self) -> WorkspaceClient: + def workspace_client(self) -> "WorkspaceClient": """Get or create a Databricks WorkspaceClient instance.""" if self._workspace_client is None: - self._workspace_client = WorkspaceClient() + try: + from databricks.sdk import WorkspaceClient + self._workspace_client = WorkspaceClient() + except ImportError: + raise ImportError( + "`databricks-sdk` package not 
found, please run `uv add databricks-sdk`" + ) return self._workspace_client def _format_results(self, results: List[Dict[str, Any]]) -> str: @@ -733,4 +738,4 @@ class DatabricksQueryTool(BaseTool): # Include more details in the error message to help with debugging import traceback error_details = traceback.format_exc() - return f"Error executing Databricks query: {str(e)}\n\nDetails:\n{error_details}" \ No newline at end of file + return f"Error executing Databricks query: {str(e)}\n\nDetails:\n{error_details}" From 568aace62edc75b9c21dd99b9065b81271019f46 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Fri, 14 Mar 2025 07:48:39 -0700 Subject: [PATCH 305/391] fix --- .../databricks_query_tool.py | 59 +++++++++++-------- 1 file changed, 35 insertions(+), 24 deletions(-) diff --git a/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py b/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py index 1761f56c0..24ed6e6a0 100644 --- a/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py +++ b/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py @@ -345,7 +345,12 @@ class DatabricksQueryTool(BaseTool): # Dump the raw structure of result data to help troubleshoot if hasattr(result.result, 'data_array'): print(f"data_array structure: {type(result.result.data_array)}") - if result.result.data_array and len(result.result.data_array) > 0: + # Add defensive check for None data_array + if result.result.data_array is None: + print("data_array is None - likely an empty result set or DDL query") + # Return empty result handling rather than trying to process null data + return "Query executed successfully (no data returned)" + elif result.result.data_array and len(result.result.data_array) > 0: print(f"First chunk type: {type(result.result.data_array[0])}") if len(result.result.data_array[0]) > 0: print(f"First row type: {type(result.result.data_array[0][0])}") @@ -354,43 +359,49 @@ class DatabricksQueryTool(BaseTool): # IMPROVED DETECTION LOGIC: Check if we're possibly dealing with rows where each item # contains a single value or character (which could indicate incorrect row structure) is_likely_incorrect_row_structure = False - sample_size = min(20, len(result.result.data_array[0])) - if sample_size > 0: - single_char_count = 0 - single_digit_count = 0 - total_items = 0 + # Only try to analyze sample if data_array exists and has content + if hasattr(result.result, 'data_array') and result.result.data_array and len(result.result.data_array) > 0 and len(result.result.data_array[0]) > 0: + sample_size = min(20, len(result.result.data_array[0])) - for i in range(sample_size): - val = result.result.data_array[0][i] - total_items += 1 - if isinstance(val, str) and len(val) == 1 and not val.isdigit(): - single_char_count += 1 - elif isinstance(val, str) and len(val) == 1 and val.isdigit(): - single_digit_count += 1 + if sample_size > 0: + single_char_count = 0 + single_digit_count = 0 + total_items = 0 - # If a significant portion of the first values are single characters or digits, - # this likely indicates data is being incorrectly structured - if total_items > 0 and (single_char_count + single_digit_count) / total_items > 0.5: - print(f"Detected potential incorrect row structure: {single_char_count} single chars, {single_digit_count} digits out of {total_items} total items") - is_likely_incorrect_row_structure = True + for i in range(sample_size): + val = result.result.data_array[0][i] + total_items += 1 + if 
isinstance(val, str) and len(val) == 1 and not val.isdigit(): + single_char_count += 1 + elif isinstance(val, str) and len(val) == 1 and val.isdigit(): + single_digit_count += 1 + + # If a significant portion of the first values are single characters or digits, + # this likely indicates data is being incorrectly structured + if total_items > 0 and (single_char_count + single_digit_count) / total_items > 0.5: + print(f"Detected potential incorrect row structure: {single_char_count} single chars, {single_digit_count} digits out of {total_items} total items") + is_likely_incorrect_row_structure = True # Additional check: if many rows have just 1 item when we expect multiple columns - rows_with_single_item = sum(1 for row in result.result.data_array[:sample_size] if isinstance(row, list) and len(row) == 1) - if rows_with_single_item > sample_size * 0.5 and len(columns) > 1: - print(f"Many rows ({rows_with_single_item}/{sample_size}) have only a single value when expecting {len(columns)} columns") - is_likely_incorrect_row_structure = True + rows_with_single_item = 0 + if hasattr(result.result, 'data_array') and result.result.data_array and len(result.result.data_array) > 0: + sample_size_for_rows = min(sample_size, len(result.result.data_array[0])) if 'sample_size' in locals() else min(20, len(result.result.data_array[0])) + rows_with_single_item = sum(1 for row in result.result.data_array[0][:sample_size_for_rows] if isinstance(row, list) and len(row) == 1) + if rows_with_single_item > sample_size_for_rows * 0.5 and len(columns) > 1: + print(f"Many rows ({rows_with_single_item}/{sample_size_for_rows}) have only a single value when expecting {len(columns)} columns") + is_likely_incorrect_row_structure = True # Check if we're getting primarily single characters or the data structure seems off, # we should use special handling - if is_likely_incorrect_row_structure: + if 'is_likely_incorrect_row_structure' in locals() and is_likely_incorrect_row_structure: print("Data appears to be malformed - will use special row reconstruction") needs_special_string_handling = True else: needs_special_string_handling = False # Process results differently based on detection - if needs_special_string_handling: + if 'needs_special_string_handling' in locals() and needs_special_string_handling: # We're dealing with data where the rows may be incorrectly structured print("Using row reconstruction processing mode") From 658c23547e0cba8d8d041ef1a35690540e6148ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Mon, 17 Mar 2025 18:58:45 -0700 Subject: [PATCH 306/391] prep new version --- .../databricks_query_tool.py | 143 ++++-------------- 1 file changed, 30 insertions(+), 113 deletions(-) diff --git a/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py b/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py index 24ed6e6a0..e6381c8c5 100644 --- a/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py +++ b/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py @@ -229,30 +229,22 @@ class DatabricksQueryTool(BaseTool): poll_count = 0 previous_state = None # Track previous state to detect changes - print(f"Starting to poll for statement ID: {statement_id}") - while time.time() - start_time < timeout: poll_count += 1 try: # Get statement status result = statement.get_statement(statement_id) - # Debug info - if poll_count % 5 == 0: # Log every 5th poll - print(f"Poll #{poll_count}: State={result.status.state if hasattr(result, 'status') 
else 'Unknown'}") - # Check if finished - be very explicit about state checking if hasattr(result, 'status') and hasattr(result.status, 'state'): state_value = str(result.status.state) # Convert to string to handle both string and enum # Track state changes for debugging if previous_state != state_value: - print(f"State changed from {previous_state} to {state_value}") previous_state = state_value # Check if state indicates completion if "SUCCEEDED" in state_value: - print(f"Query succeeded after {poll_count} polls") break elif "FAILED" in state_value: # Extract error message with more robust handling @@ -272,29 +264,12 @@ class DatabricksQueryTool(BaseTool): # If all else fails, try to get any info we can error_info = f"Error details unavailable: {str(err_extract_error)}" - # Print error for debugging - print(f"Query failed after {poll_count} polls: {error_info}") - - # Output full status object for debugging - print(f"Full status object: {dir(result.status)}") - if hasattr(result.status, 'error'): - print(f"Error object details: {dir(result.status.error)}") - # Return immediately on first FAILED state detection - print(f"Exiting polling loop after detecting FAILED state") return f"Query execution failed: {error_info}" elif "CANCELED" in state_value: - print(f"Query was canceled after {poll_count} polls") return "Query was canceled" - else: - # Print state for debugging if not recognized - if poll_count % 5 == 0: - print(f"Current state: {state_value}") - else: - print(f"Warning: Result structure does not contain expected status attributes") except Exception as poll_error: - print(f"Error during polling (attempt #{poll_count}): {str(poll_error)}") # Don't immediately fail - try again a few times if poll_count > 3: return f"Error checking query status: {str(poll_error)}" @@ -317,13 +292,6 @@ class DatabricksQueryTool(BaseTool): # Get results - adapt this based on the actual structure of the result object chunk_results = [] - # Debug info - print the result structure to help debug - print(f"Result structure: {dir(result)}") - if hasattr(result, 'manifest'): - print(f"Manifest structure: {dir(result.manifest)}") - if hasattr(result, 'result'): - print(f"Result data structure: {dir(result.result)}") - # Check if we have results and a schema in a very defensive way has_schema = (hasattr(result, 'manifest') and result.manifest is not None and hasattr(result.manifest, 'schema') and result.manifest.schema is not None) @@ -335,26 +303,17 @@ class DatabricksQueryTool(BaseTool): columns = [col.name for col in result.manifest.schema.columns] # Debug info for schema - print(f"Schema columns: {columns}") - print(f"Number of columns in schema: {len(columns)}") - print(f"Type of result.result: {type(result.result)}") # Keep track of all dynamic columns we create all_columns = set(columns) # Dump the raw structure of result data to help troubleshoot if hasattr(result.result, 'data_array'): - print(f"data_array structure: {type(result.result.data_array)}") # Add defensive check for None data_array if result.result.data_array is None: print("data_array is None - likely an empty result set or DDL query") # Return empty result handling rather than trying to process null data return "Query executed successfully (no data returned)" - elif result.result.data_array and len(result.result.data_array) > 0: - print(f"First chunk type: {type(result.result.data_array[0])}") - if len(result.result.data_array[0]) > 0: - print(f"First row type: {type(result.result.data_array[0][0])}") - print(f"First row value: 
{result.result.data_array[0][0]}") # IMPROVED DETECTION LOGIC: Check if we're possibly dealing with rows where each item # contains a single value or character (which could indicate incorrect row structure) @@ -380,7 +339,6 @@ class DatabricksQueryTool(BaseTool): # If a significant portion of the first values are single characters or digits, # this likely indicates data is being incorrectly structured if total_items > 0 and (single_char_count + single_digit_count) / total_items > 0.5: - print(f"Detected potential incorrect row structure: {single_char_count} single chars, {single_digit_count} digits out of {total_items} total items") is_likely_incorrect_row_structure = True # Additional check: if many rows have just 1 item when we expect multiple columns @@ -389,7 +347,6 @@ class DatabricksQueryTool(BaseTool): sample_size_for_rows = min(sample_size, len(result.result.data_array[0])) if 'sample_size' in locals() else min(20, len(result.result.data_array[0])) rows_with_single_item = sum(1 for row in result.result.data_array[0][:sample_size_for_rows] if isinstance(row, list) and len(row) == 1) if rows_with_single_item > sample_size_for_rows * 0.5 and len(columns) > 1: - print(f"Many rows ({rows_with_single_item}/{sample_size_for_rows}) have only a single value when expecting {len(columns)} columns") is_likely_incorrect_row_structure = True # Check if we're getting primarily single characters or the data structure seems off, @@ -416,14 +373,8 @@ class DatabricksQueryTool(BaseTool): else: all_values.append(item) - # Print what we gathered - print(f"Collected {len(all_values)} total values") - if len(all_values) > 0: - print(f"Sample values: {all_values[:20]}") - # Get the expected column count from schema expected_column_count = len(columns) - print(f"Expected columns per row: {expected_column_count}") # Try to reconstruct rows using pattern recognition reconstructed_rows = [] @@ -445,10 +396,8 @@ class DatabricksQueryTool(BaseTool): # If following values look like they could be part of a title if any(isinstance(v, str) and len(v) > 1 for v in next_few_values): id_indices.append(i) - print(f"Found potential row start at index {i}: {val}") if id_indices: - print(f"Identified {len(id_indices)} potential row boundaries") # If we found potential row starts, use them to extract rows for i in range(len(id_indices)): @@ -461,7 +410,6 @@ class DatabricksQueryTool(BaseTool): # Special handling for Netflix title data # Titles might be split into individual characters if 'Title' in columns and len(row_values) > expected_column_count: - print(f"Row has {len(row_values)} values, likely contains split strings") # Try to reconstruct by looking for patterns # We know ID is first, then Title (which may be split) @@ -487,7 +435,6 @@ class DatabricksQueryTool(BaseTool): if all(isinstance(c, str) and len(c) == 1 for c in title_chars): title = ''.join(title_chars) row_dict['Title'] = title - print(f"Reconstructed title: {title}") # Assign remaining values to columns remaining_values = row_values[title_end_idx:] @@ -514,49 +461,42 @@ class DatabricksQueryTool(BaseTool): reconstructed_rows.append(row_dict) else: - # If pattern recognition didn't work, try more sophisticated reconstruction - print("Pattern recognition did not find row boundaries, trying alternative methods") - # More intelligent chunking - try to detect where columns like Title might be split - try: - title_idx = columns.index('Title') if 'Title' in columns else -1 + title_idx = columns.index('Title') if 'Title' in columns else -1 - if title_idx >= 0: 
- print("Attempting title reconstruction method") - # Try to detect if title is split across multiple values - i = 0 - while i < len(all_values): - # Check if this could be an ID (start of a row) - if isinstance(all_values[i], str) and id_pattern.match(all_values[i]): - row_dict = {columns[0]: all_values[i]} + if title_idx >= 0: + print("Attempting title reconstruction method") + # Try to detect if title is split across multiple values + i = 0 + while i < len(all_values): + # Check if this could be an ID (start of a row) + if isinstance(all_values[i], str) and id_pattern.match(all_values[i]): + row_dict = {columns[0]: all_values[i]} + i += 1 + + # Try to reconstruct title if it appears to be split + title_chars = [] + while (i < len(all_values) and + isinstance(all_values[i], str) and + len(all_values[i]) <= 1 and + len(title_chars) < 100): # Cap title length + title_chars.append(all_values[i]) i += 1 - # Try to reconstruct title if it appears to be split - title_chars = [] - while (i < len(all_values) and - isinstance(all_values[i], str) and - len(all_values[i]) <= 1 and - len(title_chars) < 100): # Cap title length - title_chars.append(all_values[i]) + if title_chars: + row_dict[columns[title_idx]] = ''.join(title_chars) + + # Add remaining fields + for j in range(title_idx + 1, len(columns)): + if i < len(all_values): + row_dict[columns[j]] = all_values[i] i += 1 + else: + row_dict[columns[j]] = None - if title_chars: - row_dict[columns[title_idx]] = ''.join(title_chars) - print(f"Reconstructed title by joining characters: {row_dict[columns[title_idx]]}") - - # Add remaining fields - for j in range(title_idx + 1, len(columns)): - if i < len(all_values): - row_dict[columns[j]] = all_values[i] - i += 1 - else: - row_dict[columns[j]] = None - - reconstructed_rows.append(row_dict) - else: - i += 1 - except Exception as e: - print(f"Error during title reconstruction: {e}") + reconstructed_rows.append(row_dict) + else: + i += 1 # If we still don't have rows, use simple chunking as fallback if not reconstructed_rows: @@ -587,14 +527,11 @@ class DatabricksQueryTool(BaseTool): if isinstance(row.get('Title'), str) and len(row.get('Title')) <= 1: # This is likely still a fragmented title - mark as potentially incomplete row['Title'] = f"[INCOMPLETE] {row.get('Title')}" - print(f"Found potentially incomplete title: {row.get('Title')}") # Ensure we respect the row limit if row_limit and len(reconstructed_rows) > row_limit: reconstructed_rows = reconstructed_rows[:row_limit] - print(f"Limited to {row_limit} rows as requested") - print(f"Successfully reconstructed {len(reconstructed_rows)} rows") chunk_results = reconstructed_rows else: # Process normal result structure as before @@ -604,7 +541,6 @@ class DatabricksQueryTool(BaseTool): if hasattr(result.result, 'data_array') and result.result.data_array: # Check if data appears to be malformed within chunks for chunk_idx, chunk in enumerate(result.result.data_array): - print(f"Processing chunk {chunk_idx} with {len(chunk)} values") # Check if chunk might actually contain individual columns of a single row # This is another way data might be malformed - check the first few values @@ -627,7 +563,6 @@ class DatabricksQueryTool(BaseTool): reconstructed_rows.append(row_dict) if reconstructed_rows: - print(f"Reconstructed {len(reconstructed_rows)} rows from chunk") chunk_results.extend(reconstructed_rows) continue # Skip normal processing for this chunk @@ -635,7 +570,6 @@ class DatabricksQueryTool(BaseTool): # This handles the case where instead of a list 
of rows, we just got all values in a flat list if all(isinstance(val, (str, int, float)) and not isinstance(val, (list, dict)) for val in chunk): if len(chunk) == len(columns) or (len(chunk) > 0 and len(chunk) % len(columns) == 0): - print(f"Chunk appears to contain flat values - treating as rows with {len(columns)} columns each") # Process flat list of values as rows for i in range(0, len(chunk), len(columns)): @@ -643,7 +577,6 @@ class DatabricksQueryTool(BaseTool): if len(row_values) == len(columns): # Only process complete rows row_dict = {col: val for col, val in zip(columns, row_values)} chunk_results.append(row_dict) - print(f"Created row from flat values: {row_dict}") # Skip regular row processing for this chunk continue @@ -652,16 +585,9 @@ class DatabricksQueryTool(BaseTool): for row_idx, row in enumerate(chunk): # Ensure row is actually a collection of values if not isinstance(row, (list, tuple, dict)): - print(f"Row {row_idx} is not a collection: {row} ({type(row)})") # This might be a single value; skip it or handle specially continue - # Debug info for this row - if isinstance(row, (list, tuple)): - print(f"Row {row_idx} has {len(row)} values") - elif isinstance(row, dict): - print(f"Row {row_idx} already has column mapping: {list(row.keys())}") - # Convert each row to a dictionary with column names as keys row_dict = {} @@ -689,11 +615,9 @@ class DatabricksQueryTool(BaseTool): elif hasattr(result.result, 'data') and result.result.data: # Alternative data structure - print(f"Processing data with {len(result.result.data)} rows") for row_idx, row in enumerate(result.result.data): # Debug info - print(f"Row {row_idx} has {len(row)} values") # Safely create dictionary matching column names to values row_dict = {} @@ -714,7 +638,6 @@ class DatabricksQueryTool(BaseTool): chunk_results.append(row_dict) # After processing all rows, ensure all rows have all columns - print(f"All columns detected: {all_columns}") normalized_results = [] for row in chunk_results: # Create a new row with all columns, defaulting to None for missing ones @@ -724,16 +647,10 @@ class DatabricksQueryTool(BaseTool): # Replace the original results with normalized ones chunk_results = normalized_results - # Print the processed results for debugging - print(f"Processed {len(chunk_results)} rows") - for i, row in enumerate(chunk_results[:3]): # Show only first 3 rows to avoid log spam - print(f"Row {i}: {row}") - except Exception as results_error: # Enhanced error message with more context import traceback error_details = traceback.format_exc() - print(f"Error processing results: {error_details}") return f"Error processing query results: {str(results_error)}\n\nDetails:\n{error_details}" # If we have no results but the query succeeded (e.g., for DDL statements) From 319423b70a08646df9e97115c8dfe1ff5e7d9084 Mon Sep 17 00:00:00 2001 From: Raju Rangan Date: Tue, 18 Mar 2025 10:24:08 -0400 Subject: [PATCH 307/391] added __init__ files as suggested. 
--- src/crewai_tools/aws/__init__.py | 8 +++++++- src/crewai_tools/aws/bedrock/__init__.py | 4 ++++ src/crewai_tools/aws/bedrock/agents/__init__.py | 3 +++ src/crewai_tools/aws/bedrock/knowledge_base/__init__.py | 3 +++ 4 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 src/crewai_tools/aws/bedrock/__init__.py create mode 100644 src/crewai_tools/aws/bedrock/agents/__init__.py create mode 100644 src/crewai_tools/aws/bedrock/knowledge_base/__init__.py diff --git a/src/crewai_tools/aws/__init__.py b/src/crewai_tools/aws/__init__.py index ea4626a32..dd01fd8fe 100644 --- a/src/crewai_tools/aws/__init__.py +++ b/src/crewai_tools/aws/__init__.py @@ -1,3 +1,9 @@ from .s3 import S3ReaderTool, S3WriterTool +from .bedrock import BedrockKBRetrieverTool, BedrockInvokeAgentTool -__all__ = ['S3ReaderTool', 'S3WriterTool'] \ No newline at end of file +__all__ = [ + 'S3ReaderTool', + 'S3WriterTool', + 'BedrockKBRetrieverTool', + 'BedrockInvokeAgentTool' +] \ No newline at end of file diff --git a/src/crewai_tools/aws/bedrock/__init__.py b/src/crewai_tools/aws/bedrock/__init__.py new file mode 100644 index 000000000..ded472062 --- /dev/null +++ b/src/crewai_tools/aws/bedrock/__init__.py @@ -0,0 +1,4 @@ +from .knowledge_base.retriever_tool import BedrockKBRetrieverTool +from .agents.invoke_agent_tool import BedrockInvokeAgentTool + +__all__ = ["BedrockKBRetrieverTool", "BedrockInvokeAgentTool"] diff --git a/src/crewai_tools/aws/bedrock/agents/__init__.py b/src/crewai_tools/aws/bedrock/agents/__init__.py new file mode 100644 index 000000000..b1f799872 --- /dev/null +++ b/src/crewai_tools/aws/bedrock/agents/__init__.py @@ -0,0 +1,3 @@ +from .invoke_agent_tool import BedrockInvokeAgentTool + +__all__ = ["BedrockInvokeAgentTool"] diff --git a/src/crewai_tools/aws/bedrock/knowledge_base/__init__.py b/src/crewai_tools/aws/bedrock/knowledge_base/__init__.py new file mode 100644 index 000000000..013d94cf3 --- /dev/null +++ b/src/crewai_tools/aws/bedrock/knowledge_base/__init__.py @@ -0,0 +1,3 @@ +from .retriever_tool import BedrockKBRetrieverTool + +__all__ = ["BedrockKBRetrieverTool"] From 5cfcb5c74acc9c9925bb379e9b601e299de409ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Wed, 19 Mar 2025 11:30:26 -0700 Subject: [PATCH 308/391] new version --- src/crewai_tools/__init__.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 7b2b2de89..c13ac5007 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -59,3 +59,10 @@ from .tools import ( YoutubeChannelSearchTool, YoutubeVideoSearchTool, ) + +from .aws import ( + S3ReaderTool, + S3WriterTool, + BedrockKBRetrieverTool, + BedrockInvokeAgentTool, +) From 5ded394e435bb34b955f0b21f7562e2949eb3024 Mon Sep 17 00:00:00 2001 From: Parth Patel <64201651+parthbs@users.noreply.github.com> Date: Tue, 25 Mar 2025 19:01:01 +0000 Subject: [PATCH 309/391] #249 feat: add support for local qdrant client --- .../qdrant_vector_search_tool/qdrant_search_tool.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py index c59dd29d5..3ef467264 100644 --- a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py +++ b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py @@ -66,8 +66,8 @@ class QdrantVectorSearchTool(BaseTool): ..., description="The URL of the 
Qdrant server", ) - qdrant_api_key: str = Field( - ..., + qdrant_api_key: Optional[str] = Field( + default=None, description="The API key for the Qdrant server", ) custom_embedding_fn: Optional[callable] = Field( @@ -80,7 +80,7 @@ class QdrantVectorSearchTool(BaseTool): if QDRANT_AVAILABLE: self.client = QdrantClient( url=self.qdrant_url, - api_key=self.qdrant_api_key, + api_key=self.qdrant_api_key if self.qdrant_api_key else None, ) else: import click @@ -133,7 +133,7 @@ class QdrantVectorSearchTool(BaseTool): # Search in Qdrant using the built-in query method query_vector = ( - self._vectorize_query(query) + self._vectorize_query(query, embedding_model="text-embedding-3-large") if not self.custom_embedding_fn else self.custom_embedding_fn(query) ) @@ -158,11 +158,12 @@ class QdrantVectorSearchTool(BaseTool): return json.dumps(results, indent=2) - def _vectorize_query(self, query: str) -> list[float]: + def _vectorize_query(self, query: str, embedding_model: str) -> list[float]: """Default vectorization function with openai. Args: query (str): The query to vectorize + embedding_model (str): The embedding model to use Returns: list[float]: The vectorized query @@ -173,7 +174,7 @@ class QdrantVectorSearchTool(BaseTool): embedding = ( client.embeddings.create( input=[query], - model="text-embedding-3-small", + model=embedding_model, ) .data[0] .embedding From 4fd7db2e5384d73472692dbc3e2239f0174f544e Mon Sep 17 00:00:00 2001 From: Parth Patel <64201651+parthbs@users.noreply.github.com> Date: Tue, 25 Mar 2025 19:16:18 +0000 Subject: [PATCH 310/391] #249: update QdrantVectorSearchTool README --- src/crewai_tools/tools/qdrant_vector_search_tool/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/qdrant_vector_search_tool/README.md b/src/crewai_tools/tools/qdrant_vector_search_tool/README.md index 131dbca15..26ad9a15f 100644 --- a/src/crewai_tools/tools/qdrant_vector_search_tool/README.md +++ b/src/crewai_tools/tools/qdrant_vector_search_tool/README.md @@ -26,7 +26,7 @@ tool = QdrantVectorSearchTool( collection_name="example_collections", limit=3, qdrant_url="https://your-qdrant-cluster-url.com", - qdrant_api_key="your-qdrant-api-key", + qdrant_api_key="your-qdrant-api-key", # (optional) ) @@ -43,7 +43,7 @@ rag_agent = Agent( - `collection_name` : The name of the collection to search within. (Required) - `qdrant_url` : The URL of the Qdrant cluster. (Required) -- `qdrant_api_key` : The API key for the Qdrant cluster. (Required) +- `qdrant_api_key` : The API key for the Qdrant cluster. (Optional) - `limit` : The number of results to return. (Optional) - `vectorizer` : The vectorizer to use. 
(Optional)

From e0adb4695cdb30997616b4077f77f78f3d4755ac Mon Sep 17 00:00:00 2001
From: Shady Ali <121682078+SHIXOOM@users.noreply.github.com>
Date: Fri, 28 Mar 2025 16:58:47 +0200
Subject: [PATCH 311/391] Addressed review comments and made further improvements

---
 .../firecrawl_crawl_website_tool/README.md | 36 ++++++++---
 .../firecrawl_crawl_website_tool.py | 60 +++++++++++++------
 2 files changed, 68 insertions(+), 28 deletions(-)

diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/README.md b/src/crewai_tools/tools/firecrawl_crawl_website_tool/README.md
index f0bf66918..d8e8f1407 100644
--- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/README.md
+++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/README.md
@@ -4,6 +4,10 @@
 [Firecrawl](https://firecrawl.dev) is a platform for crawling and converting any website into clean markdown or structured data.
 
+## Version Compatibility
+
+This implementation is compatible with FireCrawl API v1.
+
 ## Installation
 
 - Get an API key from [firecrawl.dev](https://firecrawl.dev) and set it in environment variables (`FIRECRAWL_API_KEY`).
@@ -27,13 +31,27 @@ tool = FirecrawlCrawlWebsiteTool(url='firecrawl.dev')
 
 - `api_key`: Optional. Specifies the Firecrawl API key. Defaults to the `FIRECRAWL_API_KEY` environment variable.
 - `url`: The base URL to start crawling from.
-- `page_options`: Optional.
-  - `onlyMainContent`: Optional. Only return the main content of the page excluding headers, navs, footers, etc.
-  - `includeHtml`: Optional. Include the raw HTML content of the page. Will output a html key in the response.
-- `crawler_options`: Optional. Options for controlling the crawling behavior.
-  - `maxDepth`: Optional. Maximum depth to crawl. Depth 1 is the base URL, depth 2 includes the base URL and its direct children and so on.
-  - `limit`: Optional. Maximum number of pages to crawl.
-  - `scrapeOptions`: Optional. Additional options for controlling the crawler.
-    - `formats`: Optional. Formats for the page's content to be returned (eg. markdown, html, screenshot, links).
-    - `timeout`: Optional. Timeout in milliseconds for the crawling operation.
+- `maxDepth`: Optional. Maximum depth to crawl. Depth 1 is the base URL, depth 2 includes the base URL and its direct children, and so on.
+- `limit`: Optional. Maximum number of pages to crawl.
+- `allowExternalLinks`: Optional. Allows the crawler to follow links that point to external domains.
+- `formats`: Optional. Formats for the page's content to be returned (e.g. markdown, html, screenshot, links).
+- `timeout`: Optional. Timeout in milliseconds for the crawling operation.
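+
+These arguments map to the tool's run-time parameters, so the defaults can be overridden per call. A minimal sketch of direct invocation (the URL and option values are placeholders):
+
+```python
+tool = FirecrawlCrawlWebsiteTool()
+
+# Per-call arguments override the defaults listed above.
+result = tool.run(
+    url="https://example.com",
+    maxDepth=1,            # crawl only the base URL
+    limit=10,              # stop after at most 10 pages
+    formats=["markdown"],  # return markdown content only
+    timeout=15000,         # 15 seconds, in milliseconds
+)
+```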
+## Configurations Example + +This is the default configuration + +```python + DEFAULT_CRAWLING_OPTIONS = { + "maxDepth": 2, + "ignoreSitemap": True, + "limit": 100, + "allowBackwardLinks": False, + "allowExternalLinks": False, + "scrapeOptions": { + "formats": ["markdown", "screenshot", "links"], + "onlyMainContent": True, + "timeout": 30000 + } + } +``` diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py index 878063953..82bd913cd 100644 --- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py @@ -12,9 +12,18 @@ except ImportError: class FirecrawlCrawlWebsiteToolSchema(BaseModel): url: str = Field(description="Website URL") - crawler_options: Optional[Dict[str, Any]] = Field( - default=None, description="Options for crawling" - ) + maxDepth: Optional[int] = Field( + default=2, + description="Maximum depth to crawl. Depth 1 is the base URL, depth 2 includes the base URL and its direct children and so on.") + limit: Optional[int] = Field( + default=100, + description="Maximum number of pages to crawl.") + allowExternalLinks: Optional[bool] = Field( + default=False, + description="Allows the crawler to follow links that point to external domains.") + formats: Optional[list[str]] = Field( + default=["markdown", "screenshot", "links"], + description="Formats for the page's content to be returned (eg. markdown, html, screenshot, links).") timeout: Optional[int] = Field( default=30000, description="Timeout in milliseconds for the crawling operation. The default value is 30000.", @@ -30,6 +39,7 @@ class FirecrawlCrawlWebsiteTool(BaseTool): args_schema: Type[BaseModel] = FirecrawlCrawlWebsiteToolSchema api_key: Optional[str] = None _firecrawl: Optional["FirecrawlApp"] = PrivateAttr(None) + def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) @@ -64,26 +74,38 @@ class FirecrawlCrawlWebsiteTool(BaseTool): def _run( self, url: str, - crawler_options: Optional[Dict[str, Any]] = None, + maxDepth: Optional[int] = 2, + limit: Optional[int] = 100, + allowExternalLinks: Optional[bool] = False, + formats: Optional[list[str]] = ["markdown", "screenshot", "links"], timeout: Optional[int] = 30000, ): - if crawler_options is None: - crawler_options = { - "maxDepth": 2, - "limit": 10, - "scrapeOptions": { - # same options as in /scrape - "formats": ["markdown", "screenshot", "links"], - "timeout": timeout - } - } - + # Default options for timeout and crawling + DEFAULT_TIMEOUT = 30000 + DEFAULT_CRAWLING_OPTIONS = { + "maxDepth": 2, + "ignoreSitemap": True, + "limit": 100, + "allowBackwardLinks": False, + "allowExternalLinks": False, + "scrapeOptions": { + "formats": ["markdown", "screenshot", "links"], + "onlyMainContent": True, + "timeout": DEFAULT_TIMEOUT + } + } - else: - crawler_options["scrapeOptions"]["timeout"] = timeout - + # Add default options not present as parameters + crawling_options = DEFAULT_CRAWLING_OPTIONS - return self._firecrawl.crawl_url(url, crawler_options) + # Update the values of parameters present + crawling_options["maxDepth"] = maxDepth + crawling_options["limit"] = limit + crawling_options["allowExternalLinks"] = allowExternalLinks + crawling_options["scrapeOptions"]["formats"] = formats + crawling_options["scrapeOptions"]["timeout"] = timeout + + return self._firecrawl.crawl_url(url, 
crawling_options) try: From 47acb5c3e47dbf9a9be2f944dd1d9a46965fe9c5 Mon Sep 17 00:00:00 2001 From: lucasgomide Date: Tue, 1 Apr 2025 09:55:01 -0300 Subject: [PATCH 312/391] feat: clean up Pydantic warnings Several warnings were addressed, following https://docs.pydantic.dev/2.10/migration --- .../patronus_eval_tool/patronus_local_evaluator_tool.py | 5 ++--- src/crewai_tools/tools/rag/rag_tool.py | 5 ++--- .../scrapegraph_scrape_tool/scrapegraph_scrape_tool.py | 6 +++--- .../tools/selenium_scraping_tool/selenium_scraping_tool.py | 4 ++-- src/crewai_tools/tools/vision_tool/vision_tool.py | 4 ++-- 5 files changed, 11 insertions(+), 13 deletions(-) diff --git a/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py b/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py index dfc9e757f..602b45864 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py +++ b/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py @@ -1,7 +1,7 @@ from typing import TYPE_CHECKING, Any, Type from crewai.tools import BaseTool -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field if TYPE_CHECKING: from patronus import Client, EvaluationResult @@ -40,8 +40,7 @@ class PatronusLocalEvaluatorTool(BaseTool): evaluator: str evaluated_model_gold_answer: str - class Config: - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def __init__( self, diff --git a/src/crewai_tools/tools/rag/rag_tool.py b/src/crewai_tools/tools/rag/rag_tool.py index a9bbdab53..6d0320c0c 100644 --- a/src/crewai_tools/tools/rag/rag_tool.py +++ b/src/crewai_tools/tools/rag/rag_tool.py @@ -2,12 +2,11 @@ from abc import ABC, abstractmethod from typing import Any from crewai.tools import BaseTool -from pydantic import BaseModel, Field, model_validator +from pydantic import BaseModel, ConfigDict, Field, model_validator class Adapter(BaseModel, ABC): - class Config: - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) @abstractmethod def query(self, question: str) -> str: diff --git a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py index 0c142de18..70764c294 100644 --- a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py +++ b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py @@ -1,9 +1,9 @@ import os -from typing import Any, Optional, Type, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Optional, Type from urllib.parse import urlparse from crewai.tools import BaseTool -from pydantic import BaseModel, Field, validator, ConfigDict +from pydantic import BaseModel, ConfigDict, Field, field_validator # Type checking import if TYPE_CHECKING: @@ -31,7 +31,7 @@ class ScrapegraphScrapeToolSchema(FixedScrapegraphScrapeToolSchema): description="Prompt to guide the extraction of content", ) - @validator("website_url") + @field_validator("website_url") def validate_url(cls, v): """Validate URL format""" try: diff --git a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py index 240269756..27f7db132 100644 --- a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py +++ b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py @@ -4,7 +4,7 @@ from typing import Any, Optional, Type from
urllib.parse import urlparse from crewai.tools import BaseTool -from pydantic import BaseModel, Field, validator +from pydantic import BaseModel, Field, field_validator class FixedSeleniumScrapingToolSchema(BaseModel): @@ -23,7 +23,7 @@ class SeleniumScrapingToolSchema(FixedSeleniumScrapingToolSchema): description="Mandatory css reference for element to scrape from the website", ) - @validator("website_url") + @field_validator("website_url") def validate_website_url(cls, v): if not v: raise ValueError("Website URL cannot be empty") diff --git a/src/crewai_tools/tools/vision_tool/vision_tool.py b/src/crewai_tools/tools/vision_tool/vision_tool.py index 594be0b22..a8daaabb9 100644 --- a/src/crewai_tools/tools/vision_tool/vision_tool.py +++ b/src/crewai_tools/tools/vision_tool/vision_tool.py @@ -4,7 +4,7 @@ from typing import Optional, Type from crewai.tools import BaseTool from openai import OpenAI -from pydantic import BaseModel, validator +from pydantic import BaseModel, field_validator class ImagePromptSchema(BaseModel): @@ -12,7 +12,7 @@ class ImagePromptSchema(BaseModel): image_path_url: str = "The image path or URL." - @validator("image_path_url") + @field_validator("image_path_url") def validate_image_path_url(cls, v: str) -> str: if v.startswith("http"): return v From 6b4453e1b138bff9e309f4fc18950b1545c7ed45 Mon Sep 17 00:00:00 2001 From: lorenzejay Date: Tue, 1 Apr 2025 10:42:36 -0700 Subject: [PATCH 313/391] refactor: rename schema to db_schema in DatabricksQueryTool for clarity --- .../databricks_query_tool/databricks_query_tool.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py b/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py index e6381c8c5..428cea5d3 100644 --- a/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py +++ b/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py @@ -16,7 +16,7 @@ class DatabricksQueryToolSchema(BaseModel): catalog: Optional[str] = Field( None, description="Databricks catalog name (optional, defaults to configured catalog)" ) - schema: Optional[str] = Field( + db_schema: Optional[str] = Field( None, description="Databricks schema name (optional, defaults to configured schema)" ) warehouse_id: Optional[str] = Field( @@ -168,7 +168,7 @@ class DatabricksQueryTool(BaseTool): Args: query (str): SQL query to execute catalog (Optional[str]): Databricks catalog name - schema (Optional[str]): Databricks schema name + db_schema (Optional[str]): Databricks schema name warehouse_id (Optional[str]): SQL warehouse ID row_limit (Optional[int]): Maximum number of rows to return @@ -179,7 +179,7 @@ class DatabricksQueryTool(BaseTool): # Get parameters with fallbacks to default values query = kwargs.get("query") catalog = kwargs.get("catalog") or self.default_catalog - schema = kwargs.get("schema") or self.default_schema + db_schema = kwargs.get("db_schema") or self.default_schema warehouse_id = kwargs.get("warehouse_id") or self.default_warehouse_id row_limit = kwargs.get("row_limit", 1000) @@ -187,7 +187,7 @@ class DatabricksQueryTool(BaseTool): validated_input = DatabricksQueryToolSchema( query=query, catalog=catalog, - schema=schema, + db_schema=db_schema, warehouse_id=warehouse_id, row_limit=row_limit ) @@ -195,15 +195,15 @@ class DatabricksQueryTool(BaseTool): # Extract validated parameters query = validated_input.query catalog = validated_input.catalog - schema = validated_input.schema + db_schema = 
validated_input.db_schema warehouse_id = validated_input.warehouse_id # Setup SQL context with catalog/schema if provided context = {} if catalog: context["catalog"] = catalog - if schema: - context["schema"] = schema + if db_schema: + context["schema"] = db_schema # Execute query statement = self.workspace_client.statement_execution From 89394ef3e3d60966252b9c3782118594527daa6a Mon Sep 17 00:00:00 2001 From: lorenzejay Date: Fri, 4 Apr 2025 11:42:32 -0700 Subject: [PATCH 314/391] Refactor: Clean up FirecrawlCrawlWebsiteTool schema field descriptions and formatting for improved readability --- .../firecrawl_crawl_website_tool.py | 26 ++++++++++--------- 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py index 82bd913cd..f91ad3184 100644 --- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py @@ -1,4 +1,4 @@ -from typing import Any, Dict, Optional, Type +from typing import Any, Optional, Type from crewai.tools import BaseTool from pydantic import BaseModel, ConfigDict, Field, PrivateAttr @@ -14,16 +14,19 @@ class FirecrawlCrawlWebsiteToolSchema(BaseModel): url: str = Field(description="Website URL") maxDepth: Optional[int] = Field( default=2, - description="Maximum depth to crawl. Depth 1 is the base URL, depth 2 includes the base URL and its direct children and so on.") + description="Maximum depth to crawl. Depth 1 is the base URL, depth 2 includes the base URL and its direct children and so on.", + ) limit: Optional[int] = Field( - default=100, - description="Maximum number of pages to crawl.") + default=100, description="Maximum number of pages to crawl." + ) allowExternalLinks: Optional[bool] = Field( default=False, - description="Allows the crawler to follow links that point to external domains.") + description="Allows the crawler to follow links that point to external domains.", + ) formats: Optional[list[str]] = Field( default=["markdown", "screenshot", "links"], - description="Formats for the page's content to be returned (eg. markdown, html, screenshot, links).") + description="Formats for the page's content to be returned (eg. markdown, html, screenshot, links).", + ) timeout: Optional[int] = Field( default=30000, description="Timeout in milliseconds for the crawling operation. 
The default value is 30000.", @@ -39,7 +42,6 @@ class FirecrawlCrawlWebsiteTool(BaseTool): args_schema: Type[BaseModel] = FirecrawlCrawlWebsiteToolSchema api_key: Optional[str] = None _firecrawl: Optional["FirecrawlApp"] = PrivateAttr(None) - def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) @@ -91,20 +93,20 @@ class FirecrawlCrawlWebsiteTool(BaseTool): "scrapeOptions": { "formats": ["markdown", "screenshot", "links"], "onlyMainContent": True, - "timeout": DEFAULT_TIMEOUT - } + "timeout": DEFAULT_TIMEOUT, + }, } - + # Add default options not present as parameters crawling_options = DEFAULT_CRAWLING_OPTIONS - + # Update the values of parameters present crawling_options["maxDepth"] = maxDepth crawling_options["limit"] = limit crawling_options["allowExternalLinks"] = allowExternalLinks crawling_options["scrapeOptions"]["formats"] = formats crawling_options["scrapeOptions"]["timeout"] = timeout - + return self._firecrawl.crawl_url(url, crawling_options) From 257f4bf38513ae4900d9788d9a7f48a6764bdd95 Mon Sep 17 00:00:00 2001 From: Vini Brasil Date: Tue, 8 Apr 2025 13:20:11 -0400 Subject: [PATCH 315/391] Test optional dependencies are not required in runtime (#260) * Test optional dependencies are not required in runtime * Add dynamic imports to S3 tools * Setup CI --- .../aws/bedrock/agents/invoke_agent_tool.py | 9 +- .../bedrock/knowledge_base/retriever_tool.py | 9 +- src/crewai_tools/aws/s3/reader_tool.py | 10 +- src/crewai_tools/aws/s3/writer_tool.py | 11 +- tests/it/tools/snowflake_search_tool_test.py | 219 ------------------ tests/spider_tool_test.py | 46 ---- tests/test_optional_dependencies.py | 41 ++++ tests/tools/selenium_scraping_tool_test.py | 13 +- 8 files changed, 80 insertions(+), 278 deletions(-) delete mode 100644 tests/it/tools/snowflake_search_tool_test.py delete mode 100644 tests/spider_tool_test.py create mode 100644 tests/test_optional_dependencies.py diff --git a/src/crewai_tools/aws/bedrock/agents/invoke_agent_tool.py b/src/crewai_tools/aws/bedrock/agents/invoke_agent_tool.py index 6c43480c0..c064b9b2d 100644 --- a/src/crewai_tools/aws/bedrock/agents/invoke_agent_tool.py +++ b/src/crewai_tools/aws/bedrock/agents/invoke_agent_tool.py @@ -8,10 +8,7 @@ from dotenv import load_dotenv from crewai.tools import BaseTool from pydantic import BaseModel, Field -import boto3 -from botocore.exceptions import ClientError -# Import custom exceptions from ..exceptions import BedrockAgentError, BedrockValidationError # Load environment variables from .env file @@ -92,6 +89,12 @@ class BedrockInvokeAgentTool(BaseTool): raise BedrockValidationError(f"Parameter validation failed: {str(e)}") def _run(self, query: str) -> str: + try: + import boto3 + from botocore.exceptions import ClientError + except ImportError: + raise ImportError("`boto3` package not found, please run `uv add boto3`") + try: # Initialize the Bedrock Agent Runtime client bedrock_agent = boto3.client( diff --git a/src/crewai_tools/aws/bedrock/knowledge_base/retriever_tool.py b/src/crewai_tools/aws/bedrock/knowledge_base/retriever_tool.py index 55a15b621..15c74077c 100644 --- a/src/crewai_tools/aws/bedrock/knowledge_base/retriever_tool.py +++ b/src/crewai_tools/aws/bedrock/knowledge_base/retriever_tool.py @@ -5,10 +5,7 @@ from dotenv import load_dotenv from crewai.tools import BaseTool from pydantic import BaseModel, Field -import boto3 -from botocore.exceptions import ClientError -# Import custom exceptions from ..exceptions import BedrockKnowledgeBaseError, BedrockValidationError # 
Load environment variables from .env file @@ -179,6 +176,12 @@ class BedrockKBRetrieverTool(BaseTool): return result_object def _run(self, query: str) -> str: + try: + import boto3 + from botocore.exceptions import ClientError + except ImportError: + raise ImportError("`boto3` package not found, please run `uv add boto3`") + try: # Initialize the Bedrock Agent Runtime client bedrock_agent_runtime = boto3.client( diff --git a/src/crewai_tools/aws/s3/reader_tool.py b/src/crewai_tools/aws/s3/reader_tool.py index 7cd734081..4b3b9a394 100644 --- a/src/crewai_tools/aws/s3/reader_tool.py +++ b/src/crewai_tools/aws/s3/reader_tool.py @@ -1,10 +1,8 @@ -from typing import Type +from typing import Any, Type import os from crewai.tools import BaseTool from pydantic import BaseModel, Field -import boto3 -from botocore.exceptions import ClientError class S3ReaderToolInput(BaseModel): @@ -19,6 +17,12 @@ class S3ReaderTool(BaseTool): args_schema: Type[BaseModel] = S3ReaderToolInput def _run(self, file_path: str) -> str: + try: + import boto3 + from botocore.exceptions import ClientError + except ImportError: + raise ImportError("`boto3` package not found, please run `uv add boto3`") + try: bucket_name, object_key = self._parse_s3_path(file_path) diff --git a/src/crewai_tools/aws/s3/writer_tool.py b/src/crewai_tools/aws/s3/writer_tool.py index 0c4201e0f..f0aaddb28 100644 --- a/src/crewai_tools/aws/s3/writer_tool.py +++ b/src/crewai_tools/aws/s3/writer_tool.py @@ -1,22 +1,27 @@ -from typing import Type +from typing import Any, Type import os from crewai.tools import BaseTool from pydantic import BaseModel, Field -import boto3 -from botocore.exceptions import ClientError class S3WriterToolInput(BaseModel): """Input schema for S3WriterTool.""" file_path: str = Field(..., description="S3 file path (e.g., 's3://bucket-name/file-name')") content: str = Field(..., description="Content to write to the file") + class S3WriterTool(BaseTool): name: str = "S3 Writer Tool" description: str = "Writes content to a file in Amazon S3 given an S3 file path" args_schema: Type[BaseModel] = S3WriterToolInput def _run(self, file_path: str, content: str) -> str: + try: + import boto3 + from botocore.exceptions import ClientError + except ImportError: + raise ImportError("`boto3` package not found, please run `uv add boto3`") + try: bucket_name, object_key = self._parse_s3_path(file_path) diff --git a/tests/it/tools/snowflake_search_tool_test.py b/tests/it/tools/snowflake_search_tool_test.py deleted file mode 100644 index 70dc07953..000000000 --- a/tests/it/tools/snowflake_search_tool_test.py +++ /dev/null @@ -1,219 +0,0 @@ -import asyncio -import json -from decimal import Decimal - -import pytest -from snowflake.connector.errors import DatabaseError, OperationalError - -from crewai_tools import SnowflakeConfig, SnowflakeSearchTool - -# Test Data -MENU_ITEMS = [ - (10001, "Ice Cream", "Freezing Point", "Lemonade", "Beverage", "Cold Option", 1, 4), - ( - 10002, - "Ice Cream", - "Freezing Point", - "Vanilla Ice Cream", - "Dessert", - "Ice Cream", - 2, - 6, - ), -] - -INVALID_QUERIES = [ - ("SELECT * FROM nonexistent_table", "relation 'nonexistent_table' does not exist"), - ("SELECT invalid_column FROM menu", "invalid identifier 'invalid_column'"), - ("INVALID SQL QUERY", "SQL compilation error"), -] - - -# Integration Test Fixtures -@pytest.fixture -def config(): - """Create a Snowflake configuration with test credentials.""" - return SnowflakeConfig( - account="lwyhjun-wx11931", - user="crewgitci", - 
password="crewaiT00ls_publicCIpass123", - warehouse="COMPUTE_WH", - database="tasty_bytes_sample_data", - snowflake_schema="raw_pos", - ) - - -@pytest.fixture -def snowflake_tool(config): - """Create a SnowflakeSearchTool instance.""" - return SnowflakeSearchTool(config=config) - - -# Integration Tests with Real Snowflake Connection -@pytest.mark.integration -@pytest.mark.asyncio -@pytest.mark.parametrize( - "menu_id,expected_type,brand,item_name,category,subcategory,cost,price", MENU_ITEMS -) -async def test_menu_items( - snowflake_tool, - menu_id, - expected_type, - brand, - item_name, - category, - subcategory, - cost, - price, -): - """Test menu items with parameterized data for multiple test cases.""" - results = await snowflake_tool._run( - query=f"SELECT * FROM menu WHERE menu_id = {menu_id}" - ) - assert len(results) == 1 - menu_item = results[0] - - # Validate all fields - assert menu_item["MENU_ID"] == menu_id - assert menu_item["MENU_TYPE"] == expected_type - assert menu_item["TRUCK_BRAND_NAME"] == brand - assert menu_item["MENU_ITEM_NAME"] == item_name - assert menu_item["ITEM_CATEGORY"] == category - assert menu_item["ITEM_SUBCATEGORY"] == subcategory - assert menu_item["COST_OF_GOODS_USD"] == cost - assert menu_item["SALE_PRICE_USD"] == price - - # Validate health metrics JSON structure - health_metrics = json.loads(menu_item["MENU_ITEM_HEALTH_METRICS_OBJ"]) - assert "menu_item_health_metrics" in health_metrics - metrics = health_metrics["menu_item_health_metrics"][0] - assert "ingredients" in metrics - assert isinstance(metrics["ingredients"], list) - assert all(isinstance(ingredient, str) for ingredient in metrics["ingredients"]) - assert metrics["is_dairy_free_flag"] in ["Y", "N"] - - -@pytest.mark.integration -@pytest.mark.asyncio -async def test_menu_categories_aggregation(snowflake_tool): - """Test complex aggregation query on menu categories with detailed validations.""" - results = await snowflake_tool._run( - query=""" - SELECT - item_category, - COUNT(*) as item_count, - AVG(sale_price_usd) as avg_price, - SUM(sale_price_usd - cost_of_goods_usd) as total_margin, - COUNT(DISTINCT menu_type) as menu_type_count, - MIN(sale_price_usd) as min_price, - MAX(sale_price_usd) as max_price - FROM menu - GROUP BY item_category - HAVING COUNT(*) > 1 - ORDER BY item_count DESC - """ - ) - - assert len(results) > 0 - for category in results: - # Basic presence checks - assert all( - key in category - for key in [ - "ITEM_CATEGORY", - "ITEM_COUNT", - "AVG_PRICE", - "TOTAL_MARGIN", - "MENU_TYPE_COUNT", - "MIN_PRICE", - "MAX_PRICE", - ] - ) - - # Value validations - assert category["ITEM_COUNT"] > 1 # Due to HAVING clause - assert category["MIN_PRICE"] <= category["MAX_PRICE"] - assert category["AVG_PRICE"] >= category["MIN_PRICE"] - assert category["AVG_PRICE"] <= category["MAX_PRICE"] - assert category["MENU_TYPE_COUNT"] >= 1 - assert isinstance(category["TOTAL_MARGIN"], (float, Decimal)) - - -@pytest.mark.integration -@pytest.mark.asyncio -@pytest.mark.parametrize("invalid_query,expected_error", INVALID_QUERIES) -async def test_invalid_queries(snowflake_tool, invalid_query, expected_error): - """Test error handling for invalid queries.""" - with pytest.raises((DatabaseError, OperationalError)) as exc_info: - await snowflake_tool._run(query=invalid_query) - assert expected_error.lower() in str(exc_info.value).lower() - - -@pytest.mark.integration -@pytest.mark.asyncio -async def test_concurrent_queries(snowflake_tool): - """Test handling of concurrent queries.""" - queries = [ - 
"SELECT COUNT(*) FROM menu", - "SELECT COUNT(DISTINCT menu_type) FROM menu", - "SELECT COUNT(DISTINCT item_category) FROM menu", - ] - - tasks = [snowflake_tool._run(query=query) for query in queries] - results = await asyncio.gather(*tasks) - - assert len(results) == 3 - assert all(isinstance(result, list) for result in results) - assert all(len(result) == 1 for result in results) - assert all(isinstance(result[0], dict) for result in results) - - -@pytest.mark.integration -@pytest.mark.asyncio -async def test_query_timeout(snowflake_tool): - """Test query timeout handling with a complex query.""" - with pytest.raises((DatabaseError, OperationalError)) as exc_info: - await snowflake_tool._run( - query=""" - WITH RECURSIVE numbers AS ( - SELECT 1 as n - UNION ALL - SELECT n + 1 - FROM numbers - WHERE n < 1000000 - ) - SELECT COUNT(*) FROM numbers - """ - ) - assert ( - "timeout" in str(exc_info.value).lower() - or "execution time" in str(exc_info.value).lower() - ) - - -@pytest.mark.integration -@pytest.mark.asyncio -async def test_caching_behavior(snowflake_tool): - """Test query caching behavior and performance.""" - query = "SELECT * FROM menu LIMIT 5" - - # First execution - start_time = asyncio.get_event_loop().time() - results1 = await snowflake_tool._run(query=query) - first_duration = asyncio.get_event_loop().time() - start_time - - # Second execution (should be cached) - start_time = asyncio.get_event_loop().time() - results2 = await snowflake_tool._run(query=query) - second_duration = asyncio.get_event_loop().time() - start_time - - # Verify results - assert results1 == results2 - assert len(results1) == 5 - assert second_duration < first_duration - - # Verify cache invalidation with different query - different_query = "SELECT * FROM menu LIMIT 10" - different_results = await snowflake_tool._run(query=different_query) - assert len(different_results) == 10 - assert different_results != results1 diff --git a/tests/spider_tool_test.py b/tests/spider_tool_test.py deleted file mode 100644 index 7f5613fe6..000000000 --- a/tests/spider_tool_test.py +++ /dev/null @@ -1,46 +0,0 @@ -from crewai import Agent, Crew, Task - -from crewai_tools.tools.spider_tool.spider_tool import SpiderTool - - -def test_spider_tool(): - spider_tool = SpiderTool() - - searcher = Agent( - role="Web Research Expert", - goal="Find related information from specific URL's", - backstory="An expert web researcher that uses the web extremely well", - tools=[spider_tool], - verbose=True, - cache=False, - ) - - choose_between_scrape_crawl = Task( - description="Scrape the page of spider.cloud and return a summary of how fast it is", - expected_output="spider.cloud is a fast scraping and crawling tool", - agent=searcher, - ) - - return_metadata = Task( - description="Scrape https://spider.cloud with a limit of 1 and enable metadata", - expected_output="Metadata and 10 word summary of spider.cloud", - agent=searcher, - ) - - css_selector = Task( - description="Scrape one page of spider.cloud with the `body > div > main > section.grid.md\:grid-cols-2.gap-10.place-items-center.md\:max-w-screen-xl.mx-auto.pb-8.pt-20 > div:nth-child(1) > h1` CSS selector", - expected_output="The content of the element with the css selector body > div > main > section.grid.md\:grid-cols-2.gap-10.place-items-center.md\:max-w-screen-xl.mx-auto.pb-8.pt-20 > div:nth-child(1) > h1", - agent=searcher, - ) - - crew = Crew( - agents=[searcher], - tasks=[choose_between_scrape_crawl, return_metadata, css_selector], - verbose=True, - ) - - crew.kickoff() 
- - -if __name__ == "__main__": - test_spider_tool() diff --git a/tests/test_optional_dependencies.py b/tests/test_optional_dependencies.py new file mode 100644 index 000000000..b2d691a61 --- /dev/null +++ b/tests/test_optional_dependencies.py @@ -0,0 +1,41 @@ +import subprocess +import tempfile +from pathlib import Path + +import pytest + + +@pytest.fixture +def temp_project(): + temp_dir = tempfile.TemporaryDirectory() + project_dir = Path(temp_dir.name) / "test_project" + project_dir.mkdir() + + pyproject_content = f""" + [project] + name = "test-project" + version = "0.1.0" + description = "Test project" + requires-python = ">=3.10" + """ + + (project_dir / "pyproject.toml").write_text(pyproject_content) + run_command(["uv", "add", "--editable", f"file://{Path.cwd().absolute()}"], project_dir) + run_command(["uv", "sync"], project_dir) + yield project_dir + + +def run_command(cmd, cwd): + return subprocess.run(cmd, cwd=cwd, capture_output=True, text=True) + + +def test_no_optional_dependencies_in_init(temp_project): + """ + Test that crewai-tools can be imported without optional dependencies. + + The package defines optional dependencies in pyproject.toml, but the base + package should be importable without any of these optional dependencies + being installed. + """ + result = run_command(["uv", "run", "python", "-c", "import crewai_tools"], temp_project) + assert result.returncode == 0, f"Import failed with error: {result.stderr}" \ No newline at end of file diff --git a/tests/tools/selenium_scraping_tool_test.py b/tests/tools/selenium_scraping_tool_test.py index 271047449..4e0b890b5 100644 --- a/tests/tools/selenium_scraping_tool_test.py +++ b/tests/tools/selenium_scraping_tool_test.py @@ -1,4 +1,6 @@ from unittest.mock import MagicMock, patch +import tempfile +import os from bs4 import BeautifulSoup @@ -27,7 +29,11 @@ def initialize_tool_with(mock_driver): return tool -def test_tool_initialization(): +@patch("selenium.webdriver.Chrome") +def test_tool_initialization(mocked_chrome): + temp_dir = tempfile.mkdtemp() + mocked_chrome.return_value = MagicMock() + tool = SeleniumScrapingTool() assert tool.website_url is None @@ -35,6 +41,11 @@ def test_tool_initialization(): assert tool.cookie is None assert tool.wait_time == 3 assert tool.return_html is False + + try: + os.rmdir(temp_dir) + except: + pass @patch("selenium.webdriver.Chrome") From c2cb8e06be8c6d3097157a48102c9b000b32467d Mon Sep 17 00:00:00 2001 From: Nathan Chapman Date: Thu, 10 Apr 2025 17:38:28 -0500 Subject: [PATCH 316/391] Fix typo seach -> search in tools --- src/crewai_tools/tools/__init__.py | 4 ++-- .../tools/{mdx_seach_tool => mdx_search_tool}/README.md | 0 .../{mdx_seach_tool => mdx_search_tool}/mdx_search_tool.py | 0 .../tools/{pg_seach_tool => pg_search_tool}/README.md | 0 .../tools/{pg_seach_tool => pg_search_tool}/pg_search_tool.py | 0 5 files changed, 2 insertions(+), 2 deletions(-) rename src/crewai_tools/tools/{mdx_seach_tool => mdx_search_tool}/README.md (100%) rename src/crewai_tools/tools/{mdx_seach_tool => mdx_search_tool}/mdx_search_tool.py (100%) rename src/crewai_tools/tools/{pg_seach_tool => pg_search_tool}/README.md (100%) rename src/crewai_tools/tools/{pg_seach_tool => pg_search_tool}/pg_search_tool.py (100%) diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 2f08bf9d7..bae21a1c6 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -26,7 +26,7 @@ from .hyperbrowser_load_tool.hyperbrowser_load_tool import 
HyperbrowserLoadTool from .json_search_tool.json_search_tool import JSONSearchTool from .linkup.linkup_search_tool import LinkupSearchTool from .llamaindex_tool.llamaindex_tool import LlamaIndexTool -from .mdx_seach_tool.mdx_search_tool import MDXSearchTool +from .mdx_search_tool.mdx_search_tool import MDXSearchTool from .multion_tool.multion_tool import MultiOnTool from .mysql_search_tool.mysql_search_tool import MySQLSearchTool from .nl2sql.nl2sql_tool import NL2SQLTool @@ -36,7 +36,7 @@ from .patronus_eval_tool import ( PatronusPredefinedCriteriaEvalTool, ) from .pdf_search_tool.pdf_search_tool import PDFSearchTool -from .pg_seach_tool.pg_search_tool import PGSearchTool +from .pg_search_tool.pg_search_tool import PGSearchTool from .qdrant_vector_search_tool.qdrant_search_tool import QdrantVectorSearchTool from .rag.rag_tool import RagTool from .scrape_element_from_website.scrape_element_from_website import ( diff --git a/src/crewai_tools/tools/mdx_seach_tool/README.md b/src/crewai_tools/tools/mdx_search_tool/README.md similarity index 100% rename from src/crewai_tools/tools/mdx_seach_tool/README.md rename to src/crewai_tools/tools/mdx_search_tool/README.md diff --git a/src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py b/src/crewai_tools/tools/mdx_search_tool/mdx_search_tool.py similarity index 100% rename from src/crewai_tools/tools/mdx_seach_tool/mdx_search_tool.py rename to src/crewai_tools/tools/mdx_search_tool/mdx_search_tool.py diff --git a/src/crewai_tools/tools/pg_seach_tool/README.md b/src/crewai_tools/tools/pg_search_tool/README.md similarity index 100% rename from src/crewai_tools/tools/pg_seach_tool/README.md rename to src/crewai_tools/tools/pg_search_tool/README.md diff --git a/src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py b/src/crewai_tools/tools/pg_search_tool/pg_search_tool.py similarity index 100% rename from src/crewai_tools/tools/pg_seach_tool/pg_search_tool.py rename to src/crewai_tools/tools/pg_search_tool/pg_search_tool.py From 8cbdaeaff50891615a2559433469c822ddd1e1ab Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Tue, 15 Apr 2025 11:50:40 -0300 Subject: [PATCH 317/391] refactor(selenium): improve driver management and add headless mode (#268) - Refactor Selenium scraping tool to use single driver instance - Add headless mode configuration for Chrome - Improve error handling with try/finally - Simplify code structure and improve maintainability --- .../selenium_scraping_tool.py | 47 +++++++++---------- tests/tools/selenium_scraping_tool_test.py | 21 +++++++-- 2 files changed, 39 insertions(+), 29 deletions(-) diff --git a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py index 27f7db132..57211e64e 100644 --- a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py +++ b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py @@ -57,7 +57,6 @@ class SeleniumScrapingTool(BaseTool): wait_time: Optional[int] = 3 css_element: Optional[str] = None return_html: Optional[bool] = False - _options: Optional[dict] = None _by: Optional[Any] = None def __init__( @@ -91,8 +90,10 @@ class SeleniumScrapingTool(BaseTool): raise ImportError( "`selenium` and `webdriver-manager` package not found, please run `uv add selenium webdriver-manager`" ) - self.driver = webdriver.Chrome() - self._options = Options() + + options: Options = Options() + options.add_argument("--headless") + self.driver = webdriver.Chrome(options=options) self._by = By 
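+        # A single headless Chrome instance is created here in __init__ and
+        # reused by _make_request for every scrape, rather than a new driver
+        # being spawned per call.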
if cookie is not None: self.cookie = cookie @@ -116,28 +117,30 @@ class SeleniumScrapingTool(BaseTool): website_url = kwargs.get("website_url", self.website_url) css_element = kwargs.get("css_element", self.css_element) return_html = kwargs.get("return_html", self.return_html) - driver = self._create_driver(website_url, self.cookie, self.wait_time) + try: + self._make_request(website_url, self.cookie, self.wait_time) + content = self._get_content(css_element, return_html) + return "\n".join(content) + except Exception as e: + return f"Error scraping website: {str(e)}" + finally: + self.driver.close() - content = self._get_content(driver, css_element, return_html) - driver.close() - - return "\n".join(content) - - def _get_content(self, driver, css_element, return_html): + def _get_content(self, css_element, return_html): content = [] if self._is_css_element_empty(css_element): - content.append(self._get_body_content(driver, return_html)) + content.append(self._get_body_content(return_html)) else: - content.extend(self._get_elements_content(driver, css_element, return_html)) + content.extend(self._get_elements_content(css_element, return_html)) return content def _is_css_element_empty(self, css_element): return css_element is None or css_element.strip() == "" - def _get_body_content(self, driver, return_html): - body_element = driver.find_element(self._by.TAG_NAME, "body") + def _get_body_content(self, return_html): + body_element = self.driver.find_element(self._by.TAG_NAME, "body") return ( body_element.get_attribute("outerHTML") @@ -145,17 +148,17 @@ class SeleniumScrapingTool(BaseTool): else body_element.text ) - def _get_elements_content(self, driver, css_element, return_html): + def _get_elements_content(self, css_element, return_html): elements_content = [] - for element in driver.find_elements(self._by.CSS_SELECTOR, css_element): + for element in self.driver.find_elements(self._by.CSS_SELECTOR, css_element): elements_content.append( element.get_attribute("outerHTML") if return_html else element.text ) return elements_content - def _create_driver(self, url, cookie, wait_time): + def _make_request(self, url, cookie, wait_time): if not url: raise ValueError("URL cannot be empty") @@ -163,17 +166,13 @@ class SeleniumScrapingTool(BaseTool): if not re.match(r"^https?://", url): raise ValueError("URL must start with http:// or https://") - options = self._options - options.add_argument("--headless") - driver = self.driver(options=options) - driver.get(url) + self.driver.get(url) time.sleep(wait_time) if cookie: - driver.add_cookie(cookie) + self.driver.add_cookie(cookie) time.sleep(wait_time) - driver.get(url) + self.driver.get(url) time.sleep(wait_time) - return driver def close(self): self.driver.close() diff --git a/tests/tools/selenium_scraping_tool_test.py b/tests/tools/selenium_scraping_tool_test.py index 4e0b890b5..b360df3a1 100644 --- a/tests/tools/selenium_scraping_tool_test.py +++ b/tests/tools/selenium_scraping_tool_test.py @@ -1,7 +1,8 @@ -from unittest.mock import MagicMock, patch -import tempfile import os +import tempfile +from unittest.mock import MagicMock, patch +import pytest from bs4 import BeautifulSoup from crewai_tools.tools.selenium_scraping_tool.selenium_scraping_tool import ( @@ -24,7 +25,7 @@ def mock_driver_with_html(html_content): def initialize_tool_with(mock_driver): tool = SeleniumScrapingTool() - tool.driver = MagicMock(return_value=mock_driver) + tool.driver = mock_driver return tool @@ -33,7 +34,7 @@ def initialize_tool_with(mock_driver): def 
test_tool_initialization(mocked_chrome): temp_dir = tempfile.mkdtemp() mocked_chrome.return_value = MagicMock() - + tool = SeleniumScrapingTool() assert tool.website_url is None @@ -41,7 +42,7 @@ def test_tool_initialization(mocked_chrome): assert tool.cookie is None assert tool.wait_time == 3 assert tool.return_html is False - + try: os.rmdir(temp_dir) except: @@ -102,3 +103,13 @@ def test_scrape_with_return_html_false(_mocked_chrome_driver): mock_driver.get.assert_called_once_with("https://example.com") mock_driver.find_element.assert_called_with("tag name", "body") mock_driver.close.assert_called_once() + + +@patch("selenium.webdriver.Chrome") +def test_scrape_with_driver_error(_mocked_chrome_driver): + mock_driver = MagicMock() + mock_driver.find_element.side_effect = Exception("WebDriver error occurred") + tool = initialize_tool_with(mock_driver) + result = tool._run(website_url="https://example.com") + assert result == "Error scraping website: WebDriver error occurred" + mock_driver.close.assert_called_once() From a2707423192a14c7d196602f1ab56e877f9a9585 Mon Sep 17 00:00:00 2001 From: Guillaume Raille Date: Wed, 16 Apr 2025 19:18:07 +0200 Subject: [PATCH 318/391] mcp server proposal (#267) * mcp server proposal * Refactor MCP server implementation: rename MCPServer to MCPServerAdapter and update usage examples. Adjust error message for optional dependencies installation. * Update MCPServerAdapter usage examples to remove unnecessary parameters in context manager instantiation. * Refactor MCPServerAdapter to move optional dependency imports inside the class constructor, improving error handling for missing dependencies. * Enhance MCPServerAdapter by adding type hinting for server parameters and improving error handling during server startup. Optional dependency imports are now conditionally loaded, ensuring clearer error messages for missing packages. * Refactor MCPServerAdapter to improve error handling for missing 'mcp' package. Conditional imports are now used, prompting users to install the package if not found, enhancing user experience during server initialization. * Refactor MCPServerAdapter to ensure proper cleanup after usage. Removed redundant exception handling and ensured that the server stops in a finally block, improving resource management. * add documentation * fix typo close -> stop * add tests and fix double call with context manager * Enhance MCPServerAdapter with logging capabilities and improved error handling during initialization. Added logging for cleanup errors and refined the structure for handling missing 'mcp' package dependencies. --------- Co-authored-by: lorenzejay --- README.md | 117 +++++++++++++++++++ src/crewai_tools/__init__.py | 4 + src/crewai_tools/adapters/mcp_adapter.py | 142 +++++++++++++++++++++++ tests/adapters/mcp_adapter.py | 104 +++++++++++++++++ 4 files changed, 367 insertions(+) create mode 100644 src/crewai_tools/adapters/mcp_adapter.py create mode 100644 tests/adapters/mcp_adapter.py diff --git a/README.md b/README.md index d68d5ff73..dd2e304e5 100644 --- a/README.md +++ b/README.md @@ -66,6 +66,123 @@ def my_custom_function(input): --- +## CrewAI Tools and MCP + +CrewAI Tools supports the Model Context Protocol (MCP). It gives you access to thousands of tools from the hundreds of MCP servers out there built by the community. 
+ +Before you start using MCP with CrewAI tools, you need to install the `mcp` extra dependencies: + +```bash +pip install crewai-tools[mcp] +# or +uv add crewai-tools --extra mcp +``` + +To quickly get started with MCP in CrewAI, you have two options: + +### Option 1: Fully managed connection + +In this scenario we use a context manager (`with` statement) to start and stop the connection with the MCP server. +This is done in the background and you only get to interact with the CrewAI tools corresponding to the MCP server's tools. + +For an STDIO-based MCP server: + +```python +from mcp import StdioServerParameters +from crewai_tools import MCPServerAdapter + +serverparams = StdioServerParameters( + command="uvx", + args=["--quiet", "pubmedmcp@0.1.3"], + env={"UV_PYTHON": "3.12", **os.environ}, +) + +with MCPServerAdapter(serverparams) as tools: + # tools is now a list of CrewAI Tools matching 1:1 with the MCP server's tools + agent = Agent(..., tools=tools) + task = Task(...) + crew = Crew(..., agents=[agent], tasks=[task]) + crew.kickoff(...) +``` + +For an SSE-based MCP server: + +```python +serverparams = {"url": "http://localhost:8000/sse"} +with MCPServerAdapter(serverparams) as tools: + # tools is now a list of CrewAI Tools matching 1:1 with the MCP server's tools + agent = Agent(..., tools=tools) + task = Task(...) + crew = Crew(..., agents=[agent], tasks=[task]) + crew.kickoff(...) +``` + +### Option 2: More control over the MCP connection + +If you need more control over the MCP connection, you can instantiate the MCPServerAdapter as an `mcp_server_adapter` object which can be used to manage the connection with the MCP server and access the available tools. + +**Important**: in this case you need to call `mcp_server_adapter.stop()` to make sure the connection is correctly stopped. We recommend that you use a `try ... finally` block to make sure `.stop()` is called even in case of errors. + +Here is the same example for an STDIO MCP server: + +```python +from mcp import StdioServerParameters +from crewai_tools import MCPServerAdapter + +serverparams = StdioServerParameters( + command="uvx", + args=["--quiet", "pubmedmcp@0.1.3"], + env={"UV_PYTHON": "3.12", **os.environ}, +) + +try: + mcp_server_adapter = MCPServerAdapter(serverparams) + tools = mcp_server_adapter.tools + # tools is now a list of CrewAI Tools matching 1:1 with the MCP server's tools + agent = Agent(..., tools=tools) + task = Task(...) + crew = Crew(..., agents=[agent], tasks=[task]) + crew.kickoff(...) + +# ** important ** don't forget to stop the connection +finally: + mcp_server_adapter.stop() +``` + +And finally, the same thing for an SSE MCP server: + +```python +from mcp import StdioServerParameters +from crewai_tools import MCPServerAdapter + +serverparams = {"url": "http://localhost:8000/sse"} + +try: + mcp_server_adapter = MCPServerAdapter(serverparams) + tools = mcp_server_adapter.tools + # tools is now a list of CrewAI Tools matching 1:1 with the MCP server's tools + agent = Agent(..., tools=tools) + task = Task(...) + crew = Crew(..., agents=[agent], tasks=[task]) + crew.kickoff(...) + +# ** important ** don't forget to stop the connection +finally: + mcp_server_adapter.stop() +``` + +### Considerations & Limitations + +#### Staying Safe with MCP + +Always make sure that you trust the MCP server before using it. Using an STDIO server will execute code on your machine, and SSE is not a silver bullet either: a malicious MCP server can still attempt many kinds of injection into your application.
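+
+For instance, one small mitigation when launching an STDIO server is to pass only the environment variables it actually needs, instead of forwarding your whole environment (a sketch based on the `StdioServerParameters` example above):
+
+```python
+from mcp import StdioServerParameters
+
+serverparams = StdioServerParameters(
+    command="uvx",
+    args=["--quiet", "pubmedmcp@0.1.3"],
+    env={"UV_PYTHON": "3.12"},  # only what the server needs, not **os.environ
+)
+```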
+ +#### Limitations + +* At this time we only support tools from MCP servers, not other types of primitives like prompts, resources... +* We only return the first text output returned by the MCP server tool, using `.content[0].text` + +--- + ## Why Use CrewAI Tools? - **Simplicity & Flexibility**: Easy-to-use yet powerful enough for complex workflows. diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index c13ac5007..3e3cdc019 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -66,3 +66,7 @@ from .aws import ( BedrockKBRetrieverTool, BedrockInvokeAgentTool, ) + +from .adapters.mcp_adapter import ( + MCPServerAdapter, +) diff --git a/src/crewai_tools/adapters/mcp_adapter.py b/src/crewai_tools/adapters/mcp_adapter.py new file mode 100644 index 000000000..2f5cc71f8 --- /dev/null +++ b/src/crewai_tools/adapters/mcp_adapter.py @@ -0,0 +1,142 @@ +from __future__ import annotations +import logging +from typing import Any, TYPE_CHECKING +from crewai.tools import BaseTool + +""" +MCPServer for CrewAI. + + +""" +logging.basicConfig( + level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s" +) +logger = logging.getLogger(__name__) + +if TYPE_CHECKING: + from mcp import StdioServerParameters + from mcpadapt.core import MCPAdapt + from mcpadapt.crewai_adapter import CrewAIAdapter + + +try: + from mcp import StdioServerParameters + from mcpadapt.core import MCPAdapt + from mcpadapt.crewai_adapter import CrewAIAdapter + + MCP_AVAILABLE = True +except ImportError: + MCP_AVAILABLE = False + + +class MCPServerAdapter: + """Manages the lifecycle of an MCP server and makes its tools available to CrewAI. + + Note: tools can only be accessed after the server has been started with the + `start()` method. + + Attributes: + tools: The CrewAI tools available from the MCP server. + + Usage: + # context manager + stdio + with MCPServerAdapter(...) as tools: + # tools is now available + + # context manager + sse + with MCPServerAdapter({"url": "http://localhost:8000/sse"}) as tools: + # tools is now available + + # manually stop mcp server + try: + mcp_server = MCPServerAdapter(...) + tools = mcp_server.tools + ... + finally: + mcp_server.stop() + + # Best practice is to ensure cleanup is done after use. + mcp_server.stop() # run after crew().kickoff() + """ + + def __init__( + self, + serverparams: StdioServerParameters | dict[str, Any], + ): + """Initialize the MCP server. + + Args: + serverparams: The parameters for the MCP server; either a + `StdioServerParameters` for STDIO or a `dict` for SSE. + + """ + + super().__init__() + self._adapter = None + self._tools = None + + if not MCP_AVAILABLE: + import click + + if click.confirm( + "You are missing the 'mcp' package. Would you like to install it?"
+ ): + import subprocess + + try: + subprocess.run(["uv", "add", "mcp", "crewai-tools[mcp]"], check=True) + + except subprocess.CalledProcessError: + raise ImportError("Failed to install mcp package") + else: + raise ImportError( + "`mcp` package not found, please run `uv add crewai-tools[mcp]`" + ) + + try: + self._serverparams = serverparams + self._adapter = MCPAdapt(self._serverparams, CrewAIAdapter()) + self.start() + + except Exception as e: + if self._adapter is not None: + try: + self.stop() + except Exception as stop_e: + logger.error(f"Error during stop cleanup: {stop_e}") + raise RuntimeError(f"Failed to initialize MCP Adapter: {e}") from e + + def start(self): + """Start the MCP server and initialize the tools.""" + self._tools = self._adapter.__enter__() + + def stop(self): + """Stop the MCP server.""" + self._adapter.__exit__(None, None, None) + + @property + def tools(self) -> list[BaseTool]: + """The CrewAI tools available from the MCP server. + + Raises: + ValueError: If the MCP server is not started. + + Returns: + The CrewAI tools available from the MCP server. + """ + if self._tools is None: + raise ValueError( + "MCP server not started, run `mcp_server.start()` first before accessing `tools`" + ) + return self._tools + + def __enter__(self): + """ + Enter the context manager. Note that `__init__()` already starts the MCP server. + So tools should already be available. + """ + return self.tools + + def __exit__(self, exc_type, exc_value, traceback): + """Exit the context manager.""" + return self._adapter.__exit__(exc_type, exc_value, traceback) diff --git a/tests/adapters/mcp_adapter.py b/tests/adapters/mcp_adapter.py new file mode 100644 index 000000000..569a10ae6 --- /dev/null +++ b/tests/adapters/mcp_adapter.py @@ -0,0 +1,104 @@ +from textwrap import dedent + +import pytest +from mcp import StdioServerParameters + +from crewai_tools import MCPServerAdapter + + +@pytest.fixture +def echo_server_script(): + return dedent( + ''' + from mcp.server.fastmcp import FastMCP + + mcp = FastMCP("Echo Server") + + @mcp.tool() + def echo_tool(text: str) -> str: + """Echo the input text""" + return f"Echo: {text}" + + mcp.run() + ''' + ) + + +@pytest.fixture +def echo_server_sse_script(): + return dedent( + ''' + from mcp.server.fastmcp import FastMCP + + mcp = FastMCP("Echo Server", host="127.0.0.1", port=8000) + + @mcp.tool() + def echo_tool(text: str) -> str: + """Echo the input text""" + return f"Echo: {text}" + + mcp.run("sse") + ''' + ) + + +@pytest.fixture +def echo_sse_server(echo_server_sse_script): + import subprocess + import time + + # Start the SSE server process + process = subprocess.Popen( + ["python", "-c", echo_server_sse_script], + ) + + # Give the server a moment to start up + time.sleep(1) + + try: + yield {"url": "http://127.0.0.1:8000/sse"} + finally: + # Clean up the process when test is done + process.kill() + process.wait() + + +def test_context_manager_syntax(echo_server_script): + serverparams = StdioServerParameters( + command="uv", args=["run", "python", "-c", echo_server_script] + ) + with MCPServerAdapter(serverparams) as tools: + assert len(tools) == 1 + assert tools[0].name == "echo_tool" + assert tools[0].run(text="hello") == "Echo: hello" + +def test_context_manager_syntax_sse(echo_sse_server): + sse_serverparams = echo_sse_server + with MCPServerAdapter(sse_serverparams) as tools: + assert len(tools) == 1 + assert tools[0].name == "echo_tool" + assert tools[0].run(text="hello") == "Echo: hello" + +def
test_try_finally_syntax(echo_server_script): + serverparams = StdioServerParameters( + command="uv", args=["run", "python", "-c", echo_server_script] + ) + try: + mcp_server_adapter = MCPServerAdapter(serverparams) + tools = mcp_server_adapter.tools + assert len(tools) == 1 + assert tools[0].name == "echo_tool" + assert tools[0].run(text="hello") == "Echo: hello" + finally: + mcp_server_adapter.stop() + +def test_try_finally_syntax_sse(echo_sse_server): + sse_serverparams = echo_sse_server + mcp_server_adapter = MCPServerAdapter(sse_serverparams) + try: + tools = mcp_server_adapter.tools + assert len(tools) == 1 + assert tools[0].name == "echo_tool" + assert tools[0].run(text="hello") == "Echo: hello" + finally: + mcp_server_adapter.stop() From 7973c163f3846a390f762ed7e5d4b89e2b4031c1 Mon Sep 17 00:00:00 2001 From: Milad Noroozi Date: Tue, 22 Apr 2025 17:48:29 +0330 Subject: [PATCH 319/391] Add chunk reading functionality to FileReadTool (#266) * Add chunk reading functionality to FileReadTool - Added start_line parameter to specify which line to start reading from - Added line_count parameter to specify how many lines to read - Updated documentation with new parameters and examples * [FIX] Bugs and Discussions Fixed: start_line negative value Improved: File Reading Operations * [IMPROVE] Simplify line selection * [REFACTOR] use mock_open while preserving essential filesystem tests --- .../tools/file_read_tool/README.md | 13 +- .../tools/file_read_tool/file_read_tool.py | 36 ++++- tests/file_read_tool_test.py | 144 +++++++++++++----- 3 files changed, 150 insertions(+), 43 deletions(-) diff --git a/src/crewai_tools/tools/file_read_tool/README.md b/src/crewai_tools/tools/file_read_tool/README.md index d877d13f4..7b8a15488 100644 --- a/src/crewai_tools/tools/file_read_tool/README.md +++ b/src/crewai_tools/tools/file_read_tool/README.md @@ -1,9 +1,13 @@ # FileReadTool ## Description + The FileReadTool is a versatile component of the crewai_tools package, designed to streamline the process of reading and retrieving content from files. It is particularly useful in scenarios such as batch text file processing, runtime configuration file reading, and data importation for analytics. This tool supports various text-based file formats including `.txt`, `.csv`, `.json`, and adapts its functionality based on the file type, for instance, converting JSON content into a Python dictionary for easy use. +The tool also supports reading specific chunks of a file by specifying a starting line and the number of lines to read, which is helpful when working with large files that don't need to be loaded entirely into memory. + ## Installation + Install the crewai_tools package to use the FileReadTool in your projects: ```shell pip install 'crewai[tools]' ``` ## Example + To get started with the FileReadTool: ```python from crewai_tools import FileReadTool # Initialize the tool so the agent can read any file it knows or learns the path for file_read_tool = FileReadTool() # Initialize the tool with a specific file path, so the agent can only read the content of the specified file file_read_tool = FileReadTool(file_path='path/to/your/file.txt') + +# Read a specific chunk of the file (lines 100-149) +partial_content = file_read_tool.run(file_path='path/to/your/file.txt', start_line=100, line_count=50) ``` ## Arguments -- `file_path`: The path to the file you want to read.
It accepts both absolute and relative paths. Ensure the file exists and you have the necessary permissions to access it. +- `start_line`: (Optional) The line number to start reading from (1-indexed). Defaults to 1 (the first line). +- `line_count`: (Optional) The number of lines to read. If not provided, reads from the start_line to the end of the file. diff --git a/src/crewai_tools/tools/file_read_tool/file_read_tool.py b/src/crewai_tools/tools/file_read_tool/file_read_tool.py index 55fb5d490..3447cb0d6 100644 --- a/src/crewai_tools/tools/file_read_tool/file_read_tool.py +++ b/src/crewai_tools/tools/file_read_tool/file_read_tool.py @@ -8,6 +8,8 @@ class FileReadToolSchema(BaseModel): """Input for FileReadTool.""" file_path: str = Field(..., description="Mandatory file full path to read the file") + start_line: Optional[int] = Field(1, description="Line number to start reading from (1-indexed)") + line_count: Optional[int] = Field(None, description="Number of lines to read. If None, reads the entire file") class FileReadTool(BaseTool): @@ -31,10 +33,11 @@ class FileReadTool(BaseTool): >>> tool = FileReadTool(file_path="/path/to/file.txt") >>> content = tool.run() # Reads /path/to/file.txt >>> content = tool.run(file_path="/path/to/other.txt") # Reads other.txt + >>> content = tool.run(file_path="/path/to/file.txt", start_line=100, line_count=50) # Reads lines 100-149 """ name: str = "Read a file's content" - description: str = "A tool that reads the content of a file. To use this tool, provide a 'file_path' parameter with the path to the file you want to read." + description: str = "A tool that reads the content of a file. To use this tool, provide a 'file_path' parameter with the path to the file you want to read. Optionally, provide 'start_line' to start reading from a specific line and 'line_count' to limit the number of lines read." args_schema: Type[BaseModel] = FileReadToolSchema file_path: Optional[str] = None @@ -47,8 +50,10 @@ class FileReadTool(BaseTool): **kwargs: Additional keyword arguments passed to BaseTool. """ if file_path is not None: - kwargs['description'] = f"A tool that reads file content. The default file is {file_path}, but you can provide a different 'file_path' parameter to read another file." - + kwargs["description"] = ( + f"A tool that reads file content. The default file is {file_path}, but you can provide a different 'file_path' parameter to read another file. You can also specify 'start_line' and 'line_count' to read specific parts of the file." + ) + super().__init__(**kwargs) self.file_path = file_path @@ -57,15 +62,34 @@ class FileReadTool(BaseTool): **kwargs: Any, ) -> str: file_path = kwargs.get("file_path", self.file_path) + start_line = kwargs.get("start_line", 1) + line_count = kwargs.get("line_count", None) + if file_path is None: - return "Error: No file path provided. Please provide a file path either in the constructor or as an argument." + return ( + "Error: No file path provided. Please provide a file path either in the constructor or as an argument." + ) try: with open(file_path, "r") as file: - return file.read() + if start_line == 1 and line_count is None: + return file.read() + + start_idx = max(start_line - 1, 0) + + selected_lines = [ + line + for i, line in enumerate(file) + if i >= start_idx and (line_count is None or i < start_idx + line_count) + ] + + if not selected_lines and start_idx > 0: + return f"Error: Start line {start_line} exceeds the number of lines in the file." 
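+
+            # An empty selection with start_idx == 0 simply means the file
+            # itself is empty, so fall through and return an empty string below.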
+ + return "".join(selected_lines) except FileNotFoundError: return f"Error: File not found at path: {file_path}" except PermissionError: return f"Error: Permission denied when trying to read file: {file_path}" except Exception as e: - return f"Error: Failed to read file {file_path}. {str(e)}" \ No newline at end of file + return f"Error: Failed to read file {file_path}. {str(e)}" diff --git a/tests/file_read_tool_test.py b/tests/file_read_tool_test.py index 5957f863b..a0f2c695e 100644 --- a/tests/file_read_tool_test.py +++ b/tests/file_read_tool_test.py @@ -1,4 +1,5 @@ import os +from unittest.mock import mock_open, patch from crewai_tools import FileReadTool @@ -22,19 +23,15 @@ def test_file_read_tool_constructor(): def test_file_read_tool_run(): """Test FileReadTool _run method with file_path at runtime.""" - # Create a temporary test file test_file = "/tmp/test_file.txt" test_content = "Hello, World!" - with open(test_file, "w") as f: - f.write(test_content) - # Test reading file with runtime file_path - tool = FileReadTool() - result = tool._run(file_path=test_file) - assert result == test_content - - # Clean up - os.remove(test_file) + # Use mock_open to mock file operations + with patch("builtins.open", mock_open(read_data=test_content)): + # Test reading file with runtime file_path + tool = FileReadTool() + result = tool._run(file_path=test_file) + assert result == test_content def test_file_read_tool_error_handling(): @@ -48,41 +45,116 @@ def test_file_read_tool_error_handling(): result = tool._run(file_path="/nonexistent/file.txt") assert "Error: File not found at path:" in result - # Test permission error (create a file without read permissions) - test_file = "/tmp/no_permission.txt" - with open(test_file, "w") as f: - f.write("test") - os.chmod(test_file, 0o000) - - result = tool._run(file_path=test_file) - assert "Error: Permission denied" in result - - # Clean up - os.chmod(test_file, 0o666) # Restore permissions to delete - os.remove(test_file) + # Test permission error + with patch("builtins.open", side_effect=PermissionError()): + result = tool._run(file_path="/tmp/no_permission.txt") + assert "Error: Permission denied" in result def test_file_read_tool_constructor_and_run(): """Test FileReadTool using both constructor and runtime file paths.""" - # Create two test files test_file1 = "/tmp/test1.txt" test_file2 = "/tmp/test2.txt" content1 = "File 1 content" content2 = "File 2 content" - with open(test_file1, "w") as f1, open(test_file2, "w") as f2: - f1.write(content1) - f2.write(content2) + # First test with content1 + with patch("builtins.open", mock_open(read_data=content1)): + tool = FileReadTool(file_path=test_file1) + result = tool._run() + assert result == content1 - # Test that constructor file_path works - tool = FileReadTool(file_path=test_file1) - result = tool._run() - assert result == content1 + # Then test with content2 (should override constructor file_path) + with patch("builtins.open", mock_open(read_data=content2)): + result = tool._run(file_path=test_file2) + assert result == content2 - # Test that runtime file_path overrides constructor - result = tool._run(file_path=test_file2) - assert result == content2 - # Clean up - os.remove(test_file1) - os.remove(test_file2) +def test_file_read_tool_chunk_reading(): + """Test FileReadTool reading specific chunks of a file.""" + test_file = "/tmp/multiline_test.txt" + lines = [ + "Line 1\n", + "Line 2\n", + "Line 3\n", + "Line 4\n", + "Line 5\n", + "Line 6\n", + "Line 7\n", + "Line 8\n", + "Line 9\n", + "Line 
10\n", + ] + file_content = "".join(lines) + + with patch("builtins.open", mock_open(read_data=file_content)): + tool = FileReadTool() + + # Test reading a specific chunk (lines 3-5) + result = tool._run(file_path=test_file, start_line=3, line_count=3) + expected = "".join(lines[2:5]) # Lines are 0-indexed in the array + assert result == expected + + # Test reading from a specific line to the end + result = tool._run(file_path=test_file, start_line=8) + expected = "".join(lines[7:]) + assert result == expected + + # Test with default values (should read entire file) + result = tool._run(file_path=test_file) + expected = "".join(lines) + assert result == expected + + # Test when start_line is 1 but line_count is specified + result = tool._run(file_path=test_file, start_line=1, line_count=5) + expected = "".join(lines[0:5]) + assert result == expected + + +def test_file_read_tool_chunk_error_handling(): + """Test error handling for chunk reading.""" + test_file = "/tmp/short_test.txt" + lines = ["Line 1\n", "Line 2\n", "Line 3\n"] + file_content = "".join(lines) + + with patch("builtins.open", mock_open(read_data=file_content)): + tool = FileReadTool() + + # Test start_line exceeding file length + result = tool._run(file_path=test_file, start_line=10) + assert "Error: Start line 10 exceeds the number of lines in the file" in result + + # Test reading partial chunk when line_count exceeds available lines + result = tool._run(file_path=test_file, start_line=2, line_count=10) + expected = "".join(lines[1:]) # Should return from line 2 to end + assert result == expected + + +def test_file_read_tool_zero_or_negative_start_line(): + """Test that start_line values of 0 or negative read from the start of the file.""" + test_file = "/tmp/negative_test.txt" + lines = ["Line 1\n", "Line 2\n", "Line 3\n", "Line 4\n", "Line 5\n"] + file_content = "".join(lines) + + with patch("builtins.open", mock_open(read_data=file_content)): + tool = FileReadTool() + + # Test with start_line = 0 + result = tool._run(file_path=test_file, start_line=0) + expected = "".join(lines) # Should read the entire file + assert result == expected + + # Test with start_line = 0 and limited line count + result = tool._run(file_path=test_file, start_line=0, line_count=3) + expected = "".join(lines[0:3]) # Should read first 3 lines + assert result == expected + + # Test with negative start_line + result = tool._run(file_path=test_file, start_line=-5) + expected = "".join(lines) # Should read the entire file + assert result == expected + + # Test with negative start_line and limited line count + result = tool._run(file_path=test_file, start_line=-10, line_count=2) + expected = "".join(lines[0:2]) # Should read first 2 lines + assert result == expected From 78d0ec501db4de0a8a573a4cf6538db130c83502 Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Wed, 23 Apr 2025 11:29:07 -0300 Subject: [PATCH 320/391] fix: do not use deprecated distutils in FileWriterTool (#280) --- .../file_writer_tool/file_writer_tool.py | 21 ++- .../tests/test_file_writer_tool.py | 138 ++++++++++++++++++ 2 files changed, 155 insertions(+), 4 deletions(-) create mode 100644 src/crewai_tools/tools/file_writer_tool/tests/test_file_writer_tool.py diff --git a/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py b/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py index f975d3301..8b9ca5225 100644 --- a/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py +++ b/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py @@ -1,21 +1,34 @@ import os 
-from distutils.util import strtobool from typing import Any, Optional, Type from crewai.tools import BaseTool from pydantic import BaseModel +def strtobool(val) -> bool: + if isinstance(val, bool): + return val + val = val.lower() + if val in ("y", "yes", "t", "true", "on", "1"): + return True + elif val in ("n", "no", "f", "false", "off", "0"): + return False + else: + raise ValueError(f"invalid value to cast to bool: {val!r}") + + class FileWriterToolInput(BaseModel): filename: str directory: Optional[str] = "./" - overwrite: str = "False" + overwrite: str | bool = False content: str class FileWriterTool(BaseTool): name: str = "File Writer Tool" - description: str = "A tool to write content to a specified file. Accepts filename, content, and optionally a directory path and overwrite flag as input." + description: str = ( + "A tool to write content to a specified file. Accepts filename, content, and optionally a directory path and overwrite flag as input." + ) args_schema: Type[BaseModel] = FileWriterToolInput def _run(self, **kwargs: Any) -> str: @@ -28,7 +41,7 @@ class FileWriterTool(BaseTool): filepath = os.path.join(kwargs.get("directory") or "", kwargs["filename"]) # Convert overwrite to boolean - kwargs["overwrite"] = bool(strtobool(kwargs["overwrite"])) + kwargs["overwrite"] = strtobool(kwargs["overwrite"]) # Check if file exists and overwrite is not allowed if os.path.exists(filepath) and not kwargs["overwrite"]: diff --git a/src/crewai_tools/tools/file_writer_tool/tests/test_file_writer_tool.py b/src/crewai_tools/tools/file_writer_tool/tests/test_file_writer_tool.py new file mode 100644 index 000000000..d75ed30f2 --- /dev/null +++ b/src/crewai_tools/tools/file_writer_tool/tests/test_file_writer_tool.py @@ -0,0 +1,138 @@ +import os +import shutil +import tempfile + +import pytest + +from crewai_tools.tools.file_writer_tool.file_writer_tool import FileWriterTool + + +@pytest.fixture +def tool(): + return FileWriterTool() + + +@pytest.fixture +def temp_env(): + temp_dir = tempfile.mkdtemp() + test_file = "test.txt" + test_content = "Hello, World!" 
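+
+    # Hand the temp directory and sample file details to each test; the
+    # directory is removed after the yield once the test completes.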
+ + yield { + "temp_dir": temp_dir, + "test_file": test_file, + "test_content": test_content, + } + + shutil.rmtree(temp_dir, ignore_errors=True) + + +def get_test_path(filename, directory): + return os.path.join(directory, filename) + + +def read_file(path): + with open(path, "r") as f: + return f.read() + + +def test_basic_file_write(tool, temp_env): + result = tool._run( + filename=temp_env["test_file"], + directory=temp_env["temp_dir"], + content=temp_env["test_content"], + overwrite=True, + ) + + path = get_test_path(temp_env["test_file"], temp_env["temp_dir"]) + assert os.path.exists(path) + assert read_file(path) == temp_env["test_content"] + assert "successfully written" in result + + +def test_directory_creation(tool, temp_env): + new_dir = os.path.join(temp_env["temp_dir"], "nested_dir") + result = tool._run( + filename=temp_env["test_file"], + directory=new_dir, + content=temp_env["test_content"], + overwrite=True, + ) + + path = get_test_path(temp_env["test_file"], new_dir) + assert os.path.exists(new_dir) + assert os.path.exists(path) + assert "successfully written" in result + + +@pytest.mark.parametrize( + "overwrite", + ["y", "yes", "t", "true", "on", "1", True], +) +def test_overwrite_true(tool, temp_env, overwrite): + path = get_test_path(temp_env["test_file"], temp_env["temp_dir"]) + with open(path, "w") as f: + f.write("Original content") + + result = tool._run( + filename=temp_env["test_file"], + directory=temp_env["temp_dir"], + content="New content", + overwrite=overwrite, + ) + + assert read_file(path) == "New content" + assert "successfully written" in result + + +def test_invalid_overwrite_value(tool, temp_env): + result = tool._run( + filename=temp_env["test_file"], + directory=temp_env["temp_dir"], + content=temp_env["test_content"], + overwrite="invalid", + ) + assert "invalid value" in result + + +def test_missing_required_fields(tool, temp_env): + result = tool._run( + directory=temp_env["temp_dir"], + content=temp_env["test_content"], + overwrite=True, + ) + assert "An error occurred while accessing key: 'filename'" in result + + +def test_empty_content(tool, temp_env): + result = tool._run( + filename=temp_env["test_file"], + directory=temp_env["temp_dir"], + content="", + overwrite=True, + ) + + path = get_test_path(temp_env["test_file"], temp_env["temp_dir"]) + assert os.path.exists(path) + assert read_file(path) == "" + assert "successfully written" in result + + +@pytest.mark.parametrize( + "overwrite", + ["n", "no", "f", "false", "off", "0", False], +) +def test_file_exists_error_handling(tool, temp_env, overwrite): + path = get_test_path(temp_env["test_file"], temp_env["temp_dir"]) + with open(path, "w") as f: + f.write("Pre-existing content") + + result = tool._run( + filename=temp_env["test_file"], + directory=temp_env["temp_dir"], + content="Should not be written", + overwrite=overwrite, + ) + + assert "already exists and overwrite option was not passed" in result + assert read_file(path) == "Pre-existing content" From edc9b44c474d069f246e37bf88ab0c86f5b74e24 Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Wed, 23 Apr 2025 16:42:05 -0300 Subject: [PATCH 321/391] Add secure Python Sandbox and Enhanced Logging in CodeInterpreterTool (#281) * feat: add a safety sandbox to run Python code This sandbox blocks a bunch of dangerous imports and built-in functions * feat: add more logs and warning about code execution * test: add tests to cover sandbox code execution * docs: add Google-style docstrings and type hints to printer and code_interpreter * 
chore: renaming globals and locals paramenters --------- Co-authored-by: Greyson Lalonde --- src/crewai_tools/printer.py | 131 +++++++++ .../code_interpreter_tool.py | 259 +++++++++++++++++- tests/tools/test_code_interpreter_tool.py | 198 ++++++++++--- 3 files changed, 534 insertions(+), 54 deletions(-) create mode 100644 src/crewai_tools/printer.py diff --git a/src/crewai_tools/printer.py b/src/crewai_tools/printer.py new file mode 100644 index 000000000..c67005ddd --- /dev/null +++ b/src/crewai_tools/printer.py @@ -0,0 +1,131 @@ +"""Utility for colored console output.""" + +from typing import Optional + + +class Printer: + """Handles colored console output formatting.""" + + @staticmethod + def print(content: str, color: Optional[str] = None) -> None: + """Prints content with optional color formatting. + + Args: + content: The string to be printed. + color: Optional color name to format the output. If provided, + must match one of the _print_* methods available in this class. + If not provided or if the color is not supported, prints without + formatting. + """ + if hasattr(Printer, f"_print_{color}"): + getattr(Printer, f"_print_{color}")(content) + else: + print(content) + + @staticmethod + def _print_bold_purple(content: str) -> None: + """Prints content in bold purple color. + + Args: + content: The string to be printed in bold purple. + """ + print("\033[1m\033[95m {}\033[00m".format(content)) + + @staticmethod + def _print_bold_green(content: str) -> None: + """Prints content in bold green color. + + Args: + content: The string to be printed in bold green. + """ + print("\033[1m\033[92m {}\033[00m".format(content)) + + @staticmethod + def _print_purple(content: str) -> None: + """Prints content in purple color. + + Args: + content: The string to be printed in purple. + """ + print("\033[95m {}\033[00m".format(content)) + + @staticmethod + def _print_red(content: str) -> None: + """Prints content in red color. + + Args: + content: The string to be printed in red. + """ + print("\033[91m {}\033[00m".format(content)) + + @staticmethod + def _print_bold_blue(content: str) -> None: + """Prints content in bold blue color. + + Args: + content: The string to be printed in bold blue. + """ + print("\033[1m\033[94m {}\033[00m".format(content)) + + @staticmethod + def _print_yellow(content: str) -> None: + """Prints content in yellow color. + + Args: + content: The string to be printed in yellow. + """ + print("\033[93m {}\033[00m".format(content)) + + @staticmethod + def _print_bold_yellow(content: str) -> None: + """Prints content in bold yellow color. + + Args: + content: The string to be printed in bold yellow. + """ + print("\033[1m\033[93m {}\033[00m".format(content)) + + @staticmethod + def _print_cyan(content: str) -> None: + """Prints content in cyan color. + + Args: + content: The string to be printed in cyan. + """ + print("\033[96m {}\033[00m".format(content)) + + @staticmethod + def _print_bold_cyan(content: str) -> None: + """Prints content in bold cyan color. + + Args: + content: The string to be printed in bold cyan. + """ + print("\033[1m\033[96m {}\033[00m".format(content)) + + @staticmethod + def _print_magenta(content: str) -> None: + """Prints content in magenta color. + + Args: + content: The string to be printed in magenta. + """ + print("\033[35m {}\033[00m".format(content)) + + @staticmethod + def _print_bold_magenta(content: str) -> None: + """Prints content in bold magenta color. + + Args: + content: The string to be printed in bold magenta. 
+ """ + print("\033[1m\033[35m {}\033[00m".format(content)) + + @staticmethod + def _print_green(content: str) -> None: + """Prints content in green color. + + Args: + content: The string to be printed in green. + """ + print("\033[32m {}\033[00m".format(content)) diff --git a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py index 2a0f9ffe6..95559f2a7 100644 --- a/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py +++ b/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py @@ -1,18 +1,31 @@ +"""Code Interpreter Tool for executing Python code in isolated environments. + +This module provides a tool for executing Python code either in a Docker container for +safe isolation or directly in a restricted sandbox. It includes mechanisms for blocking +potentially unsafe operations and importing restricted modules. +""" + import importlib.util import os -from typing import List, Optional, Type +from types import ModuleType +from typing import Any, Dict, List, Optional, Type from crewai.tools import BaseTool -from docker import from_env as docker_from_env from docker import DockerClient -from docker.models.containers import Container +from docker import from_env as docker_from_env from docker.errors import ImageNotFound, NotFound from docker.models.containers import Container from pydantic import BaseModel, Field +from crewai_tools.printer import Printer + class CodeInterpreterSchema(BaseModel): - """Input for CodeInterpreterTool.""" + """Schema for defining inputs to the CodeInterpreterTool. + + This schema defines the required parameters for code execution, + including the code to run and any libraries that need to be installed. + """ code: str = Field( ..., @@ -25,7 +38,102 @@ class CodeInterpreterSchema(BaseModel): ) +class SandboxPython: + """A restricted Python execution environment for running code safely. + + This class provides methods to safely execute Python code by restricting access to + potentially dangerous modules and built-in functions. It creates a sandboxed + environment where harmful operations are blocked. + """ + + BLOCKED_MODULES = { + "os", + "sys", + "subprocess", + "shutil", + "importlib", + "inspect", + "tempfile", + "sysconfig", + "builtins", + } + + UNSAFE_BUILTINS = { + "exec", + "eval", + "open", + "compile", + "input", + "globals", + "locals", + "vars", + "help", + "dir", + } + + @staticmethod + def restricted_import( + name: str, + custom_globals: Optional[Dict[str, Any]] = None, + custom_locals: Optional[Dict[str, Any]] = None, + fromlist: Optional[List[str]] = None, + level: int = 0, + ) -> ModuleType: + """A restricted import function that blocks importing of unsafe modules. + + Args: + name: The name of the module to import. + custom_globals: Global namespace to use. + custom_locals: Local namespace to use. + fromlist: List of items to import from the module. + level: The level value passed to __import__. + + Returns: + The imported module if allowed. + + Raises: + ImportError: If the module is in the blocked modules list. + """ + if name in SandboxPython.BLOCKED_MODULES: + raise ImportError(f"Importing '{name}' is not allowed.") + return __import__(name, custom_globals, custom_locals, fromlist or (), level) + + @staticmethod + def safe_builtins() -> Dict[str, Any]: + """Creates a dictionary of built-in functions with unsafe ones removed. + + Returns: + A dictionary of safe built-in functions and objects. 
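+
+        Example:
+            >>> safe = SandboxPython.safe_builtins()
+            >>> "print" in safe and "eval" not in safe
+            True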
+ """ + import builtins + + safe_builtins = { + k: v + for k, v in builtins.__dict__.items() + if k not in SandboxPython.UNSAFE_BUILTINS + } + safe_builtins["__import__"] = SandboxPython.restricted_import + return safe_builtins + + @staticmethod + def exec(code: str, locals: Dict[str, Any]) -> None: + """Executes Python code in a restricted environment. + + Args: + code: The Python code to execute as a string. + locals: A dictionary that will be used for local variable storage. + """ + exec(code, {"__builtins__": SandboxPython.safe_builtins()}, locals) + + class CodeInterpreterTool(BaseTool): + """A tool for executing Python code in isolated environments. + + This tool provides functionality to run Python code either in a Docker container + for safe isolation or directly in a restricted sandbox. It can handle installing + Python packages and executing arbitrary Python code. + """ + name: str = "Code Interpreter" description: str = "Interprets Python3 code strings with a final print statement." args_schema: Type[BaseModel] = CodeInterpreterSchema @@ -36,18 +144,28 @@ class CodeInterpreterTool(BaseTool): unsafe_mode: bool = False @staticmethod - def _get_installed_package_path(): + def _get_installed_package_path() -> str: + """Gets the installation path of the crewai_tools package. + + Returns: + The directory path where the package is installed. + """ spec = importlib.util.find_spec("crewai_tools") return os.path.dirname(spec.origin) def _verify_docker_image(self) -> None: - """ - Verify if the Docker image is available. Optionally use a user-provided Dockerfile. + """Verifies if the Docker image is available or builds it if necessary. + + Checks if the required Docker image exists. If not, builds it using either a + user-provided Dockerfile or the default one included with the package. + + Raises: + FileNotFoundError: If the Dockerfile cannot be found. """ client = ( docker_from_env() - if self.user_docker_base_url == None + if self.user_docker_base_url is None else DockerClient(base_url=self.user_docker_base_url) ) @@ -74,22 +192,41 @@ class CodeInterpreterTool(BaseTool): ) def _run(self, **kwargs) -> str: + """Runs the code interpreter tool with the provided arguments. + + Args: + **kwargs: Keyword arguments that should include 'code' and 'libraries_used'. + + Returns: + The output of the executed code as a string. + """ code = kwargs.get("code", self.code) libraries_used = kwargs.get("libraries_used", []) if self.unsafe_mode: return self.run_code_unsafe(code, libraries_used) else: - return self.run_code_in_docker(code, libraries_used) + return self.run_code_safety(code, libraries_used) def _install_libraries(self, container: Container, libraries: List[str]) -> None: - """ - Install missing libraries in the Docker container + """Installs required Python libraries in the Docker container. + + Args: + container: The Docker container where libraries will be installed. + libraries: A list of library names to install using pip. """ for library in libraries: container.exec_run(["pip", "install", library]) def _init_docker_container(self) -> Container: + """Initializes and returns a Docker container for code execution. + + Stops and removes any existing container with the same name before creating + a new one. Maps the current working directory to /workspace in the container. + + Returns: + A Docker container object ready for code execution. 
+ """ container_name = "code-interpreter" client = docker_from_env() current_path = os.getcwd() @@ -111,7 +248,68 @@ class CodeInterpreterTool(BaseTool): volumes={current_path: {"bind": "/workspace", "mode": "rw"}}, # type: ignore ) + def _check_docker_available(self) -> bool: + """Checks if Docker is available and running on the system. + + Attempts to run the 'docker info' command to verify Docker availability. + Prints appropriate messages if Docker is not installed or not running. + + Returns: + True if Docker is available and running, False otherwise. + """ + import subprocess + + try: + subprocess.run( + ["docker", "info"], + check=True, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + timeout=1, + ) + return True + except (subprocess.CalledProcessError, subprocess.TimeoutExpired): + Printer.print( + "Docker is installed but not running or inaccessible.", + color="bold_purple", + ) + return False + except FileNotFoundError: + Printer.print("Docker is not installed", color="bold_purple") + return False + + def run_code_safety(self, code: str, libraries_used: List[str]) -> str: + """Runs code in the safest available environment. + + Attempts to run code in Docker if available, falls back to a restricted + sandbox if Docker is not available. + + Args: + code: The Python code to execute as a string. + libraries_used: A list of Python library names to install before execution. + + Returns: + The output of the executed code as a string. + """ + if self._check_docker_available(): + return self.run_code_in_docker(code, libraries_used) + else: + return self.run_code_in_restricted_sandbox(code) + def run_code_in_docker(self, code: str, libraries_used: List[str]) -> str: + """Runs Python code in a Docker container for safe isolation. + + Creates a Docker container, installs the required libraries, executes the code, + and then cleans up by stopping and removing the container. + + Args: + code: The Python code to execute as a string. + libraries_used: A list of Python library names to install before execution. + + Returns: + The output of the executed code as a string, or an error message if execution failed. + """ + Printer.print("Running code in Docker environment", color="bold_blue") self._verify_docker_image() container = self._init_docker_container() self._install_libraries(container, libraries_used) @@ -125,10 +323,43 @@ class CodeInterpreterTool(BaseTool): return f"Something went wrong while running the code: \n{exec_result.output.decode('utf-8')}" return exec_result.output.decode("utf-8") + def run_code_in_restricted_sandbox(self, code: str) -> str: + """Runs Python code in a restricted sandbox environment. + + Executes the code with restricted access to potentially dangerous modules and + built-in functions for basic safety when Docker is not available. + + Args: + code: The Python code to execute as a string. + + Returns: + The value of the 'result' variable from the executed code, + or an error message if execution failed. + """ + Printer.print("Running code in restricted sandbox", color="yellow") + exec_locals = {} + try: + SandboxPython.exec(code=code, locals=exec_locals) + return exec_locals.get("result", "No result variable found.") + except Exception as e: + return f"An error occurred: {str(e)}" + def run_code_unsafe(self, code: str, libraries_used: List[str]) -> str: + """Runs code directly on the host machine without any safety restrictions. + + WARNING: This mode is unsafe and should only be used in trusted environments + with code from trusted sources. 
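+        Prefer run_code_safety(), which picks the safest available option:
+        Docker when it is running, the restricted sandbox otherwise.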
+ + Args: + code: The Python code to execute as a string. + libraries_used: A list of Python library names to install before execution. + + Returns: + The value of the 'result' variable from the executed code, + or an error message if execution failed. """ - Run the code directly on the host machine (unsafe mode). - """ + + Printer.print("WARNING: Running code in unsafe mode", color="bold_magenta") # Install libraries on the host machine for library in libraries_used: os.system(f"pip install {library}") @@ -139,4 +370,4 @@ class CodeInterpreterTool(BaseTool): exec(code, {}, exec_locals) return exec_locals.get("result", "No result variable found.") except Exception as e: - return f"An error occurred: {str(e)}" \ No newline at end of file + return f"An error occurred: {str(e)}" diff --git a/tests/tools/test_code_interpreter_tool.py b/tests/tools/test_code_interpreter_tool.py index e281fffaf..e46c8bde4 100644 --- a/tests/tools/test_code_interpreter_tool.py +++ b/tests/tools/test_code_interpreter_tool.py @@ -1,57 +1,175 @@ -import unittest from unittest.mock import patch +import pytest + from crewai_tools.tools.code_interpreter_tool.code_interpreter_tool import ( CodeInterpreterTool, + SandboxPython, ) -class TestCodeInterpreterTool(unittest.TestCase): - @patch( - "crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker_from_env" +@pytest.fixture +def printer_mock(): + with patch("crewai_tools.printer.Printer.print") as mock: + yield mock + + +@pytest.fixture +def docker_unavailable_mock(): + with patch( + "crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.CodeInterpreterTool._check_docker_available", + return_value=False, + ) as mock: + yield mock + + +@patch("crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker_from_env") +def test_run_code_in_docker(docker_mock, printer_mock): + tool = CodeInterpreterTool() + code = "print('Hello, World!')" + libraries_used = ["numpy", "pandas"] + expected_output = "Hello, World!\n" + + docker_mock().containers.run().exec_run().exit_code = 0 + docker_mock().containers.run().exec_run().output = expected_output.encode() + + result = tool.run_code_in_docker(code, libraries_used) + assert result == expected_output + printer_mock.assert_called_with( + "Running code in Docker environment", color="bold_blue" ) - def test_run_code_in_docker(self, docker_mock): - tool = CodeInterpreterTool() - code = "print('Hello, World!')" - libraries_used = ["numpy", "pandas"] - expected_output = "Hello, World!\n" - docker_mock().containers.run().exec_run().exit_code = 0 - docker_mock().containers.run().exec_run().output = expected_output.encode() - result = tool.run_code_in_docker(code, libraries_used) - self.assertEqual(result, expected_output) +@patch("crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker_from_env") +def test_run_code_in_docker_with_error(docker_mock, printer_mock): + tool = CodeInterpreterTool() + code = "print(1/0)" + libraries_used = ["numpy", "pandas"] + expected_output = "Something went wrong while running the code: \nZeroDivisionError: division by zero\n" - @patch( - "crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker_from_env" + docker_mock().containers.run().exec_run().exit_code = 1 + docker_mock().containers.run().exec_run().output = ( + b"ZeroDivisionError: division by zero\n" ) - def test_run_code_in_docker_with_error(self, docker_mock): - tool = CodeInterpreterTool() - code = "print(1/0)" - libraries_used = ["numpy", "pandas"] - expected_output = "Something went wrong 
while running the code: \nZeroDivisionError: division by zero\n" - docker_mock().containers.run().exec_run().exit_code = 1 - docker_mock().containers.run().exec_run().output = ( - b"ZeroDivisionError: division by zero\n" - ) - result = tool.run_code_in_docker(code, libraries_used) - - self.assertEqual(result, expected_output) - - @patch( - "crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker_from_env" + result = tool.run_code_in_docker(code, libraries_used) + assert result == expected_output + printer_mock.assert_called_with( + "Running code in Docker environment", color="bold_blue" ) - def test_run_code_in_docker_with_script(self, docker_mock): - tool = CodeInterpreterTool() - code = """print("This is line 1") + + +@patch("crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker_from_env") +def test_run_code_in_docker_with_script(docker_mock, printer_mock): + tool = CodeInterpreterTool() + code = """print("This is line 1") print("This is line 2")""" - libraries_used = [] # No additional libraries needed for this test - expected_output = "This is line 1\nThis is line 2\n" + libraries_used = [] + expected_output = "This is line 1\nThis is line 2\n" - # Mock Docker responses - docker_mock().containers.run().exec_run().exit_code = 0 - docker_mock().containers.run().exec_run().output = expected_output.encode() + docker_mock().containers.run().exec_run().exit_code = 0 + docker_mock().containers.run().exec_run().output = expected_output.encode() - result = tool.run_code_in_docker(code, libraries_used) - self.assertEqual(result, expected_output) + result = tool.run_code_in_docker(code, libraries_used) + assert result == expected_output + printer_mock.assert_called_with( + "Running code in Docker environment", color="bold_blue" + ) + + +def test_restricted_sandbox_basic_code_execution(printer_mock, docker_unavailable_mock): + """Test basic code execution.""" + tool = CodeInterpreterTool() + code = """ +result = 2 + 2 +print(result) +""" + result = tool.run(code=code, libraries_used=[]) + printer_mock.assert_called_with( + "Running code in restricted sandbox", color="yellow" + ) + assert result == 4 + + +def test_restricted_sandbox_running_with_blocked_modules( + printer_mock, docker_unavailable_mock +): + """Test that restricted modules cannot be imported.""" + tool = CodeInterpreterTool() + restricted_modules = SandboxPython.BLOCKED_MODULES + + for module in restricted_modules: + code = f""" +import {module} +result = "Import succeeded" +""" + result = tool.run(code=code, libraries_used=[]) + printer_mock.assert_called_with( + "Running code in restricted sandbox", color="yellow" + ) + + assert f"An error occurred: Importing '{module}' is not allowed" in result + + +def test_restricted_sandbox_running_with_blocked_builtins( + printer_mock, docker_unavailable_mock +): + """Test that restricted builtins are not available.""" + tool = CodeInterpreterTool() + restricted_builtins = SandboxPython.UNSAFE_BUILTINS + + for builtin in restricted_builtins: + code = f""" +{builtin}("test") +result = "Builtin available" +""" + result = tool.run(code=code, libraries_used=[]) + printer_mock.assert_called_with( + "Running code in restricted sandbox", color="yellow" + ) + assert f"An error occurred: name '{builtin}' is not defined" in result + + +def test_restricted_sandbox_running_with_no_result_variable( + printer_mock, docker_unavailable_mock +): + """Test behavior when no result variable is set.""" + tool = CodeInterpreterTool() + code = """ +x = 10 +""" + result = 
tool.run(code=code, libraries_used=[])
+    printer_mock.assert_called_with(
+        "Running code in restricted sandbox", color="yellow"
+    )
+    assert result == "No result variable found."
+
+
+def test_unsafe_mode_running_with_no_result_variable(
+    printer_mock, docker_unavailable_mock
+):
+    """Test behavior when no result variable is set."""
+    tool = CodeInterpreterTool(unsafe_mode=True)
+    code = """
+x = 10
+"""
+    result = tool.run(code=code, libraries_used=[])
+    printer_mock.assert_called_with(
+        "WARNING: Running code in unsafe mode", color="bold_magenta"
+    )
+    assert result == "No result variable found."
+
+
+def test_unsafe_mode_running_unsafe_code(printer_mock, docker_unavailable_mock):
+    """Test that unsafe mode runs code the restricted sandbox would block."""
+    tool = CodeInterpreterTool(unsafe_mode=True)
+    code = """
+import os
+os.system("ls -la")
+result = eval("5/1")
+"""
+    result = tool.run(code=code, libraries_used=[])
+    printer_mock.assert_called_with(
+        "WARNING: Running code in unsafe mode", color="bold_magenta"
+    )
+    assert result == 5.0

From 4d86da80c3516e151d332d8d4a7a888f158818b7 Mon Sep 17 00:00:00 2001
From: lorenzejay
Date: Thu, 24 Apr 2025 14:20:40 -0700
Subject: [PATCH 322/391] feat: add EnterpriseActionKitToolAdapter and
 EnterpriseActionTool for enterprise action execution

- Introduced EnterpriseActionTool to execute specific enterprise actions
  with dynamic parameter validation.
- Added EnterpriseActionKitToolAdapter to manage and create tool instances
  for available enterprise actions.
- Implemented methods for fetching action schemas from the API and creating
  corresponding tools.
- Enhanced error handling and provided detailed descriptions for tool
  parameters.
- Included a main execution block for testing the adapter with a sample
  agent and task setup.
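A minimal usage sketch for the adapter introduced here (the environment
variable name mirrors the testing block in this patch and is only an
illustration):

```python
import os

from crewai_tools.adapters.enterprise_adapter import EnterpriseActionKitToolAdapter

# Assumes ENTERPRISE_TOOL_TOKEN holds a valid enterprise action token.
adapter = EnterpriseActionKitToolAdapter(
    enterprise_action_token=os.environ["ENTERPRISE_TOOL_TOKEN"]
)

# One BaseTool instance is created per action exposed by the ActionKit API.
for tool in adapter.tools():
    print(tool.name)
```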
--- .../adapters/enterprise_adapter.py | 259 ++++++++++++++++++ 1 file changed, 259 insertions(+) create mode 100644 src/crewai_tools/adapters/enterprise_adapter.py diff --git a/src/crewai_tools/adapters/enterprise_adapter.py b/src/crewai_tools/adapters/enterprise_adapter.py new file mode 100644 index 000000000..9a3d1860a --- /dev/null +++ b/src/crewai_tools/adapters/enterprise_adapter.py @@ -0,0 +1,259 @@ +import requests +from pydantic import Field, create_model +from typing import List, Any, Dict, Optional +import json +from crewai.tools import BaseTool +from crewai import Agent, Task, Crew + + +ENTERPRISE_ACTION_KIT_PROJECT_ID = "dd525517-df22-49d2-a69e-6a0eed211166" + + +class EnterpriseActionTool(BaseTool): + """A tool that executes a specific enterprise action.""" + + enterprise_action_token: str = Field( + default="", description="The enterprise action token" + ) + action_name: str = Field(default="", description="The name of the action") + action_schema: Dict[str, Any] = Field( + default={}, description="The schema of the action" + ) + + def __init__( + self, + name: str, + description: str, + enterprise_action_token: str, + action_name: str, + action_schema: Dict[str, Any], + ): + schema_props = ( + action_schema.get("function", {}) + .get("parameters", {}) + .get("properties", {}) + ) + required = ( + action_schema.get("function", {}).get("parameters", {}).get("required", []) + ) + + # Define field definitions for the model + field_definitions = {} + for param_name, param_details in schema_props.items(): + param_type = str # Default to string type + param_desc = param_details.get("description", "") + is_required = param_name in required + + # Basic type mapping (can be extended) + if param_details.get("type") == "integer": + param_type = int + elif param_details.get("type") == "number": + param_type = float + elif param_details.get("type") == "boolean": + param_type = bool + + # Create field with appropriate type and config + field_definitions[param_name] = ( + param_type if is_required else Optional[param_type], + Field(description=param_desc), + ) + + # Create the model + if field_definitions: + args_schema = create_model( + f"{name.capitalize()}Schema", **field_definitions + ) + else: + # Fallback for empty schema + args_schema = create_model( + f"{name.capitalize()}Schema", + input_text=(str, Field(description="Input for the action")), + ) + + super().__init__(name=name, description=description, args_schema=args_schema) + self.enterprise_action_token = enterprise_action_token + self.action_name = action_name + self.action_schema = action_schema + + def _run(self, **kwargs) -> str: + """Execute the specific enterprise action with validated parameters.""" + try: + params = {k: v for k, v in kwargs.items() if v is not None} + + api_url = f"https://worker-actionkit.tools.crewai.com/projects/{ENTERPRISE_ACTION_KIT_PROJECT_ID}/actions" + headers = { + "Authorization": f"Bearer {self.enterprise_action_token}", + "Content-Type": "application/json", + } + payload = {"action": self.action_name, "parameters": params} + + response = requests.post( + url=api_url, headers=headers, json=payload, timeout=60 + ) + + data = response.json() + if not response.ok: + error_message = data.get("error", {}).get("message", json.dumps(data)) + return f"API request failed: {error_message}" + + return json.dumps(data, indent=2) + + except Exception as e: + return f"Error executing action {self.action_name}: {str(e)}" + + +class EnterpriseActionKitToolAdapter: + """Adapter that creates BaseTool instances 
for enterprise actions.""" + + def __init__(self, enterprise_action_token: str): + """Initialize the adapter with an enterprise action token.""" + if not enterprise_action_token: + raise ValueError("enterprise_action_token is required") + + self.enterprise_action_token = enterprise_action_token + self._actions_schema = {} + self._tools = None + + def tools(self) -> List[BaseTool]: + """Get the list of tools created from enterprise actions. + + Returns: + List of BaseTool instances, one for each enterprise action. + """ + if self._tools is None: + self._fetch_actions() + self._create_tools() + return self._tools + + def _fetch_actions(self): + """Fetch available actions from the API.""" + try: + actions_url = f"https://worker-actionkit.tools.crewai.com/projects/{ENTERPRISE_ACTION_KIT_PROJECT_ID}/actions" + headers = {"Authorization": f"Bearer {self.enterprise_action_token}"} + params = {"format": "json_schema"} + + response = requests.get( + actions_url, headers=headers, params=params, timeout=30 + ) + response.raise_for_status() + + raw_data = response.json() + if "actions" not in raw_data: + print(f"Unexpected API response structure: {raw_data}") + return + + # Parse the actions schema + parsed_schema = {} + action_categories = raw_data["actions"] + + for category, action_list in action_categories.items(): + if isinstance(action_list, list): + for action in action_list: + func_details = action.get("function") + if func_details and "name" in func_details: + action_name = func_details["name"] + parsed_schema[action_name] = action + + self._actions_schema = parsed_schema + + except Exception as e: + print(f"Error fetching actions: {e}") + import traceback + + traceback.print_exc() + + def _create_tools(self): + """Create BaseTool instances for each action.""" + tools = [] + + for action_name, action_schema in self._actions_schema.items(): + function_details = action_schema.get("function", {}) + description = function_details.get("description", f"Execute {action_name}") + + # Get parameter info for a better description + parameters = function_details.get("parameters", {}).get("properties", {}) + param_info = [] + for param_name, param_details in parameters.items(): + param_desc = param_details.get("description", "") + required = param_name in function_details.get("parameters", {}).get( + "required", [] + ) + param_info.append( + f"- {param_name}: {param_desc} {'(required)' if required else '(optional)'}" + ) + + full_description = f"{description}\n\nParameters:\n" + "\n".join(param_info) + + tool = EnterpriseActionTool( + name=action_name.lower().replace(" ", "_"), + description=full_description, + action_name=action_name, + action_schema=action_schema, + enterprise_action_token=self.enterprise_action_token, + ) + + tools.append(tool) + + self._tools = tools + + # Adding context manager support for convenience, but direct usage is also supported + def __enter__(self): + return self.tools() + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + + +if __name__ == "__main__": + # IMPORTANT: Replace 'YOUR_TOKEN_HERE' with your actual valid token + # You can also load it from an environment variable for better security + import os + + token = os.environ.get( + "ENTERPRISE_TOOL_TOKEN", + ) # Replace YOUR_TOKEN_HERE if not using env var + + if token == "YOUR_TOKEN_HERE" or not token: + print("Error: ENTERPRISE_TOOL_TOKEN is not set.") + print( + "Please replace 'YOUR_TOKEN_HERE' in the code or set the ENTERPRISE_TOOL_TOKEN environment variable." 
+ ) + else: + try: + print("Initializing EnterpriseActionKitTool...") + adapter = EnterpriseActionKitToolAdapter(enterprise_action_token=token) + available_tools = adapter.tools() + + agent = Agent( + model="gpt-4o", + tools=available_tools, + role="You are are expert at google sheets", + goal="Get the sheet with the data x", + backstory="You are a expert at google sheets", + verbose=True, + ) + + task = Task( + description="return data from the sheet with the id: {spreadsheetId}, with the limit: {limit}", + expected_output="The data from the sheet with the id: {spreadsheetId} with the limit: {limit}", + agent=agent, + ) + crew = Crew( + agents=[agent], + tasks=[task], + verbose=True, + ) + result = crew.kickoff( + inputs={ + "spreadsheetId": "1DHDIWGdhUXqXeYOO8yA44poiY222qHPQEUu28olipKs", + "limit": 2, + } + ) + + except ValueError as e: + print(f"\nConfiguration Error: {e}") + except Exception as e: + print(f"\nAn unexpected error occurred during execution: {e}") + import traceback + + traceback.print_exc() From 40dd22ce2ce3b40a42c9f03c1155af2f904bc0d9 Mon Sep 17 00:00:00 2001 From: lorenzejay Date: Thu, 24 Apr 2025 14:39:55 -0700 Subject: [PATCH 323/391] refactor: remove main execution block from EnterpriseActionKitToolAdapter - Removed the main execution block that included token validation and agent/task setup for testing. - This change streamlines the adapter's code, focusing on its core functionality without execution logic. --- .../adapters/enterprise_adapter.py | 56 ------------------- 1 file changed, 56 deletions(-) diff --git a/src/crewai_tools/adapters/enterprise_adapter.py b/src/crewai_tools/adapters/enterprise_adapter.py index 9a3d1860a..1d93894b7 100644 --- a/src/crewai_tools/adapters/enterprise_adapter.py +++ b/src/crewai_tools/adapters/enterprise_adapter.py @@ -3,7 +3,6 @@ from pydantic import Field, create_model from typing import List, Any, Dict, Optional import json from crewai.tools import BaseTool -from crewai import Agent, Task, Crew ENTERPRISE_ACTION_KIT_PROJECT_ID = "dd525517-df22-49d2-a69e-6a0eed211166" @@ -202,58 +201,3 @@ class EnterpriseActionKitToolAdapter: def __exit__(self, exc_type, exc_val, exc_tb): pass - - -if __name__ == "__main__": - # IMPORTANT: Replace 'YOUR_TOKEN_HERE' with your actual valid token - # You can also load it from an environment variable for better security - import os - - token = os.environ.get( - "ENTERPRISE_TOOL_TOKEN", - ) # Replace YOUR_TOKEN_HERE if not using env var - - if token == "YOUR_TOKEN_HERE" or not token: - print("Error: ENTERPRISE_TOOL_TOKEN is not set.") - print( - "Please replace 'YOUR_TOKEN_HERE' in the code or set the ENTERPRISE_TOOL_TOKEN environment variable." 
- ) - else: - try: - print("Initializing EnterpriseActionKitTool...") - adapter = EnterpriseActionKitToolAdapter(enterprise_action_token=token) - available_tools = adapter.tools() - - agent = Agent( - model="gpt-4o", - tools=available_tools, - role="You are are expert at google sheets", - goal="Get the sheet with the data x", - backstory="You are a expert at google sheets", - verbose=True, - ) - - task = Task( - description="return data from the sheet with the id: {spreadsheetId}, with the limit: {limit}", - expected_output="The data from the sheet with the id: {spreadsheetId} with the limit: {limit}", - agent=agent, - ) - crew = Crew( - agents=[agent], - tasks=[task], - verbose=True, - ) - result = crew.kickoff( - inputs={ - "spreadsheetId": "1DHDIWGdhUXqXeYOO8yA44poiY222qHPQEUu28olipKs", - "limit": 2, - } - ) - - except ValueError as e: - print(f"\nConfiguration Error: {e}") - except Exception as e: - print(f"\nAn unexpected error occurred during execution: {e}") - import traceback - - traceback.print_exc() From 7c1a87e5abd330a7b12d5f2188bf0ef561cf4c82 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Moura?= Date: Thu, 24 Apr 2025 23:05:50 -0700 Subject: [PATCH 324/391] new tool around adapter --- src/crewai_tools/__init__.py | 6 +++ src/crewai_tools/tools/__init__.py | 1 + .../crewai_enterprise_tools.py | 40 +++++++++++++++++++ 3 files changed, 47 insertions(+) create mode 100644 src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 3e3cdc019..f42750593 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -6,6 +6,7 @@ from .tools import ( CodeDocsSearchTool, CodeInterpreterTool, ComposioTool, + CrewaiEnterpriseTools, CSVSearchTool, DallETool, DatabricksQueryTool, @@ -70,3 +71,8 @@ from .aws import ( from .adapters.mcp_adapter import ( MCPServerAdapter, ) + + +from .adapters.enterprise_adapter import ( + EnterpriseActionTool +) diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index bae21a1c6..d95d08c78 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -5,6 +5,7 @@ from .browserbase_load_tool.browserbase_load_tool import BrowserbaseLoadTool from .code_docs_search_tool.code_docs_search_tool import CodeDocsSearchTool from .code_interpreter_tool.code_interpreter_tool import CodeInterpreterTool from .composio_tool.composio_tool import ComposioTool +from .crewai_enterprise_tools.crewai_enterprise_tools import CrewaiEnterpriseTools from .csv_search_tool.csv_search_tool import CSVSearchTool from .dalle_tool.dalle_tool import DallETool from .databricks_query_tool.databricks_query_tool import DatabricksQueryTool diff --git a/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py b/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py new file mode 100644 index 000000000..8086d81b5 --- /dev/null +++ b/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py @@ -0,0 +1,40 @@ +""" +Crewai Enterprise Tools +""" + +import os +import typing as t +from crewai.tools import BaseTool +from crewai_tools.adapters.enterprise_adapter import EnterpriseActionKitToolAdapter + + +def CrewaiEnterpriseTools( + enterprise_token: t.Optional[str] = None, + actions_list: t.Optional[t.List[str]] = None +) -> t.List[BaseTool]: + """Factory function that returns crewai enterprise tools. + + Args: + enterprise_token: The token for accessing enterprise actions. 
+ If not provided, will try to use CREWAI_ENTEPRISE_TOOLS_TOKEN env var. + actions_list: Optional list of specific tool names to include. + If provided, only tools with these names will be returned. + + Returns: + A list of BaseTool instances for enterprise actions + """ + if enterprise_token is None: + enterprise_token = os.environ.get("CREWAI_ENTEPRISE_TOOLS_TOKEN") + if enterprise_token is None: + raise ValueError( + "No enterprise token provided. Please provide a token or set the CREWAI_ENTEPRISE_TOOLS_TOKEN environment variable." + ) + + adapter = EnterpriseActionKitToolAdapter(enterprise_token) + all_tools = adapter.tools() + + if actions_list is None: + return all_tools + + # Filter tools based on the provided list + return [tool for tool in all_tools if tool.name in actions_list] From 82d0209ce277c3cf41c2331417e43a6b9c390e1c Mon Sep 17 00:00:00 2001 From: benzakritesteur <101302443+benzakritesteur@users.noreply.github.com> Date: Mon, 28 Apr 2025 19:57:03 +0200 Subject: [PATCH 325/391] Fix firecrawl tool (Too many positional arguments) (#275) * Corrected to adapt to firecrawl package use Was leading to an error too many arguments when calling the craw_url() function * Corrected to adapt to firecrawl package use Corrected to avoid too many arguments error when calling firecrawl scrape_url function * Corrected to adapt to firecrawl package use Corrected to avoid error too many arguments when calling firecrawl search() function * fix: fix firecrawl integration * feat: support define Firecrawl using any config Currently we pre-defined the available paramenters to call Firecrawl, this commit adds support to receive any parameter and propagate them * docs: added doc string to Firecrawls classes --------- Co-authored-by: Lucas Gomide --- .../firecrawl_crawl_website_tool/README.md | 45 ++++----- .../firecrawl_crawl_website_tool.py | 92 ++++++++----------- .../firecrawl_scrape_website_tool/README.md | 28 ++++-- .../firecrawl_scrape_website_tool.py | 43 ++++++--- .../tools/firecrawl_search_tool/README.md | 25 +++-- .../firecrawl_search_tool.py | 75 +++++++-------- 6 files changed, 158 insertions(+), 150 deletions(-) diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/README.md b/src/crewai_tools/tools/firecrawl_crawl_website_tool/README.md index d8e8f1407..3edb73f02 100644 --- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/README.md +++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/README.md @@ -23,35 +23,38 @@ Utilize the FirecrawlScrapeFromWebsiteTool as follows to allow your agent to loa ```python from crewai_tools import FirecrawlCrawlWebsiteTool +from firecrawl import ScrapeOptions -tool = FirecrawlCrawlWebsiteTool(url='firecrawl.dev') +tool = FirecrawlCrawlWebsiteTool( + config={ + "limit": 100, + "scrape_options": ScrapeOptions(formats=["markdown", "html"]), + "poll_interval": 30, + } +) +tool.run(url="firecrawl.dev") ``` ## Arguments - `api_key`: Optional. Specifies Firecrawl API key. Defaults is the `FIRECRAWL_API_KEY` environment variable. -- `url`: The base URL to start crawling from. -- `maxDepth`: Optional. Maximum depth to crawl. Depth 1 is the base URL, depth 2 includes the base URL and its direct children and so on. -- `limit`: Optional. Maximum number of pages to crawl. -- `allowExternalLinks`: Allows the crawler to follow links that point to external domains. -- `formats`: Optional. Formats for the page's content to be returned (eg. markdown, html, screenshot, links). -- `timeout`: Optional. Timeout in milliseconds for the crawling operation. 
- -## Configurations Example +- `config`: Optional. It contains Firecrawl API parameters. This is the default configuration ```python - DEFAULT_CRAWLING_OPTIONS = { - "maxDepth": 2, - "ignoreSitemap": True, - "limit": 100, - "allowBackwardLinks": False, - "allowExternalLinks": False, - "scrapeOptions": { - "formats": ["markdown", "screenshot", "links"], - "onlyMainContent": True, - "timeout": 30000 - } - } +from firecrawl import ScrapeOptions + +{ + "max_depth": 2, + "ignore_sitemap": True, + "limit": 100, + "allow_backward_links": False, + "allow_external_links": False, + "scrape_options": ScrapeOptions( + formats=["markdown", "screenshot", "links"], + only_main_content=True, + timeout=30000, + ), +} ``` diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py index f91ad3184..ee7e5e3d9 100644 --- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py @@ -3,37 +3,36 @@ from typing import Any, Optional, Type from crewai.tools import BaseTool from pydantic import BaseModel, ConfigDict, Field, PrivateAttr - try: - from firecrawl import FirecrawlApp + from firecrawl import FirecrawlApp, ScrapeOptions except ImportError: FirecrawlApp = Any class FirecrawlCrawlWebsiteToolSchema(BaseModel): url: str = Field(description="Website URL") - maxDepth: Optional[int] = Field( - default=2, - description="Maximum depth to crawl. Depth 1 is the base URL, depth 2 includes the base URL and its direct children and so on.", - ) - limit: Optional[int] = Field( - default=100, description="Maximum number of pages to crawl." - ) - allowExternalLinks: Optional[bool] = Field( - default=False, - description="Allows the crawler to follow links that point to external domains.", - ) - formats: Optional[list[str]] = Field( - default=["markdown", "screenshot", "links"], - description="Formats for the page's content to be returned (eg. markdown, html, screenshot, links).", - ) - timeout: Optional[int] = Field( - default=30000, - description="Timeout in milliseconds for the crawling operation. The default value is 30000.", - ) class FirecrawlCrawlWebsiteTool(BaseTool): + """ + Tool for crawling websites using Firecrawl. To run this tool, you need to have a Firecrawl API key. + + Args: + api_key (str): Your Firecrawl API key. + config (dict): Optional. It contains Firecrawl API parameters. + + Default configuration options: + max_depth (int): Maximum depth to crawl. Default: 2 + ignore_sitemap (bool): Whether to ignore sitemap. Default: True + limit (int): Maximum number of pages to crawl. Default: 100 + allow_backward_links (bool): Allow crawling backward links. Default: False + allow_external_links (bool): Allow crawling external links. Default: False + scrape_options (ScrapeOptions): Options for scraping content + - formats (list[str]): Content formats to return. Default: ["markdown", "screenshot", "links"] + - only_main_content (bool): Only return main content. Default: True + - timeout (int): Timeout in milliseconds. 
Default: 30000 + """ + model_config = ConfigDict( arbitrary_types_allowed=True, validate_assignment=True, frozen=False ) @@ -41,6 +40,20 @@ class FirecrawlCrawlWebsiteTool(BaseTool): description: str = "Crawl webpages using Firecrawl and return the contents" args_schema: Type[BaseModel] = FirecrawlCrawlWebsiteToolSchema api_key: Optional[str] = None + config: Optional[dict[str, Any]] = Field( + default_factory=lambda: { + "max_depth": 2, + "ignore_sitemap": True, + "limit": 100, + "allow_backward_links": False, + "allow_external_links": False, + "scrape_options": ScrapeOptions( + formats=["markdown", "screenshot", "links"], + only_main_content=True, + timeout=30000, + ), + } + ) _firecrawl: Optional["FirecrawlApp"] = PrivateAttr(None) def __init__(self, api_key: Optional[str] = None, **kwargs): @@ -73,41 +86,8 @@ class FirecrawlCrawlWebsiteTool(BaseTool): "`firecrawl-py` package not found, please run `uv add firecrawl-py`" ) - def _run( - self, - url: str, - maxDepth: Optional[int] = 2, - limit: Optional[int] = 100, - allowExternalLinks: Optional[bool] = False, - formats: Optional[list[str]] = ["markdown", "screenshot", "links"], - timeout: Optional[int] = 30000, - ): - # Default options for timeout and crawling - DEFAULT_TIMEOUT = 30000 - DEFAULT_CRAWLING_OPTIONS = { - "maxDepth": 2, - "ignoreSitemap": True, - "limit": 100, - "allowBackwardLinks": False, - "allowExternalLinks": False, - "scrapeOptions": { - "formats": ["markdown", "screenshot", "links"], - "onlyMainContent": True, - "timeout": DEFAULT_TIMEOUT, - }, - } - - # Add default options not present as parameters - crawling_options = DEFAULT_CRAWLING_OPTIONS - - # Update the values of parameters present - crawling_options["maxDepth"] = maxDepth - crawling_options["limit"] = limit - crawling_options["allowExternalLinks"] = allowExternalLinks - crawling_options["scrapeOptions"]["formats"] = formats - crawling_options["scrapeOptions"]["timeout"] = timeout - - return self._firecrawl.crawl_url(url, crawling_options) + def _run(self, url: str): + return self._firecrawl.crawl_url(url, **self.config) try: diff --git a/src/crewai_tools/tools/firecrawl_scrape_website_tool/README.md b/src/crewai_tools/tools/firecrawl_scrape_website_tool/README.md index 93570f06b..ebcea2f53 100644 --- a/src/crewai_tools/tools/firecrawl_scrape_website_tool/README.md +++ b/src/crewai_tools/tools/firecrawl_scrape_website_tool/README.md @@ -20,19 +20,27 @@ Utilize the FirecrawlScrapeWebsiteTool as follows to allow your agent to load we ```python from crewai_tools import FirecrawlScrapeWebsiteTool -tool = FirecrawlScrapeWebsiteTool(url='firecrawl.dev') +tool = FirecrawlScrapeWebsiteTool(config={"formats": ['html']}) +tool.run(url="firecrawl.dev") ``` ## Arguments - `api_key`: Optional. Specifies Firecrawl API key. Defaults is the `FIRECRAWL_API_KEY` environment variable. -- `url`: The URL to scrape. -- `page_options`: Optional. - - `onlyMainContent`: Optional. Only return the main content of the page excluding headers, navs, footers, etc. - - `includeHtml`: Optional. Include the raw HTML content of the page. Will output a html key in the response. -- `extractor_options`: Optional. Options for LLM-based extraction of structured information from the page content - - `mode`: The extraction mode to use, currently supports 'llm-extraction' - - `extractionPrompt`: Optional. A prompt describing what information to extract from the page - - `extractionSchema`: Optional. The schema for the data to be extracted -- `timeout`: Optional. 
Timeout in milliseconds for the request +- `config`: Optional. It contains Firecrawl API parameters. + + +This is the default configuration + +```python +{ + "formats": ["markdown"], + "only_main_content": True, + "include_tags": [], + "exclude_tags": [], + "headers": {}, + "wait_for": 0, +} +``` + diff --git a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py index 8530aa71d..954136341 100644 --- a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py @@ -18,6 +18,21 @@ class FirecrawlScrapeWebsiteToolSchema(BaseModel): class FirecrawlScrapeWebsiteTool(BaseTool): + """ + Tool for scraping webpages using Firecrawl. To run this tool, you need to have a Firecrawl API key. + + Args: + api_key (str): Your Firecrawl API key. + config (dict): Optional. It contains Firecrawl API parameters. + + Default configuration options: + formats (list[str]): Content formats to return. Default: ["markdown"] + only_main_content (bool): Only return main content. Default: True + include_tags (list[str]): Tags to include. Default: [] + exclude_tags (list[str]): Tags to exclude. Default: [] + headers (dict): Headers to include. Default: {} + """ + model_config = ConfigDict( arbitrary_types_allowed=True, validate_assignment=True, frozen=False ) @@ -25,6 +40,17 @@ class FirecrawlScrapeWebsiteTool(BaseTool): description: str = "Scrape webpages using Firecrawl and return the contents" args_schema: Type[BaseModel] = FirecrawlScrapeWebsiteToolSchema api_key: Optional[str] = None + config: Optional[dict[str, Any]] = Field( + default_factory=lambda: { + "formats": ["markdown"], + "only_main_content": True, + "include_tags": [], + "exclude_tags": [], + "headers": {}, + "wait_for": 0, + } + ) + _firecrawl: Optional["FirecrawlApp"] = PrivateAttr(None) def __init__(self, api_key: Optional[str] = None, **kwargs): @@ -50,21 +76,8 @@ class FirecrawlScrapeWebsiteTool(BaseTool): self._firecrawl = FirecrawlApp(api_key=api_key) - def _run( - self, - url: str, - timeout: Optional[int] = 30000, - ): - options = { - "formats": ["markdown"], - "onlyMainContent": True, - "includeTags": [], - "excludeTags": [], - "headers": {}, - "waitFor": 0, - "timeout": timeout, - } - return self._firecrawl.scrape_url(url, options) + def _run(self, url: str): + return self._firecrawl.scrape_url(url, **self.config) try: diff --git a/src/crewai_tools/tools/firecrawl_search_tool/README.md b/src/crewai_tools/tools/firecrawl_search_tool/README.md index effb3f3d4..a2037e951 100644 --- a/src/crewai_tools/tools/firecrawl_search_tool/README.md +++ b/src/crewai_tools/tools/firecrawl_search_tool/README.md @@ -20,16 +20,25 @@ Utilize the FirecrawlSearchTool as follows to allow your agent to load websites: ```python from crewai_tools import FirecrawlSearchTool -tool = FirecrawlSearchTool(query='what is firecrawl?') +tool = FirecrawlSearchTool(config={"limit": 5}) +tool.run(query="firecrawl web scraping") ``` ## Arguments - `api_key`: Optional. Specifies Firecrawl API key. Defaults is the `FIRECRAWL_API_KEY` environment variable. -- `query`: The search query string to be used for searching. -- `page_options`: Optional. Options for result formatting. - - `onlyMainContent`: Optional. Only return the main content of the page excluding headers, navs, footers, etc. - - `includeHtml`: Optional. 
diff --git a/src/crewai_tools/tools/firecrawl_search_tool/README.md b/src/crewai_tools/tools/firecrawl_search_tool/README.md
index effb3f3d4..a2037e951 100644
--- a/src/crewai_tools/tools/firecrawl_search_tool/README.md
+++ b/src/crewai_tools/tools/firecrawl_search_tool/README.md
@@ -20,16 +20,25 @@ Utilize the FirecrawlSearchTool as follows to allow your agent to load websites:
 ```python
 from crewai_tools import FirecrawlSearchTool

-tool = FirecrawlSearchTool(query='what is firecrawl?')
+tool = FirecrawlSearchTool(config={"limit": 5})
+tool.run(query="firecrawl web scraping")
 ```

 ## Arguments

 - `api_key`: Optional. Specifies Firecrawl API key. Defaults to the `FIRECRAWL_API_KEY` environment variable.
-- `query`: The search query string to be used for searching.
-- `page_options`: Optional. Options for result formatting.
-  - `onlyMainContent`: Optional. Only return the main content of the page excluding headers, navs, footers, etc.
-  - `includeHtml`: Optional. Include the raw HTML content of the page. Will output a html key in the response.
-  - `fetchPageContent`: Optional. Fetch the full content of the page.
-- `search_options`: Optional. Options for controlling the crawling behavior.
-  - `limit`: Optional. Maximum number of pages to crawl.
\ No newline at end of file
+- `config`: Optional. A dictionary of Firecrawl API parameters.
+
+
+This is the default configuration:
+
+```python
+{
+    "limit": 5,
+    "tbs": None,
+    "lang": "en",
+    "country": "us",
+    "location": None,
+    "timeout": 60000,
+}
+```
diff --git a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py
index f7f4f3677..8b563778c 100644
--- a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py
+++ b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py
@@ -17,26 +17,25 @@ except ImportError:

 class FirecrawlSearchToolSchema(BaseModel):
     query: str = Field(description="Search query")
-    limit: Optional[int] = Field(
-        default=5, description="Maximum number of results to return"
-    )
-    tbs: Optional[str] = Field(default=None, description="Time-based search parameter")
-    lang: Optional[str] = Field(
-        default="en", description="Language code for search results"
-    )
-    country: Optional[str] = Field(
-        default="us", description="Country code for search results"
-    )
-    location: Optional[str] = Field(
-        default=None, description="Location parameter for search results"
-    )
-    timeout: Optional[int] = Field(default=60000, description="Timeout in milliseconds")
-    scrape_options: Optional[Dict[str, Any]] = Field(
-        default=None, description="Options for scraping search results"
-    )


 class FirecrawlSearchTool(BaseTool):
+    """
+    Tool for searching webpages using Firecrawl. To run this tool, you need to have a Firecrawl API key.
+
+    Args:
+        api_key (str): Your Firecrawl API key.
+        config (dict): Optional. A dictionary of Firecrawl API parameters.
+
+    Default configuration options:
+        limit (int): Maximum number of results to return. Default: 5
+        tbs (str): Time-based search parameter. Default: None
+        lang (str): Language code for search results. Default: "en"
+        country (str): Country code for search results. Default: "us"
+        location (str): Location parameter for search results. Default: None
+        timeout (int): Timeout in milliseconds.
Default: 60000 + """ + model_config = ConfigDict( arbitrary_types_allowed=True, validate_assignment=True, frozen=False ) @@ -47,6 +46,16 @@ class FirecrawlSearchTool(BaseTool): description: str = "Search webpages using Firecrawl and return the results" args_schema: Type[BaseModel] = FirecrawlSearchToolSchema api_key: Optional[str] = None + config: Optional[dict[str, Any]] = Field( + default_factory=lambda: { + "limit": 5, + "tbs": None, + "lang": "en", + "country": "us", + "location": None, + "timeout": 60000, + } + ) _firecrawl: Optional["FirecrawlApp"] = PrivateAttr(None) def __init__(self, api_key: Optional[str] = None, **kwargs): @@ -56,10 +65,9 @@ class FirecrawlSearchTool(BaseTool): def _initialize_firecrawl(self) -> None: try: - if FIRECRAWL_AVAILABLE: - self._firecrawl = FirecrawlApp(api_key=self.api_key) - else: - raise ImportError + from firecrawl import FirecrawlApp # type: ignore + + self._firecrawl = FirecrawlApp(api_key=self.api_key) except ImportError: import click @@ -72,7 +80,7 @@ class FirecrawlSearchTool(BaseTool): subprocess.run(["uv", "add", "firecrawl-py"], check=True) from firecrawl import FirecrawlApp - self.firecrawl = FirecrawlApp(api_key=self.api_key) + self._firecrawl = FirecrawlApp(api_key=self.api_key) except subprocess.CalledProcessError: raise ImportError("Failed to install firecrawl-py package") else: @@ -83,27 +91,14 @@ class FirecrawlSearchTool(BaseTool): def _run( self, query: str, - limit: Optional[int] = 5, - tbs: Optional[str] = None, - lang: Optional[str] = "en", - country: Optional[str] = "us", - location: Optional[str] = None, - timeout: Optional[int] = 60000, - scrape_options: Optional[Dict[str, Any]] = None, ) -> Any: - if not self.firecrawl: + if not self._firecrawl: raise RuntimeError("FirecrawlApp not properly initialized") - options = { - "limit": limit, - "tbs": tbs, - "lang": lang, - "country": country, - "location": location, - "timeout": timeout, - "scrapeOptions": scrape_options or {}, - } - return self.firecrawl.search(**options) + return self._firecrawl.search( + query=query, + **self.config, + ) try: From 67be0c674d57c838b19deada46a797d4a2455ee6 Mon Sep 17 00:00:00 2001 From: Daniel Barreto Date: Mon, 28 Apr 2025 19:53:00 -0300 Subject: [PATCH 326/391] Allow setting custom LLM for the vision tool (#294) * Allow setting custom LLM for the vision tool Defaults to gpt-4o-mini otherwise * Enhance VisionTool with model management and improved initialization - Added support for setting a custom model identifier with a default of "gpt-4o-mini". - Introduced properties for model management, allowing dynamic updates and resetting of the LLM instance. - Updated the initialization method to accept an optional LLM and model parameter. - Refactored the image processing logic for clarity and efficiency. 
* docstrings

* Add stop config

---------

Co-authored-by: lorenzejay
---
 .../tools/vision_tool/vision_tool.py | 68 ++++++++++++++-----
 1 file changed, 52 insertions(+), 16 deletions(-)

diff --git a/src/crewai_tools/tools/vision_tool/vision_tool.py b/src/crewai_tools/tools/vision_tool/vision_tool.py
index a8daaabb9..cd4f5e74c 100644
--- a/src/crewai_tools/tools/vision_tool/vision_tool.py
+++ b/src/crewai_tools/tools/vision_tool/vision_tool.py
@@ -2,9 +2,9 @@ import base64
 from pathlib import Path
 from typing import Optional, Type

+from crewai import LLM
 from crewai.tools import BaseTool
-from openai import OpenAI
-from pydantic import BaseModel, field_validator
+from pydantic import BaseModel, PrivateAttr, field_validator


 class ImagePromptSchema(BaseModel):
@@ -32,19 +32,52 @@ class ImagePromptSchema(BaseModel):


 class VisionTool(BaseTool):
+    """Tool for analyzing images using vision models.
+
+    Args:
+        llm: Optional LLM instance to use
+        model: Model identifier to use if no LLM is provided
+    """
+
     name: str = "Vision Tool"
     description: str = (
         "This tool uses OpenAI's Vision API to describe the contents of an image."
     )
     args_schema: Type[BaseModel] = ImagePromptSchema
-    _client: Optional[OpenAI] = None
+
+    _model: str = PrivateAttr(default="gpt-4o-mini")
+    _llm: Optional[LLM] = PrivateAttr(default=None)
+
+    def __init__(self, llm: Optional[LLM] = None, model: str = "gpt-4o-mini", **kwargs):
+        """Initialize the vision tool.
+
+        Args:
+            llm: Optional LLM instance to use
+            model: Model identifier to use if no LLM is provided
+            **kwargs: Additional arguments for the base tool
+        """
+        super().__init__(**kwargs)
+        self._model = model
+        self._llm = llm

     @property
-    def client(self) -> OpenAI:
-        """Cached OpenAI client instance."""
-        if self._client is None:
-            self._client = OpenAI()
-        return self._client
+    def model(self) -> str:
+        """Get the current model identifier."""
+        return self._model
+
+    @model.setter
+    def model(self, value: str) -> None:
+        """Set the model identifier and reset LLM if it was auto-created."""
+        self._model = value
+        if self._llm is not None and self._llm._model != value:
+            self._llm = None
+
+    @property
+    def llm(self) -> LLM:
+        """Get the LLM instance, creating one if needed."""
+        if self._llm is None:
+            self._llm = LLM(model=self._model, stop=["STOP", "END"])
+        return self._llm

     def _run(self, **kwargs) -> str:
         try:
@@ -52,7 +85,6 @@ class VisionTool(BaseTool):
             if not image_path_url:
                 return "Image Path or URL is required."

-            # Validate input using Pydantic
             ImagePromptSchema(image_path_url=image_path_url)

             if image_path_url.startswith("http"):
@@ -64,8 +96,7 @@ class VisionTool(BaseTool):
                 except Exception as e:
                     return f"Error processing image: {str(e)}"

-            response = self.client.chat.completions.create(
-                model="gpt-4o-mini",
+            response = self.llm.call(
                 messages=[
                     {
                         "role": "user",
@@ -76,16 +107,21 @@ class VisionTool(BaseTool):
                             "image_url": {"url": image_data},
                         },
                     ],
-                }
+                },
             ],
-            max_tokens=300,
         )
-
-        return response.choices[0].message.content
-
+            return response
         except Exception as e:
             return f"An error occurred: {str(e)}"

     def _encode_image(self, image_path: str) -> str:
+        """Encode an image file as base64.
+
+        Args:
+            image_path: Path to the image file
+
+        Returns:
+            Base64-encoded image data
+        """
         with open(image_path, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode("utf-8")
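The net effect of the diff above is that the tool can now be constructed three ways; a minimal usage sketch (the image URL is a placeholder):

```python
from crewai import LLM
from crewai_tools import VisionTool

tool = VisionTool()                         # lazily builds an LLM for "gpt-4o-mini"
tool = VisionTool(model="gpt-4o")           # same, but against a different model id
tool = VisionTool(llm=LLM(model="gpt-4o"))  # or hand over a preconfigured LLM

print(tool.run(image_path_url="https://example.com/photo.jpg"))
```

Note that reassigning `tool.model` clears a cached LLM whose model differs, so the next access to `tool.llm` rebuilds it against the new identifier.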
From 93d043bcd41fedaa49ac343c11dcc1618424cc58 Mon Sep 17 00:00:00 2001
From: Lorenze Jay <63378463+lorenzejay@users.noreply.github.com>
Date: Sun, 4 May 2025 10:41:54 -0700
Subject: [PATCH 327/391] Enhance EnterpriseActionKitToolAdapter to support custom project IDs (#297)

* Enhance EnterpriseActionKitToolAdapter to support custom project IDs

- Updated the EnterpriseActionKitToolAdapter and EnterpriseActionTool classes to accept an optional project_id parameter, allowing for greater flexibility in API interactions.
- Modified API URL construction to utilize the provided project_id instead of a hardcoded default.
- Updated the CrewaiEnterpriseTools factory function to accept and pass the project_id to the adapter.

* for factory in mind

---
 .../adapters/enterprise_adapter.py     | 31 ++++++++++++++++---
 .../crewai_enterprise_tools.py         | 17 ++++++++--
 2 files changed, 42 insertions(+), 6 deletions(-)

diff --git a/src/crewai_tools/adapters/enterprise_adapter.py b/src/crewai_tools/adapters/enterprise_adapter.py
index 1d93894b7..e6e64647a 100644
--- a/src/crewai_tools/adapters/enterprise_adapter.py
+++ b/src/crewai_tools/adapters/enterprise_adapter.py
@@ -4,8 +4,9 @@ from typing import List, Any, Dict, Optional
 import json

 from crewai.tools import BaseTool
-
+# DEFAULTS
 ENTERPRISE_ACTION_KIT_PROJECT_ID = "dd525517-df22-49d2-a69e-6a0eed211166"
+ENTERPRISE_ACTION_KIT_PROJECT_URL = "https://worker-actionkit.tools.crewai.com/projects"


 class EnterpriseActionTool(BaseTool):
@@ -18,6 +19,12 @@ class EnterpriseActionTool(BaseTool):
     action_schema: Dict[str, Any] = Field(
         default={}, description="The schema of the action"
     )
+    enterprise_action_kit_project_id: str = Field(
+        default=ENTERPRISE_ACTION_KIT_PROJECT_ID, description="The project id"
+    )
+    enterprise_action_kit_project_url: str = Field(
+        default=ENTERPRISE_ACTION_KIT_PROJECT_URL, description="The project url"
+    )

     def __init__(
         self,
@@ -26,6 +33,8 @@ class EnterpriseActionTool(BaseTool):
         enterprise_action_token: str,
         action_name: str,
         action_schema: Dict[str, Any],
+        enterprise_action_kit_project_url: str = ENTERPRISE_ACTION_KIT_PROJECT_URL,
+        enterprise_action_kit_project_id: str = ENTERPRISE_ACTION_KIT_PROJECT_ID,
     ):
         schema_props = (
             action_schema.get("function", {})
@@ -74,12 +83,17 @@ class EnterpriseActionTool(BaseTool):
         self.action_name = action_name
         self.action_schema = action_schema

+        if enterprise_action_kit_project_id is not None:
+            self.enterprise_action_kit_project_id = enterprise_action_kit_project_id
+        if enterprise_action_kit_project_url is not None:
+            self.enterprise_action_kit_project_url = enterprise_action_kit_project_url
+
     def _run(self, **kwargs) -> str:
         """Execute the specific enterprise action with validated parameters."""
         try:
             params = {k: v for k, v in kwargs.items() if v is not None}

-            api_url = f"https://worker-actionkit.tools.crewai.com/projects/{ENTERPRISE_ACTION_KIT_PROJECT_ID}/actions"
+            api_url = f"{self.enterprise_action_kit_project_url}/{self.enterprise_action_kit_project_id}/actions"
             headers = {
                 "Authorization": f"Bearer {self.enterprise_action_token}",
                 "Content-Type": "application/json",
@@ -104,7 +118,12 @@ class EnterpriseActionTool(BaseTool):
 class EnterpriseActionKitToolAdapter:
     """Adapter that creates BaseTool instances for enterprise actions."""

-    def __init__(self, enterprise_action_token: str):
+    def __init__(
self, + enterprise_action_token: str, + enterprise_action_kit_project_url: str = ENTERPRISE_ACTION_KIT_PROJECT_URL, + enterprise_action_kit_project_id: str = ENTERPRISE_ACTION_KIT_PROJECT_ID, + ): """Initialize the adapter with an enterprise action token.""" if not enterprise_action_token: raise ValueError("enterprise_action_token is required") @@ -112,6 +131,8 @@ class EnterpriseActionKitToolAdapter: self.enterprise_action_token = enterprise_action_token self._actions_schema = {} self._tools = None + self.enterprise_action_kit_project_id = enterprise_action_kit_project_id + self.enterprise_action_kit_project_url = enterprise_action_kit_project_url def tools(self) -> List[BaseTool]: """Get the list of tools created from enterprise actions. @@ -127,7 +148,7 @@ class EnterpriseActionKitToolAdapter: def _fetch_actions(self): """Fetch available actions from the API.""" try: - actions_url = f"https://worker-actionkit.tools.crewai.com/projects/{ENTERPRISE_ACTION_KIT_PROJECT_ID}/actions" + actions_url = f"{self.enterprise_action_kit_project_url}/{self.enterprise_action_kit_project_id}/actions" headers = {"Authorization": f"Bearer {self.enterprise_action_token}"} params = {"format": "json_schema"} @@ -189,6 +210,8 @@ class EnterpriseActionKitToolAdapter: action_name=action_name, action_schema=action_schema, enterprise_action_token=self.enterprise_action_token, + enterprise_action_kit_project_id=self.enterprise_action_kit_project_id, + enterprise_action_kit_project_url=self.enterprise_action_kit_project_url, ) tools.append(tool) diff --git a/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py b/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py index 8086d81b5..a1dc2970b 100644 --- a/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py +++ b/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py @@ -10,7 +10,9 @@ from crewai_tools.adapters.enterprise_adapter import EnterpriseActionKitToolAdap def CrewaiEnterpriseTools( enterprise_token: t.Optional[str] = None, - actions_list: t.Optional[t.List[str]] = None + actions_list: t.Optional[t.List[str]] = None, + enterprise_action_kit_project_id: t.Optional[str] = None, + enterprise_action_kit_project_url: t.Optional[str] = None, ) -> t.List[BaseTool]: """Factory function that returns crewai enterprise tools. @@ -30,7 +32,18 @@ def CrewaiEnterpriseTools( "No enterprise token provided. Please provide a token or set the CREWAI_ENTEPRISE_TOOLS_TOKEN environment variable." 
        )

-    adapter = EnterpriseActionKitToolAdapter(enterprise_token)
+    adapter_kwargs = {"enterprise_action_token": enterprise_token}
+
+    if enterprise_action_kit_project_id is not None:
+        adapter_kwargs["enterprise_action_kit_project_id"] = (
+            enterprise_action_kit_project_id
+        )
+    if enterprise_action_kit_project_url is not None:
+        adapter_kwargs["enterprise_action_kit_project_url"] = (
+            enterprise_action_kit_project_url
+        )
+
+    adapter = EnterpriseActionKitToolAdapter(**adapter_kwargs)
     all_tools = adapter.tools()

     if actions_list is None:

From fd4ef4f47a17d12c17c6ee203fce99f2096d01d9 Mon Sep 17 00:00:00 2001
From: Lucas Gomide
Date: Mon, 5 May 2025 15:15:50 -0300
Subject: [PATCH 328/391] fix: Remove kwargs from all RagTools (#285)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* fix: remove kwargs from all (except mysql & pg) RagTools

The agent uses the tool description to decide what to propagate when a tool with **kwargs is found, but this often leads to failures during the tool invocation step. This happens because the final description ends up like this:

```
CrewStructuredTool(name='Knowledge base', description='Tool Name: Knowledge base
Tool Arguments: {'query': {'description': None, 'type': 'str'}, 'kwargs': {'description': None, 'type': 'Any'}}
Tool Description: A knowledge base that can be used to answer questions.')
```

The agent then tries to infer and pass a kwargs parameter, which isn't supported by the schema at all.

* feat: adding test to search tools

* feat: add db (chromadb folder) to .gitignore

* fix: fix github search integration

A few attributes were missing when calling the .add method: data_type and loader. The search query was also updated: according to the EmbedChain documentation, the query must include the type and repo keys.

* fix: rollback YoutubeChannel parameter

* chore: fix type hinting for CodeDocs search

* fix: ensure proper configuration when calling `add`

According to the documentation, some search methods must be defined as either a loader or a data_type. This commit ensures that.

* build: add optional-dependencies for github and xml search

* test: mock external requests in search_tool tests

* build: add pytest-recording as a dev dependency
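A sketch of the calling convention this change settles on, using `TXTSearchTool` as a representative example (the file path and query below are made up):

```python
from crewai_tools import TXTSearchTool

# The schema now lists only real parameters, so an agent can no longer be
# tempted into inventing a bogus `kwargs` argument.
tool = TXTSearchTool(txt="docs/spec.txt")
print(tool.run(search_query="what does the spec say about timeouts?"))

# The source can also be supplied per call, via the explicit optional argument
# that replaces the old **kwargs plumbing:
tool = TXTSearchTool()
print(tool.run(search_query="what does the spec say about timeouts?", txt="docs/spec.txt"))
```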
---
 .../code_docs_search_tool.py                  |  25 +-
 .../tools/csv_search_tool/csv_search_tool.py  |  25 +-
 .../directory_search_tool.py                  |  30 +-
 .../docx_search_tool/docx_search_tool.py      |  27 +-
 .../github_search_tool/github_search_tool.py  |  59 +-
 .../json_search_tool/json_search_tool.py      |  24 +-
 .../tools/mdx_search_tool/mdx_search_tool.py  |  25 +-
 .../tools/pdf_search_tool/pdf_search_tool.py  |  36 +-
 .../pdf_text_writing_tool.py                  |   5 +-
 src/crewai_tools/tools/rag/rag_tool.py        |   8 +-
 .../serply_api_tool/serply_job_search_tool.py |  13 +-
 .../serply_webpage_to_markdown_tool.py        |  10 +-
 .../tools/txt_search_tool/txt_search_tool.py  |  27 +-
 .../website_search/website_search_tool.py     |  29 +-
 .../tools/xml_search_tool/xml_search_tool.py  |  24 +-
 .../youtube_channel_search_tool.py            |  24 +-
 .../youtube_video_search_tool.py              |  29 +-
 .../test_csv_search_tool.yaml                 | 251 ++++++++
 .../test_directory_search_tool.yaml           | 544 ++++++++++++++++++
 .../test_json_search_tool.yaml                | 300 ++++++++++
 .../test_mdx_search_tool.yaml                 | 255 ++++++++
 .../test_txt_search_tool.yaml                 | 251 ++++++++
 tests/tools/test_search_tools.py              | 309 ++++++++++
 23 files changed, 2051 insertions(+), 279 deletions(-)
 create mode 100644 tests/tools/cassettes/test_search_tools/test_csv_search_tool.yaml
 create mode 100644 tests/tools/cassettes/test_search_tools/test_directory_search_tool.yaml
 create mode 100644 tests/tools/cassettes/test_search_tools/test_json_search_tool.yaml
 create mode 100644 tests/tools/cassettes/test_search_tools/test_mdx_search_tool.yaml
 create mode 100644 tests/tools/cassettes/test_search_tools/test_txt_search_tool.yaml
 create mode 100644 tests/tools/test_search_tools.py

diff --git a/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py b/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py
index ae51adf54..05711d7bc 100644
--- a/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py
+++ b/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py
@@ -31,30 +31,19 @@ class CodeDocsSearchTool(RagTool):
     def __init__(self, docs_url: Optional[str] = None, **kwargs):
         super().__init__(**kwargs)
         if docs_url is not None:
-            kwargs["data_type"] = DataType.DOCS_SITE
             self.add(docs_url)
             self.description = f"A tool that can be used to semantic search a query the {docs_url} Code Docs content."
self.args_schema = FixedCodeDocsSearchToolSchema self._generate_description() - def add( - self, - *args: Any, - **kwargs: Any, - ) -> None: - super().add(*args, **kwargs) - - def _before_run( - self, - query: str, - **kwargs: Any, - ) -> Any: - if "docs_url" in kwargs: - self.add(kwargs["docs_url"]) + def add(self, docs_url: str) -> None: + super().add(docs_url, data_type=DataType.DOCS_SITE) def _run( self, search_query: str, - **kwargs: Any, - ) -> Any: - return super()._run(query=search_query, **kwargs) + docs_url: Optional[str] = None, + ) -> str: + if docs_url is not None: + self.add(docs_url) + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py index e255df6b5..4567df201 100644 --- a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py +++ b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py @@ -31,30 +31,19 @@ class CSVSearchTool(RagTool): def __init__(self, csv: Optional[str] = None, **kwargs): super().__init__(**kwargs) if csv is not None: - kwargs["data_type"] = DataType.CSV self.add(csv) self.description = f"A tool that can be used to semantic search a query the {csv} CSV's content." self.args_schema = FixedCSVSearchToolSchema self._generate_description() - def add( - self, - *args: Any, - **kwargs: Any, - ) -> None: - super().add(*args, **kwargs) - - def _before_run( - self, - query: str, - **kwargs: Any, - ) -> Any: - if "csv" in kwargs: - self.add(kwargs["csv"]) + def add(self, csv: str) -> None: + super().add(csv, data_type=DataType.CSV) def _run( self, search_query: str, - **kwargs: Any, - ) -> Any: - return super()._run(query=search_query, **kwargs) + csv: Optional[str] = None, + ) -> str: + if csv is not None: + self.add(csv) + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py b/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py index ffd132c0e..20d21731a 100644 --- a/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py +++ b/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py @@ -1,4 +1,4 @@ -from typing import Any, Optional, Type +from typing import Optional, Type from embedchain.loaders.directory_loader import DirectoryLoader from pydantic import BaseModel, Field @@ -31,30 +31,22 @@ class DirectorySearchTool(RagTool): def __init__(self, directory: Optional[str] = None, **kwargs): super().__init__(**kwargs) if directory is not None: - kwargs["loader"] = DirectoryLoader(config=dict(recursive=True)) self.add(directory) self.description = f"A tool that can be used to semantic search a query the {directory} directory's content." 
self.args_schema = FixedDirectorySearchToolSchema self._generate_description() - def add( - self, - *args: Any, - **kwargs: Any, - ) -> None: - super().add(*args, **kwargs) - - def _before_run( - self, - query: str, - **kwargs: Any, - ) -> Any: - if "directory" in kwargs: - self.add(kwargs["directory"]) + def add(self, directory: str) -> None: + super().add( + directory, + loader=DirectoryLoader(config=dict(recursive=True)), + ) def _run( self, search_query: str, - **kwargs: Any, - ) -> Any: - return super()._run(query=search_query, **kwargs) + directory: Optional[str] = None, + ) -> str: + if directory is not None: + self.add(directory) + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py index 9f6c49df7..cdd76c29d 100644 --- a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py +++ b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py @@ -37,36 +37,19 @@ class DOCXSearchTool(RagTool): def __init__(self, docx: Optional[str] = None, **kwargs): super().__init__(**kwargs) if docx is not None: - kwargs["data_type"] = DataType.DOCX self.add(docx) self.description = f"A tool that can be used to semantic search a query the {docx} DOCX's content." self.args_schema = FixedDOCXSearchToolSchema self._generate_description() - def add( - self, - *args: Any, - **kwargs: Any, - ) -> None: - super().add(*args, **kwargs) - - def _before_run( - self, - query: str, - **kwargs: Any, - ) -> Any: - if "docx" in kwargs: - self.add(kwargs["docx"]) + def add(self, docx: str) -> None: + super().add(docx, data_type=DataType.DOCX) def _run( self, - **kwargs: Any, + search_query: str, + docx: Optional[str] = None, ) -> Any: - search_query = kwargs.get("search_query") - if search_query is None: - search_query = kwargs.get("query") - - docx = kwargs.get("docx") if docx is not None: self.add(docx) - return super()._run(query=search_query, **kwargs) + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/github_search_tool/github_search_tool.py b/src/crewai_tools/tools/github_search_tool/github_search_tool.py index 6ba7b919c..51fe4033c 100644 --- a/src/crewai_tools/tools/github_search_tool/github_search_tool.py +++ b/src/crewai_tools/tools/github_search_tool/github_search_tool.py @@ -1,7 +1,7 @@ -from typing import Any, List, Optional, Type +from typing import List, Optional, Type from embedchain.loaders.github import GithubLoader -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, PrivateAttr from ..rag.rag_tool import RagTool @@ -27,19 +27,29 @@ class GithubSearchToolSchema(FixedGithubSearchToolSchema): class GithubSearchTool(RagTool): name: str = "Search a github repo's content" - description: str = "A tool that can be used to semantic search a query from a github repo's content. This is not the GitHub API, but instead a tool that can provide semantic search capabilities." + description: str = ( + "A tool that can be used to semantic search a query from a github repo's content. This is not the GitHub API, but instead a tool that can provide semantic search capabilities." 
+ ) summarize: bool = False gh_token: str args_schema: Type[BaseModel] = GithubSearchToolSchema - content_types: List[str] + content_types: List[str] = Field( + default_factory=lambda: ["code", "repo", "pr", "issue"], + description="Content types you want to be included search, options: [code, repo, pr, issue]", + ) + _loader: GithubLoader | None = PrivateAttr(default=None) - def __init__(self, github_repo: Optional[str] = None, **kwargs): + def __init__( + self, + github_repo: Optional[str] = None, + content_types: Optional[List[str]] = None, + **kwargs, + ): super().__init__(**kwargs) - if github_repo is not None: - kwargs["data_type"] = "github" - kwargs["loader"] = GithubLoader(config={"token": self.gh_token}) + self._loader = GithubLoader(config={"token": self.gh_token}) - self.add(repo=github_repo) + if github_repo and content_types: + self.add(repo=github_repo, content_types=content_types) self.description = f"A tool that can be used to semantic search a query the {github_repo} github repo's content. This is not the GitHub API, but instead a tool that can provide semantic search capabilities." self.args_schema = FixedGithubSearchToolSchema self._generate_description() @@ -47,26 +57,25 @@ class GithubSearchTool(RagTool): def add( self, repo: str, - content_types: List[str] | None = None, - **kwargs: Any, + content_types: Optional[List[str]] = None, ) -> None: content_types = content_types or self.content_types - super().add(f"repo:{repo} type:{','.join(content_types)}", **kwargs) - - def _before_run( - self, - query: str, - **kwargs: Any, - ) -> Any: - if "github_repo" in kwargs: - self.add( - repo=kwargs["github_repo"], content_types=kwargs.get("content_types") - ) + super().add( + f"repo:{repo} type:{','.join(content_types)}", + data_type="github", + loader=self._loader, + ) def _run( self, search_query: str, - **kwargs: Any, - ) -> Any: - return super()._run(query=search_query, **kwargs) + github_repo: Optional[str] = None, + content_types: Optional[List[str]] = None, + ) -> str: + if github_repo: + self.add( + repo=github_repo, + content_types=content_types, + ) + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/json_search_tool/json_search_tool.py b/src/crewai_tools/tools/json_search_tool/json_search_tool.py index 422f2f175..5d832c6b9 100644 --- a/src/crewai_tools/tools/json_search_tool/json_search_tool.py +++ b/src/crewai_tools/tools/json_search_tool/json_search_tool.py @@ -31,30 +31,16 @@ class JSONSearchTool(RagTool): def __init__(self, json_path: Optional[str] = None, **kwargs): super().__init__(**kwargs) if json_path is not None: - kwargs["data_type"] = DataType.JSON self.add(json_path) self.description = f"A tool that can be used to semantic search a query the {json_path} JSON's content." 
self.args_schema = FixedJSONSearchToolSchema self._generate_description() - def add( - self, - *args: Any, - **kwargs: Any, - ) -> None: - super().add(*args, **kwargs) - - def _before_run( - self, - query: str, - **kwargs: Any, - ) -> Any: - if "json_path" in kwargs: - self.add(kwargs["json_path"]) - def _run( self, search_query: str, - **kwargs: Any, - ) -> Any: - return super()._run(query=search_query, **kwargs) + json_path: Optional[str] = None, + ) -> str: + if json_path is not None: + self.add(json_path) + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/mdx_search_tool/mdx_search_tool.py b/src/crewai_tools/tools/mdx_search_tool/mdx_search_tool.py index dd1c2bb9d..dfab255b0 100644 --- a/src/crewai_tools/tools/mdx_search_tool/mdx_search_tool.py +++ b/src/crewai_tools/tools/mdx_search_tool/mdx_search_tool.py @@ -31,30 +31,19 @@ class MDXSearchTool(RagTool): def __init__(self, mdx: Optional[str] = None, **kwargs): super().__init__(**kwargs) if mdx is not None: - kwargs["data_type"] = DataType.MDX self.add(mdx) self.description = f"A tool that can be used to semantic search a query the {mdx} MDX's content." self.args_schema = FixedMDXSearchToolSchema self._generate_description() - def add( - self, - *args: Any, - **kwargs: Any, - ) -> None: - super().add(*args, **kwargs) - - def _before_run( - self, - query: str, - **kwargs: Any, - ) -> Any: - if "mdx" in kwargs: - self.add(kwargs["mdx"]) + def add(self, mdx: str) -> None: + super().add(mdx, data_type=DataType.MDX) def _run( self, search_query: str, - **kwargs: Any, - ) -> Any: - return super()._run(query=search_query, **kwargs) + mdx: Optional[str] = None, + ) -> str: + if mdx is not None: + self.add(mdx) + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py index fc11306ce..d56219785 100644 --- a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py +++ b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py @@ -30,39 +30,19 @@ class PDFSearchTool(RagTool): def __init__(self, pdf: Optional[str] = None, **kwargs): super().__init__(**kwargs) if pdf is not None: - kwargs["data_type"] = DataType.PDF_FILE self.add(pdf) self.description = f"A tool that can be used to semantic search a query the {pdf} PDF's content." 
self.args_schema = FixedPDFSearchToolSchema self._generate_description() - @model_validator(mode="after") - def _set_default_adapter(self): - if isinstance(self.adapter, RagTool._AdapterPlaceholder): - from embedchain import App + def add(self, pdf: str) -> None: + super().add(pdf, data_type=DataType.PDF_FILE) - from crewai_tools.adapters.pdf_embedchain_adapter import ( - PDFEmbedchainAdapter, - ) - - app = App.from_config(config=self.config) if self.config else App() - self.adapter = PDFEmbedchainAdapter( - embedchain_app=app, summarize=self.summarize - ) - - return self - - def add( - self, - *args: Any, - **kwargs: Any, - ) -> None: - super().add(*args, **kwargs) - - def _before_run( + def _run( self, query: str, - **kwargs: Any, - ) -> Any: - if "pdf" in kwargs: - self.add(kwargs["pdf"]) + pdf: Optional[str] = None, + ) -> str: + if pdf is not None: + self.add(pdf) + return super()._run(query=query) diff --git a/src/crewai_tools/tools/pdf_text_writing_tool/pdf_text_writing_tool.py b/src/crewai_tools/tools/pdf_text_writing_tool/pdf_text_writing_tool.py index ad4d847b6..1d8f3ffd8 100644 --- a/src/crewai_tools/tools/pdf_text_writing_tool/pdf_text_writing_tool.py +++ b/src/crewai_tools/tools/pdf_text_writing_tool/pdf_text_writing_tool.py @@ -32,7 +32,9 @@ class PDFTextWritingTool(RagTool): """A tool to add text to specific positions in a PDF, with custom font support.""" name: str = "PDF Text Writing Tool" - description: str = "A tool that can write text to a specific position in a PDF document, with optional custom font embedding." + description: str = ( + "A tool that can write text to a specific position in a PDF document, with optional custom font embedding." + ) args_schema: Type[BaseModel] = PDFTextWritingToolSchema def run( @@ -45,7 +47,6 @@ class PDFTextWritingTool(RagTool): font_name: str = "F1", font_file: Optional[str] = None, page_number: int = 0, - **kwargs, ) -> str: reader = PdfReader(pdf_path) writer = PdfWriter() diff --git a/src/crewai_tools/tools/rag/rag_tool.py b/src/crewai_tools/tools/rag/rag_tool.py index 6d0320c0c..900a6ef36 100644 --- a/src/crewai_tools/tools/rag/rag_tool.py +++ b/src/crewai_tools/tools/rag/rag_tool.py @@ -59,11 +59,5 @@ class RagTool(BaseTool): def _run( self, query: str, - **kwargs: Any, - ) -> Any: - self._before_run(query, **kwargs) - + ) -> str: return f"Relevant Content:\n{self.adapter.query(query)}" - - def _before_run(self, query, **kwargs): - pass diff --git a/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py index b0474262a..1c0c665b5 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py @@ -41,14 +41,15 @@ class SerplyJobSearchTool(RagTool): def _run( self, - **kwargs: Any, - ) -> Any: + query: Optional[str] = None, + search_query: Optional[str] = None, + ) -> str: query_payload = {} - if "query" in kwargs: - query_payload["q"] = kwargs["query"] - elif "search_query" in kwargs: - query_payload["q"] = kwargs["search_query"] + if query is not None: + query_payload["q"] = query + elif search_query is not None: + query_payload["q"] = search_query # build the url url = f"{self.request_url}{urlencode(query_payload)}" diff --git a/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py index 4010236cc..c7678f852 100644 --- 
a/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py @@ -18,7 +18,9 @@ class SerplyWebpageToMarkdownToolSchema(BaseModel): class SerplyWebpageToMarkdownTool(RagTool): name: str = "Webpage to Markdown" - description: str = "A tool to perform convert a webpage to markdown to make it easier for LLMs to understand" + description: str = ( + "A tool to perform convert a webpage to markdown to make it easier for LLMs to understand" + ) args_schema: Type[BaseModel] = SerplyWebpageToMarkdownToolSchema request_url: str = "https://api.serply.io/v1/request" proxy_location: Optional[str] = "US" @@ -39,9 +41,9 @@ class SerplyWebpageToMarkdownTool(RagTool): def _run( self, - **kwargs: Any, - ) -> Any: - data = {"url": kwargs["url"], "method": "GET", "response_type": "markdown"} + url: str, + ) -> str: + data = {"url": url, "method": "GET", "response_type": "markdown"} response = requests.request( "POST", self.request_url, headers=self.headers, json=data ) diff --git a/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py b/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py index 95b353f45..ebbde1223 100644 --- a/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py +++ b/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py @@ -1,6 +1,5 @@ -from typing import Any, Optional, Type +from typing import Optional, Type -from embedchain.models.data_type import DataType from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool @@ -31,30 +30,16 @@ class TXTSearchTool(RagTool): def __init__(self, txt: Optional[str] = None, **kwargs): super().__init__(**kwargs) if txt is not None: - kwargs["data_type"] = DataType.TEXT_FILE self.add(txt) self.description = f"A tool that can be used to semantic search a query the {txt} txt's content." self.args_schema = FixedTXTSearchToolSchema self._generate_description() - def add( - self, - *args: Any, - **kwargs: Any, - ) -> None: - super().add(*args, **kwargs) - - def _before_run( - self, - query: str, - **kwargs: Any, - ) -> Any: - if "txt" in kwargs: - self.add(kwargs["txt"]) - def _run( self, search_query: str, - **kwargs: Any, - ) -> Any: - return super()._run(query=search_query, **kwargs) + txt: Optional[str] = None, + ) -> str: + if txt is not None: + self.add(txt) + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/website_search/website_search_tool.py b/src/crewai_tools/tools/website_search/website_search_tool.py index 842462546..b89af6656 100644 --- a/src/crewai_tools/tools/website_search/website_search_tool.py +++ b/src/crewai_tools/tools/website_search/website_search_tool.py @@ -25,36 +25,27 @@ class WebsiteSearchToolSchema(FixedWebsiteSearchToolSchema): class WebsiteSearchTool(RagTool): name: str = "Search in a specific website" - description: str = "A tool that can be used to semantic search a query from a specific URL content." + description: str = ( + "A tool that can be used to semantic search a query from a specific URL content." + ) args_schema: Type[BaseModel] = WebsiteSearchToolSchema def __init__(self, website: Optional[str] = None, **kwargs): super().__init__(**kwargs) if website is not None: - kwargs["data_type"] = DataType.WEB_PAGE self.add(website) self.description = f"A tool that can be used to semantic search a query from {website} website content." 
self.args_schema = FixedWebsiteSearchToolSchema self._generate_description() - def add( - self, - *args: Any, - **kwargs: Any, - ) -> None: - super().add(*args, **kwargs) - - def _before_run( - self, - query: str, - **kwargs: Any, - ) -> Any: - if "website" in kwargs: - self.add(kwargs["website"]) + def add(self, website: str) -> None: + super().add(website, data_type=DataType.WEB_PAGE) def _run( self, search_query: str, - **kwargs: Any, - ) -> Any: - return super()._run(query=search_query, **kwargs) + website: Optional[str] = None, + ) -> str: + if website is not None: + self.add(website) + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py b/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py index 95a382299..2e0d26a88 100644 --- a/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py +++ b/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py @@ -31,30 +31,16 @@ class XMLSearchTool(RagTool): def __init__(self, xml: Optional[str] = None, **kwargs): super().__init__(**kwargs) if xml is not None: - kwargs["data_type"] = DataType.XML self.add(xml) self.description = f"A tool that can be used to semantic search a query the {xml} XML's content." self.args_schema = FixedXMLSearchToolSchema self._generate_description() - def add( - self, - *args: Any, - **kwargs: Any, - ) -> None: - super().add(*args, **kwargs) - - def _before_run( - self, - query: str, - **kwargs: Any, - ) -> Any: - if "xml" in kwargs: - self.add(kwargs["xml"]) - def _run( self, search_query: str, - **kwargs: Any, - ) -> Any: - return super()._run(query=search_query, **kwargs) + xml: Optional[str] = None, + ) -> str: + if xml is not None: + self.add(xml) + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py index 81ecc30c3..9de4b568f 100644 --- a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py +++ b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py @@ -25,13 +25,14 @@ class YoutubeChannelSearchToolSchema(FixedYoutubeChannelSearchToolSchema): class YoutubeChannelSearchTool(RagTool): name: str = "Search a Youtube Channels content" - description: str = "A tool that can be used to semantic search a query from a Youtube Channels content." + description: str = ( + "A tool that can be used to semantic search a query from a Youtube Channels content." + ) args_schema: Type[BaseModel] = YoutubeChannelSearchToolSchema def __init__(self, youtube_channel_handle: Optional[str] = None, **kwargs): super().__init__(**kwargs) if youtube_channel_handle is not None: - kwargs["data_type"] = DataType.YOUTUBE_CHANNEL self.add(youtube_channel_handle) self.description = f"A tool that can be used to semantic search a query the {youtube_channel_handle} Youtube Channels content." 
self.args_schema = FixedYoutubeChannelSearchToolSchema @@ -40,23 +41,16 @@ class YoutubeChannelSearchTool(RagTool): def add( self, youtube_channel_handle: str, - **kwargs: Any, ) -> None: if not youtube_channel_handle.startswith("@"): youtube_channel_handle = f"@{youtube_channel_handle}" - super().add(youtube_channel_handle, **kwargs) - - def _before_run( - self, - query: str, - **kwargs: Any, - ) -> Any: - if "youtube_channel_handle" in kwargs: - self.add(kwargs["youtube_channel_handle"]) + super().add(youtube_channel_handle, data_type=DataType.YOUTUBE_CHANNEL) def _run( self, search_query: str, - **kwargs: Any, - ) -> Any: - return super()._run(query=search_query, **kwargs) + youtube_channel_handle: Optional[str] = None, + ) -> str: + if youtube_channel_handle is not None: + self.add(youtube_channel_handle) + return super()._run(query=search_query) diff --git a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py index 1ad8434c8..639f1a266 100644 --- a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py +++ b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py @@ -25,36 +25,27 @@ class YoutubeVideoSearchToolSchema(FixedYoutubeVideoSearchToolSchema): class YoutubeVideoSearchTool(RagTool): name: str = "Search a Youtube Video content" - description: str = "A tool that can be used to semantic search a query from a Youtube Video content." + description: str = ( + "A tool that can be used to semantic search a query from a Youtube Video content." + ) args_schema: Type[BaseModel] = YoutubeVideoSearchToolSchema def __init__(self, youtube_video_url: Optional[str] = None, **kwargs): super().__init__(**kwargs) if youtube_video_url is not None: - kwargs["data_type"] = DataType.YOUTUBE_VIDEO self.add(youtube_video_url) self.description = f"A tool that can be used to semantic search a query the {youtube_video_url} Youtube Video content." 
self.args_schema = FixedYoutubeVideoSearchToolSchema self._generate_description() - def add( - self, - *args: Any, - **kwargs: Any, - ) -> None: - super().add(*args, **kwargs) - - def _before_run( - self, - query: str, - **kwargs: Any, - ) -> Any: - if "youtube_video_url" in kwargs: - self.add(kwargs["youtube_video_url"]) + def add(self, youtube_video_url: str) -> None: + super().add(youtube_video_url, data_type=DataType.YOUTUBE_VIDEO) def _run( self, search_query: str, - **kwargs: Any, - ) -> Any: - return super()._run(query=search_query, **kwargs) + youtube_video_url: Optional[str] = None, + ) -> str: + if youtube_video_url is not None: + self.add(youtube_video_url) + return super()._run(query=search_query) diff --git a/tests/tools/cassettes/test_search_tools/test_csv_search_tool.yaml b/tests/tools/cassettes/test_search_tools/test_csv_search_tool.yaml new file mode 100644 index 000000000..4247ba7bb --- /dev/null +++ b/tests/tools/cassettes/test_search_tools/test_csv_search_tool.yaml @@ -0,0 +1,251 @@ +interactions: +- request: + body: '{"input": ["name: test, description: This is a test CSV file"], "model": + "text-embedding-ada-002", "encoding_format": "base64"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '127' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.66.3 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.66.3 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.9 + method: POST + uri: https://api.openai.com/v1/embeddings + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": + \"embedding\",\n \"index\": 0,\n \"embedding\": 
\"eM8FvBM/VTsslBQ70U5uvHkoKLsMMAw9BARUvAGpQLw3YJe8xAHzvOqjrTx1Ekw8rN0NvMSoUDxFGqC8MnCgPGRWUjw/0Qa9mvm9PN/Xqrz2g5u8widYPF7IAbxpgJk8/84lvIr4DDxycgE9qvruvJSwpDzyxmE8E42QPKzdjbyn+P07EtLHvFgdQrw7be+7TIsPPK2jPbyjkgM9sf7Qu3wqGTweC1i8jZhXOxK+XLyuXoY7wQgGu+/E8Dvew7+8LrPmOz9vYDxgjrE81oygPFQH5rzdVjK7yHBxu6T/EDyS1gm8MVy1O1sfM7tRUzC8PDwjOxCfCrxifLe89oMbPCIhNLx5KCi8w5RlO3cA0rs7be+6jmeLO2hsLjzlxyE8/+KQumsM+TstRlk8WrIlvNFi2by2PAO8/nUDPKefW7x8PgQ8/H55vH4Yn7y0AEK8q3CAPBEMGDzG5BG7iHeUPIDyubyUCce7VcKuPE2WdjwoEas8aZSEPMi+LLz4ttg75AzZPGYwbT2FE/25eM8Fve2RsztES2y8UhnguwcGRbxKnQm9IQ1Ju939jzy0u4q7PDwjOwFkCTy4Kgk71owgOQaFzLukWLO84ES4O6ERCzzygao7JA+6vMzUCLyETU26yBfPO9Idojz3XbY7/4BqvZJ0Y7sMia489KD8umMu/Lx4uxo8NjjBu7UULT2v1no8XlJwPGbXSrsgR5m8s9jrPO4SLL2TnDk8DU/evJFg+DtplIQ75FoUPUkneLsvgho7OLk5uqIccrpa91w7MnCguzFcNTxJO+M6Hx9DPKWAibtODAg99chSvO8mF7zVvew8m2bLPECXNroqROi7QauhvHT+4DsWhn08o4l/vHZ/2bwBAuM8NjhBPZzTWDy+uFk6/fSKvFkxLbwMiS480PXLOz/RBr0A7nc8myEUPL8lZzx5gco7OgDiu7mDqzt0pb47cn3ovPY14Dxw8Yg8RoctPcUpyTuVz3a8CUKGPJT127z0qYA8TgyIO8XQpjyzOhI90+NRPDwouDsdRSi/f4WsvKGv5LrOwo68bg5qPLsYDz0Cg1s8/fQKPEG/DLv7Eew6xby7vDdMrDu8hRw76vzPu8bkkbxzkVO8on6YPD4WPjsnpJ275jQvPTjNpLyJgvs8Er7cO4P0Kjq/h407fl1Wu5mMMLy+BpW7MO+nPAaZt7xIYUi9nyMFPO3+QLqiw8+6yBfPPCgRKzthtoe8AO53OgIquTyhaq08be+XvDtt77y53M27s9hrPCJ6VrxnnXq7AFCePH4EtDsus2Y8w/aLPO65iTp3WXS8Dfa7POpKi7vVZEo7w5RlutnTyDygkBK8VXTzPBmIbjzRsJS8hRwBPcqssjz68pm8u7ZovOjJkrvZjpG8t6mQu/SgfDm4Fh69u7boO+FYo7uAN3E8iGOpvDPxGD1P0jc9yqwyPN8cYrrz2sw7RXPCPCT7zjudQGY36koLPEUaILw4dII8pu2WOydC97x+BDS8+A97vJBBJryksdU7UWebu97DPzxA3O27SSd4PBr1+7pHCCa8dpPEu8pTkDwubq+85XlmPBgbYbudQOa7LrNmPBV7ljz7Xyc8K8XgvHtb5TtjpA09WAlXvHEQWzmuEMs63ZvpvM1Mfbws2Uu7qdscvaDpNDxN+Bw8YOdTPAqbKLzybb88cKNNPAth2Dw5Jse7jfH5OysTnDyFE308U+gTvPzMtLx6qSC66xA7PJNDl7uK+Aw91b3svKw2MLy3qRC8i3kFPQthWDwMMIw8L9s8uyOOwbwWhv07hbrau1bqBDvxWVS8HzOuu7RZ5Ly/JWc8I+fjuVYvvDocMb28ka4zvP/OJbyrcIC6cKPNvKFWQrzm24w8VAfmvJ1AZrzJK7q8mj51vKD9nzy3R+o5bIKKvIhjKTsBZIk88oEqvCgRqzxasiW8r9b6vNV4NTzua0673HyXu8zUCLzrwn88pu2WPN39D70cMT08homOPBhpHLwU+p07kQdWPCbV6bwj5+M7povwPOLFsDwJQoa779jbOuBEOLwj52M8qkiqvKMw3TuNP7W8YsHuNwZAlbxFc8I8eYHKvDPdLTwzSru7OGt+PLrwODzt/sA868uDOo1TILyBrYI8xAHzvOu3GLn9ps+7AFCeO0tjOTxRwL08TfgcvRr1+7yhEYs8Yny3u3zcXbr+YRi80JwpuvcEFLyyJic8N/5wOn/KYzv84J87NKPdOzi5ubwCg9s8V7C0PIRNzTzhbA489oObvNsPijwjSYo8cV6WPOIe07sfM648c+p1u/Mz7zy/4K+8Pr0bPVQHZju1KJg7TOQxPIKHHT3qSgu9334IvFOa2LvzlZU8j4ZdPPbwKLx/yuM8vDfhu/oGhTw0o128alq0OyENSTyz2Ou7ct8OvRXUODw3TKw80OHgPBkvzDwjjkG64P+Autblwjzrwn88be+XvKT/kLoU+p08Z516vE/SN70sJwe8qY3huxjCvjuwTIy7lLCku/aDmzw9lcW7J6SdPNghhDyPhl26g5uIvCwnh7wC0RY8KBGrPPsR7Lsm1em7T3mVvPaDm7v9TS28GH0HvPrymTzMcmI81LIFvJ6tc7orExw76A7KPHu9Cz2/JWe8/86lO1dXkjuP1Bi74sUwvGtunzz0W0W7VuoEPd9+iLxA8Fg86uhkvDh0gjxeUnC8Ez9VPIgVbjx27Oa8JWjcux8zrjx0YIe44P+AO43x+Tx3To08XzWPOMmE3Ls28wm9qMexvBCfCj2bIZQ7pLHVPOSfSzwmN5A8LrNmO7qrAbzNQZa8OM2kvHyDuzoSeaW8+LbYuxA9ZDxzOLE74+2GO98wTTxMi4+72pn4uxboo7yd50O8Bpk3vGsMebsoJZa8u3ExPM1MfTwJQgY9YsHuvFYvPDyi1zq7smtePPryGbmvfVi87ZGzPIsX3zwIwY07rrcou6ERCz1jLvw7r33YOzG117thtge8RAY1PMndfrzRCTe7d6evvFnYiruMhOy7wE29O/EUnTy2PAM8dLmpvK/Wejz0AqO7yd1+uQmHvbtJJ/g8EapxPF3l4rsP0NY8sn/JuxhpHL3PiL68zsIOvel71ztOvky3v4eNvPddtrzt6tW8bnCQvLmXFr2zOpI6CHPSvB7GoLxyfei8JpAyPKOJ/zvaQNa7K7H1O+DrFby3R+q8tSgYPMgXz7wus+a8K7F1O5nR57uXnqq7JSOlPOEKaDw7Yog82Ga7PCeknTuryaK8cKNNPKJ+GLvM1Ai9N2CXPGLV2bvntSc8n3ynu55oPLy5lxY8mdFnO15S8LySdGM8oOk0vNi/3TzNQZY7c5HTO/aXhjxW6gQ6UPoNPEsegjwjNR+7naKMvIUcgbxVwq47FkHGO+2lnjxqn+s8eM+Fuz2pMLylbB684P+AvDzjgDtqWrQ81dHXPLUUrTw6Th275jQvvaqhzLzHqsG7RS4LO5h4xbspkqO8pR7jvAngX7wp19q8FA4JPFsLyDxSe4a6u7ZovFh2ZLxZ4/E76qMtOwwwDLsIGjC8PNr8vGOkDTkFLKq7R
/Q6vLoEpDwD8Og8YQ8qPDBIyrz3ou27DOLQOyIhtLy68Li8mHjFPEr2KzxuXCU9KBErPcG6yjuEYbg8hbrau7UomDt7b1C7pjLOvKpIqrxZ4/G6C2HYu5x6NjygQte71owgPYjQNjyD9Kq8eLuavKgg1DthaMw83gh3vDu7qrxjN4C8YsFuPNJ2xLxRBfW6ilEvvFqeurzpe9c8TytavPAxfjz3XTY8B1QAPSIhNLx7vQs8POMAO3CjzTxHTd28M90tPL3yqby+uNm8cn3oPIsXXzuqXBU9V5xJuzkmx7xhVOE7zUGWPGaSk7xYxJ88MslCu3LLI7yeaLw64VgjvevLg7shDUm89oObO2IjFTxMKem7BoXMuR337LwVGXC7szoSPJ7BXrxRZxs9fNxdPKSx1TwP0NY8+7hJPAW/nLxDQIW7EtJHvMhwcbzbwc48ptmrPKPrJbyjif+8EJ8KPSS2F7uewd662L9dvCENSbyCczI8VAfmPM1VAb0dWZM6fywKvdghBD0A7nc8d06NvHPqdbtOvkw7yGUKvLu2aDwQ+Ky82lTBPAfyWTy7tui8SGFIOm61R7wFv5y8+7hJOrVtzzsmkLI8eqkgPBnWKTxHrwM8KBGrPKhuj7wlfMc73gj3O3JyAT2+GgC9IzUfu7G5Gbyd+y48lc/2u1RVITw+AtO8FkHGvFSuQ7x3To28Aiq5PLS7Crw4zaS8ieShvEUui7pfIaQ7O23vvG/dHTyIFW67+7jJvE09VLzaVMG8iBVuPAPlATuox7E8jVOgu+RalDzi2Rs9FGcrvE34nLuDm4i8YsHuu4RNzbx7vYs87CQmOhtrDbxRrFI88JMkvUDcbbulxUC9f3FBvBqc2TzBYai8gJmXOysTnLuAS9w8TIsPPCFmazz53q68ZjBtPCVoXLxhDyq82CEEPL1LzLtuXCU7NvMJPF35TTstAaI74QrovNOKL7xUVSG6yqwyPIUcATwFGL873NW5O2uzVrxd5WK8Xg05PERLbDzvOoK862ldvFx41bouWsQ8homOPENAhbxkVtK7bCDkvLgWHrztpZ68blyluxK+3DyzOhI8yxnAPHru17z2lwa9bQMDu9XR17wq/7C8mHjFur2ZBzxm67U8HZ5KPH2XpjrMhk08NKPdOgscITwGmbe8UVOwOQW/HLw7Ygg8vl83PARSD7qUVwK9vZmHvA6xhDyYHyM8kWB4PCENyTvJK7o78igIu1V0c7zBuso5f4WsvIk9RLw8KLg5pYAJvQ4Kp7vW5cI8UD9FOzwoODzw7Ma7iYL7uh337Lya+T08hwoHvYA3cbzBukq8ZjBtvMRjGbsJLpu8y8Adu/sR7DxbH7M8nlRRO3SlPruUsCQ9HeyFvPx+ebtkVlI8vZmHvEKFPLz+dYO8ilGvOil+OLyj66W8WAnXvLPs1rvgndq7ap/rOrJr3jxuXCW94bHFvHaTxDt/ymO6uquBPBvEL7q2lSU70EOHvCFbhDxnnfo8RXPCu7cCs7vbaCw8v3MivIGkfjv4caE8Kv8wPrmDqzzMcmI8kOgDPSQPOjzNQRa8mTOOvGCinDt3To27Le02PdmOkTywOKE4OBLcuzbzCbtAl7Y7pWwePGJ8N73g/4C8nefDvEG/jLz/J8i7+gYFveJ3dbroDsq8ptmrutqZ+LtA8Ni8gEvcu2Mu/DzPL5w8Sk/OvAwwDLzF0CY8SSd4O6gg1LyatAa8JjeQPFIZYLxrDPk8+TdRvBnqlDwd7AW99pcGvEBJe7wWjwG85jSvPDT8f7xBvwy7ixdfOoW62rt4uxq8AWQJvEaHrTz/4pA7SvYrPO2Rs7xyfeg8ItN4vAXTB7yHCgc8ED1kvGPpxDzFvDs7lR2yPMGm37zfHGI896LtvBkvzLsaQ7c83JACvGw0T7zWKnq8uvC4O7qrgbt/LAq9qdscvF60Fj1g51M8V1cSPKtwgDxLHgK9aYCZO/OVFbyxEry8xAHzvA0787yXshW6ss0EvPgPe7x5KCi8gV/HvDSj3bxDmSe8FdQ4vDQFBDxbxhA9h0++vLOTNDzg65U7RK0SuzReJr3DlGU9tSgYPQPlAbtxEFu868L/O0cIprkOCqc8YOfTu1pFmLtVdHO7uCqJvF5S8LtOvky8/DnCupZFiDwD8Gi7WTGtu4QIljzDlOU7aZSEPC6zZjzffoi8KxOcO0pPTrzrwn+8Zuu1vHM4MTzxFB28tEX5vMhw8Tt2k8Q7XHhVPNegC7ucjiE8E+YyPGOQIjx1Ekw7RpuYvJf3zLzg/wA8eTyTu5CaSDw82vw7gQYlvAt1w7uCGpA8lAnHOAA8s7yAS9y8mTOOvEKFvLyZ0We83f0PPGyCCj2mi3C7E40QvAkuG71VdHM8q7W3vDT8/7zCzrW7KxMcPeUgRLtHCKa8g/SqvGpaNL6wkcM8e72LPC+CmjrqVXK7J6SdO6PrJTztpR47o4l/PDbzCTxG4M883gh3vMndfrzSu3s7OeEPPCl+OLzt/sC6eG3fvI5nCz0GQJU8Az6kPMhw8brraV084h7TOrRF+bvMLau7uxgPut/XKj1CGK+8b3t3vJGuszsK9Mo8XsiBO6Iccjy27kc7lwu4OQkuGzxlar28uqsBuhr1+zv2gxs8AWSJPCFm6ztifDe87erVPBwxPTwNnRk8mdHnvHZ/2TzRTu68DM7lOzdMLL0NT146+gYFPFQH5jy4Kok7A5fGvARd9jpjpA07tKcfvCLIkbzFvLu85/reu7LNhLxve3e8xXeEvGMufLo0XiY9OHQCvOe1pzuFulo7cDZAPP5hGDzhscU49peGPKbtFrxaRRi9GYjuPOIe0zp1zRQ8z4i+vCl+OD0T5rI70h0iPGG2Bzwdnko6RpuYvHUmNzxQ5qI7eZU1vP/OpTxqRsm7bQMDvVD6Dbzer1Q6qducOxEMmLyQ82o5CS6bvDhrfryZMw65hRP9u2sMebxhaEw8wnUTPfEUnTzhWKO7EarxPLyFHD15lbU8LICpvCk5gTtQ5qI8a26fO+/E8LtM0Ea7hGG4vHdODb07Ygi7Earxu0RLbDwBvSu85kgaORsd0roJzHS6eTyTPC0Bor3m2wy95uZzPMdRHz3D4iC87aWePL8lZ7w8gVo956E8PKnbHDwoEau8IbQmuxh9B71QP8W7SeLAO1pFGDkq/7C8exYuuywnh7zFvLs8SwqXvLmXlryRrrO7eM8FvVCYZztdRwk9aTLevH+FLD0+vZs8nq1zvDT8/7utj1K8jIRsPBboo7zLwJ28F/yOOjza/Lz2g5u8STtjO/SpAL0UDok8GkO3PDbzCTyZjLC8s9jrO0hhSLxi1dm8Kv+wPA93tLvRCbe8IKC7PKw2sDufI4W84P+APMQBc7uJKVm8KBGrPBvErzxRZ5u8NPx/vL+HDb3Su3s8cnIBvVvGED0mNxA8LICpOhh9h7wvIHS8FXuWPCwnh7ntpZ68vZmHPDOPcrzVZEo73gj3uOEK6Dqhai29sKWuvCuxdTwMzmW8mw0pvIP0qrzwp4+8t0dqvOpKCzz5hYw8huKw
O+65iTv+YZg8gocdvUNABbw3YBc98QCyPA+Ln7xSewa9ZtdKPGYw7TxRZ5s81uXCOlXWmTsZiO689KB8vM7Cjr0FGD88/OAfO/bcvbzMLas7FA6JPGYwbbxVdPM6kOiDvCk5gTw3TCy8LIApPIP0Kr3qVXI8d1n0uwetIrwlaNw8szoSPO8mlzwpfjg7Y6QNPChqzbxW6gQ9q7U3PE4MCLypjeE89KkAu1Oa2Dz/zqW8O2KIuoGtgjxbH7O8VuoEOwcGxTxw8Qi8rCLFO8W8uzxz6vU7/hNdPKIc8jtMiw87O23vvIPgvzud+y67ptkrvbA4oTwubq+8IzUfuot5BT3kWhQ8Az6kPCwnhzxqWjS8KTkBvVsfszwaVyK7RuDPO4nkobza+567pLFVvIu+PD2LeYU8ieQhPTsUTTyyzYS8dc2UvP51g7zZ08i8dP7guqcBAr1qRkm7SHWzvOXHITxFzOQ6jT81PdzVuTxOqmG8zy+cO6NEyLoslJQ83ekkPY5nCzyqSCq98sbhPDT8/zsU+p08R6+DvAcGRbyXnqq7gDdxu+5X47z6BoU8R/Q6u28iVTtsNM+6D4sfvNx8l7x83N27MVy1PGOkDT3FFV48DrGEPDbfnrunRjm83emkvEf0urz84B+9bg7qvFA/RTx5lbU8tEV5O+2Rs7xdoKu8D4ufPODrFb0Qn4o8YtVZvE5lqrtMKem8WB3CPI0/tbsM4tA7SvarPGqfa7w+Fr48SwoXPLJ/Sbzry4O851wFPXkoKLxKnYk7ttpcPDtiiLz23L27DfY7vCgRK7yqSCq7nlRRvAsItrv2g5s9QNztO8QB87xfISS7pWyePIwrSjwOY0k8S2M5PJIbwbnFvLs7HUUoPW5cpbwanNm8/H75vKzdDb2eVFE8A+WBOudcBbtasqW7VK5DPP+AajvFd4Q8cKNNPFSuwzwlyoK7BkAVOLcCMzyvJLa7FPodPM6uo7ySdGM8h/abOWaSk7zhbI68vN6+u08r2jspOQE8awx5vO2RszyGO1O8aTLeO5q0BjxVdPO8ryS2uyVo3LwN9rs7i748vDjNpLwxA5O7\"\n + \ }\n ],\n \"model\": \"text-embedding-ada-002-v2\",\n \"usage\": {\n \"prompt_tokens\": + 12,\n \"total_tokens\": 12\n }\n}\n" + headers: + CF-RAY: + - 936f9362dc5e7e01-GRU + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 27 Apr 2025 16:07:57 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=8J2Cz0gyk5BpRSYbzjWETqMiyphlW8TAe7802MlHMe0-1745770077-1.0.1.1-qbyKIgJQJDS2wWDKC1x0RrxzJk5mcE4wDunq25j.sNSe_EMvVEIQTJR6t4Jmrknve3lSxXTsW4VL_3EYYk6ehiq6yIQDVzVkqNfU_xlwris; + path=/; expires=Sun, 27-Apr-25 16:37:57 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=r11uIiVDOWdkeX_FkCjMMnH.Z9zcvp4ptrKi3luIa9s-1745770077165-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-allow-origin: + - '*' + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-model: + - text-embedding-ada-002-v2 + openai-organization: + - crewai-iuxna1 + openai-processing-ms: + - '170' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + via: + - envoy-router-56c4dc8986-59htf + x-envoy-upstream-service-time: + - '96' + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '10000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '9999987' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_4953e9919d74fabb02f89c587d38ea30 + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"batch": [{"properties": {"class": "App", "version": "0.1.126", "language": + "python", "pid": 35168, "$lib": "posthog-python", "$lib_version": "3.9.3", "$geoip_disable": + true}, "timestamp": "2025-04-27T16:07:52.864741+00:00", "context": {}, "distinct_id": + "5303ea6e-a423-419e-a71c-3a0f0eaaaa16", "event": "query"}], "historical_migration": + false, "sentAt": "2025-04-27T16:07:56.879642+00:00", "api_key": "phc_PHQDA5KwztijnSojsxJ2c1DuJd52QCzJzT2xnSGvjN2"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, zstd + Connection: + - keep-alive + Content-Length: + - '454' + Content-Type: + - application/json + User-Agent: + - posthog-python/3.9.3 + method: POST + uri: https://us.i.posthog.com/batch/ + response: + body: + string: 
'{"status":"Ok"}' + headers: + Connection: + - keep-alive + Content-Length: + - '15' + Content-Type: + - application/json + Date: + - Sun, 27 Apr 2025 16:07:57 GMT + access-control-allow-credentials: + - 'true' + server: + - envoy + vary: + - origin, access-control-request-method, access-control-request-headers + x-envoy-upstream-service-time: + - '52' + status: + code: 200 + message: OK +- request: + body: '{"input": ["test CSV"], "model": "text-embedding-ada-002", "encoding_format": + "base64"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '87' + content-type: + - application/json + cookie: + - __cf_bm=8J2Cz0gyk5BpRSYbzjWETqMiyphlW8TAe7802MlHMe0-1745770077-1.0.1.1-qbyKIgJQJDS2wWDKC1x0RrxzJk5mcE4wDunq25j.sNSe_EMvVEIQTJR6t4Jmrknve3lSxXTsW4VL_3EYYk6ehiq6yIQDVzVkqNfU_xlwris; + _cfuvid=r11uIiVDOWdkeX_FkCjMMnH.Z9zcvp4ptrKi3luIa9s-1745770077165-0.0.1.1-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.66.3 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.66.3 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.9 + method: POST + uri: https://api.openai.com/v1/embeddings + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": + \"embedding\",\n \"index\": 0,\n \"embedding\": \"ke+Wuwb2F7zPCQm8e/yYvJBYa7zH5Zk8pDw1vOOS67pn9TK8wpnavMRJrjtwGaI8M8irvDdkFzz5SSW7f7GsPEKNxjwBkTC8/wP5PPoclbxAfpK8USXRPPMH2rxjQJ87znLdOYQgCDYkUz08g0OkvGNjO7tq8H676PdSPMt3kbziYBu8m0XWvL+3trz3snm7YlQHvOcBR7wkbGU863qWvDFFaDxS+MC50siQuVECNTp6EIG8KMKYu6uXKDzDUyK8IJ6puwUADDyVpKo8RUJaO6Q8Nbx1oaW8RQYWPHv8mDyaT0o6X66nvGuRnrx2dJW88wdavPddjbzWfaS8j4V7PEuOGbxIz5G7A1osvLbZf7upCvG6e/yYOSJEiTyTuJI8C6G3vMNTIjwHyQc9rjMUu4UvvLwzDuS8ECk7PNExZbxsw248V2ecvBPFJrzcQWy8zJC5O7hNjzzvFoK7vv3uPN3YF7zvUka7ekLRO/z+uDzGK1I8xk7uPNxBbLw2qk+7cDLKO1AvRT0CoGQ7a83ivBaOorzxG8K8yLiJvAqIj7wOeee8gZ1EPIxnkzyqoZy8zlk1PHAySryPhfs7Y4ZXPCM6FbpS35i8JDAhvICOEDs/iIa7CufvvCjCmLyKnpe8xu+NPGNAHzxBdB49uyU/vfPBITt+uyA8mhMGvPPLFbxztY07cBmiuyJnJT3foRM8DxqHPEjFHTsA12i81sPcPLpSz7yDTRg8lOrivAGRsLw4WqM8q3QMPbXAV7tkcm+8KbgkvHkzHTyYhs48KAhRO7Wnrzxjn/+4dtN1PDE7dLxnMfc8kgi/OlkwmLvCmVo8gbZsPHPx0TvBiqa7GYluvHsftTuub9g8+I9dPJHvlrziv3s7V4DEPHlv4TxdCMg7zlm1vODT47yFUtg7Jlj9PCcr7by5Oac7NqrPu/+9QDzxNGq7xGLWO/LusTthdyO8fPIkvcf+wTwtqfw8zYbFPIqelzusahg8k/RWusflmbz+0ag8wWcKvOQpFz0lP1U8V2ecO33oMDtCaiq/avD+vBPoQrw04dO7ex81PQGq2Dyw/I88OWlXuZ4E3rymKE07laSqvMqBhTw0+nu7neu1u9sPHDr3XY28OYzzPLz4LrzqpyY81NdEPXlvYbzAlJo8YZo/PCuBoDtlLLc75i7XOveZUbwjmfW8OiOfPFWeILxKsbW89NrJuym4pDsnK228pRkZPazJeLvz5D28YMfPOzlQLzwMdCc9LmNEvBmJ7rxuLYo7o2nFPK09iLxH2QU8INrturWEE7zzyxW8qLUEPbTUP7z5Jom8/fTEO44wj7t9C807zWMpO6JzOTz2o8W85+iePEunQTp/say8Ia1du/ACGrr3mdG7IXGZvMGKJrzQO1m8K8fYt+DT4zukeHm888EhOy587LkDN5C7qt1gvH0B2TyUiwI9VnGQPLbZf7zGTu4716/0u3dqITzo3iq71PBsvG1aGjp4YK08a4equ8FnCr0fyzm75i5XvNev9DvXczA8XeWrPNhpvDz083E67HAiPLs+5zvKgYW86RD7OguhNzxy4h29INptO17bN7y1p685KpUIPF0ISLoGVfg7c/FRvAcPwDv+rgw95lHzvPA+XrtXo+A6vdUSvHh5Vby8+C68ODcHvYzG8zv10NU6i61LOxmJbrzm8hI9qNiguW5QJj3lHyO8gWEAvHK/AT1+u6C7ya6VvLTt57tLwGm75EyzO/AMDjqDZsA8uInTvD3iJjt+9+S6MtwTPX0kdbzVhxg8xGJWPCb5HLzyEU47OS2TvO8Wgrvfurs7px5ZO4COEL0uQKg7IkSJt7dwqzvinN+8/6QYvLadu7t9xZS6apsSvOxworx/mIQ8RG/qvC8djLxDPRq945LrvLWEEz1jn387wbx2Oo5s0zuhfa06b4J2OoRczDx2lzG7trbjvNLIED30ngW8Jlh9PLiJ0zx5kn28OHNLPFW3yLxz2Cm8VKgUPBiT4rmQ+Qo8BFC4PH0BWbxHLnI6yuBlP
HaXsTxZMJg4UQI1PPE06ryMo9c87aLyu33PiLvreha816/0uy1KnLsvHQw9zKnhvJk2ojxW0PC5CquruNtL4DzaVdQ8o0apuhxIdrv1lJE8LZDUvH67ILwXerq8ulJPPDPrxzyQWOs7ImclvWrw/rxTsgg7fQtNO3eNPTxw9gU8WiYkvNev9DurdIw8I5n1O9Z9JLxNcL07bH02vICEnLxV2mQ87JO+Ohe2/jsS8rY7+I/du4q3P7wR/Co7VORYPOrKwjo1mxs9Ry7yPG9fWjyaE4a8C8TTPBIV0ztH2YU7jMZzPEjFnTy97rq8Sd5FucmuFTxjQB89kuWiPOnt3rwROO87jIovvAm1nzzn6B687aJyO3JBfjxrhyq8yse9uzojnzxQL0U8WTAYPZEr2zuhZIW7WY/4O0fyrTxOieW61NfEvH0BWTxkNis8rXlMvK8pILwWp0q8pRmZvA8ahzw0vre7q5eouuB0gzqLlKM66uPqPOGmUzwtqXw7wWcKvBPFprwtVJA87UOSPLIB0Lo6AIO8sfIbvaFkhbsfy7k6076cO1TBPDrDbMo8EwtfvGUTDzp6Ze07C36bPCrRTDztQ5K7/q6MPCFxmTytecw7vN+Gu/H4pTtA3fK8nciZPN8AdLuHNPy6jGeTvHV+iTyMZxO8ZzF3PPxEcbp+u6C8k7iSPD/OPrxOQy08XrgbvDH/LzwdvIU7YZo/O3zypLzQRU29tnqfvCqusDuDZsA8KouUPEUGljwjXbE8d6ZluwRQOLwvHYy6rGoYvWfcCro2bgu8yLiJPItxh7sECgA9vqiCuqbsiDu7SFs89sZhvHeDybx6Bo27DmA/u97nSzw+8dq8b4L2PI9i3zxgpLM8mXLmu70qfzxc7x88+++EPLP3WzyvZWS8ZB0DPQqSgzzq42o8puwIvCCeqTzz/WU8LicAPCb5HDxAfpI7oIchPOysZrvrnbI72FAUvHPYqbjc+7O7Z9wKPHWhpTwUogq8fPIkvVHpjLsH7KM7VtDwuzv2DrxrkZ48zXzRO2VP07zaVVQ8v7e2u8kN9ryghyG8nPWpvGzDbjxA3fK70+E4vKmrkLwK5++8lb3SvKNGKb0Kq6u7IkSJvPA+3rwS2Q69Ns1rPPhTGTy01L83vqiCPKbsiLu01L+8USVRPAFuFL1V2uS8gbZsOxPoQjvU8Oy8o4xhPJs74jpdCMg8vN+GPG88Pjs79g68y5otvJMXc7yNOgO9ya6VO8uzVbxEM6Y8rlawu/+kmLxyQX67S8DpOy1tOL2zuxc8LF6EvC2pfDzJDfa7PeImvIG27DqOUyu5LF6EPGxkjjyXqWq8YIEXO9Md/bve50s8RjhmuynbwDq+/W48uyW/u8DQXrsECoC7i9BnvINNGDwDN5A8OxmrPMt3ETwcSPY4cFXmvAco6LiGDKA6DY1PPHs43bgiZyW9pgWxu5ESs7xIxR28Nm6LPMkN9jxMYYm7qauQu8xtnbzKpCE8jlMrPHoQAbxEMya8EwvfvEJHjrsOPSO8gMrUu4F6KD1n3Io8HfjJux8H/rxLhKW6wYqmvKRf0bz5P7G8pTJBPGJUhzzaGZA8eZL9PGVPU7y97ro836ETPHK/gTo2kSe70TFlPLk5p7yKtz87kuWiu8qBhTtrkR682FAUPb7kxjtlE4+8A5ZwvKnnVLuJB+w7emVtuw8zL7w8S3u7iBFgO9Ex5bvR9aC7eZJ9vN+6Ozk8S/s8mGOyvOJgmzvYjFg7CufvPDhao7xvI5a6oWQFvSfWADyU6uK8/6SYO01XlbztTYa8Su35PF7btztDnHo87xYCvFzvnzuWmja8K8fYPDzsmruVvVI7A3PUO2jSFrzNn228nciZvBhwRrse7lU7oJEVvB/BxTy22f+6AHgIPGrwfry91ZK7ER9HPIOJ3LxFBhY9mXLmOyZY/TyBeqg8jMbzOy1tuLwp20C8lrPeu9a56Lwy9bs73wD0PEKDUrzF+QG9rMn4PPwhVTu6Fgs7b4L2u8q9STzn6B48aeFKPJD5irzFNUY7icEzvX+xLD2MZxO8C6G3vBkqjjwkU728puwIvPey+TzWuWi86dQ2PLdwq7vK4GW8n7QxPMNTorwxIkw7yse9uEcVSrvFNcY8VtBwvLdXgzrHCDa8JGxluzPrR7zieUM8emXtuvzlED1LhKW8G/MJvBHjgjw5jPM7UhtdPHmS/Tn3svm8jlMrvI5s07xz2Km8uTmnPJZ3Grw9+867nQ7Su3V+Cby6Us87yerZvFAWnbqjLYE8f7Gsu3/UyLwA1+g7P+fmPAUjKDtZMBg80TFlvMqBhTyo8cg8E+jCvIyAu7oUu7I8oy0BvHMU7rySCL88SpiNvBmJ7rvFNcY8fPKkvD2/iruMii+9Ns3ru0rt+Tuy6Ke8sPyPvDLck7rfAPQ8aeFKu4q3PzuSCL+8WVO0utTwbLzp7V67GxYmvMnRsbym7Ag8UemMvH3PCDyEXEw6Z9wKvUjPkbzzB1o6NqrPOwJBBLzd8T+8Dj2jO/WUkbxbHLA66PdSPNkjhDz3XY28StTRO82GxTvl/AY8myI6POrAzrzLs1W89oqdvGUTDzzLs1W8RxVKvLoWizypzqw7ehCBO6Uywby/ng69UPOAO1AvRbzabny736GTO6FkhTyYY7I8ekLROwJBBLx3g8k83diXO4R1dDzvL6q8xTXGu8yQOTtIz5E8nes1PBAGn7oOeee8E8+au50OUjz99MQ8qLUEPYyAuzxxDy48fc+IvDZui7wLul+4P8RKvHK/Abw11987B+yjvEJRAro5aVc8T3/xOlElUTxIzxG77xYCPPlJpbzT4Ti6Nm4Lvd8AdLzR9SC8+++EvI5s07tNV5U7SOg5Ozd9vzxztQ08KMKYPCZY/bsowpg8Hd+hvINDJLwQEJM7frugvMxtHbwm+Zy8v7c2PPAMDrxnMXe8gMDgvGJKkzqMgDu7k/RWPCjlNDx96LC8jIovu2DHTzwmNWE7WF0ou6G5cbz10FU8ekzFvPWtuTr5PzE7K73ku4+FezvNfNG7ZU9TvJ3rtbuBYYA8C8RTPgJkIDwtVBC8hFzMPBmJ7jtMnU28hzR8O3JBfjzF+YG8UC/FPJzcgTxbNdi7H4WBPAFulLuaE4Y7gleMPCFxGb2c3AG9l6lqvNziC71DPZo8eHlVO4yKr7o9+867g2bAO6G58bofy7m83B5QvDL1uzysjTQ8eTOdvO2icjuJy6c7ZRMPu4utS7wVmJa7jjAPu8f+wbs0+vs8rINAPJ6+pTxMerG8zUANvKt0jLz6HBW8Ut+YPCRsZTwR4wK79a05u25GMjyMZ5O8YOD3u02TWTwMl0M8TollPItxB7z3XQ09o0YpvNk8rLtG/KE6pktpt3K/gTzvUka8GUO2PIUWFL3XWog8U9UkvRsvzjt3jb26j0m3vL3uursWyma81c1QvDhaIzyzGvi8eVY5vHzyJD1uac48cQW6PL4H4zzMkLm8YkqTu6HD5TkwT1y8c9ipvO1NBr00+nu6jKPXu6Qjjbx3g0k5ndKNvBFC
47xPIBG8byMWuirRzLursNA8VtDwvLBbcDzc+7M8dMTBOq9MPL21py89IorBO4q3P7sEaeC86qemu7/aUjqI7sM7gMrUuqHD5bytYKS77X/WvHdHhbvuXLo7JDAhO2RZRzuVgQ68opZVvMJ2vjwzDmS8o2lFPHWhpbzeCmg720tgPLWEkzwTC9+8eimpvH/USDz5SSW8dtP1vL7BKjyDTRi8zYbFOy82tDs4c8u6GDSCO5MX8zzkKRc8/8c0PHlWObyx8hs8umt3vAQKgLvinN+7WWzcODATGLvDj+Y893Y1PIkH7LzqhAq9Kup0vK9MvLzI9M28d2qhPHEFOj08S3u8XcIPvcMwBr3cHlA8ehABvHWhJb03oFu8sugnPQcPwLyTF3O7FZiWvHEFOr4+8do84YM3PJosLrpMejE8Xf7TPGyg0jyQ+Qq7myK6PK09iDsQZf88QoPSOhhwxrxbNVg87U2GO25QprtPf3G8CbWfuzT6+zyZWb48DVELPcypYbzscKI7w2xKPK15TLzhag88/urQu0fZBT2T2y47/ducvMrg5buVgQ46TioFPONvzzuJB+y75wFHvNpufLoFPNC83ufLu4yKL7zcQWw8EGV/O+Mzizzouw489J6FO3oGDTyMii872lVUvE6JZTy+qIK8UC/FPAM3EL2XkEI8/q4MvFscMDwS2Q68CqurvNpufLyU6mI8jIC7vHPYKbyrl6i8i3EHO5+bCbxZbNy8zYZFunJB/rp1us08vSp/vCFxmTx5kn28tcpLOj61Fjxq8H68lK6ePHO1jbwkMCG9DVGLPInkT7t7H7U7a4cqvL7BKj1S3xi8JFM9uNAiMTzS6yw8U+5MvNevdDwROO86Kq6wvBsWpjxe2ze86uPqvD/nZrwHyYc8WxwwO5SunrtCUQK8rwYEvG8jljoo5TQ8s/fbu5tFVrxIz5E8l0oKPQ8aBz10qxm7k7iSPL3VEj2YfFo8zMz9vGF3Izwgt1G5UPOAPPWtOTw/xEo8ZHLvvFIbXbzHId66CufvO7EVuDyiWhG8iNWbuxS7srvglx88Q3neOd7Er73EP7q8+I9dPLsCIz1uac46OgADPdmCZLyPP0M9Z/UyPNhGoDw9v4q8TZPZvHdHhbz3mVG78tWJOrprdzyiltW8jIovvF3Cj7vZPKw8wYAyvNeWzLuUrh48lb3SvOjeqrsuY0Q8zywlve8WAj09Hus8sgvEu+yTPrz1rbm7opZVPB34ybzkZdu8eTOdu4UWFL0mNeG88AIaPDOvg7xT1SQ9uwyXO5lZvjxnMfe8DY3POviPXbzFHJ68AHgIPXoGjbsfqJ28dtP1O62caDt1oSW7dKuZO6QjDTwJ8WM8ETjvPL6ogjzo3iq71JuAvDPrR73jb0+82m78vGjrPj3T4Tg8DJfDPCJnpbxJogG9g02YPKCHobsCh7y8XCtkPLk5p7yrsNA8jV0fvNWHGLw5jPO8/8c0vA49ozy3rG+8QN1yux8H/rwFI6i80x19uk8gETxXo2A8+jU9vOkQe7s0pY87/OUQvWQdgzztTQY9M8irPEfZhbydJ/q8vdWSPJhAljzbDxy7WgOIPD0eazwS8ja8zywluwJkoL2n4pQ7w1OiuinbwLwz60c7kRIzPNZ9pLs/5+Y7+WJNvAx0pztDPRq8/tEoPGUJm7xm/ya8qQrxu0jFnby7AiM8c7WNPEjoOTvzyxU8OzLTPKp+ALxPf3E8bH22PMMwBrsDc9Q85+ieu/sSIT3EP7q7/fREvFompDxaAwi98tUJu17btzx3pmW8HAyyvPddjTsHD0A8tYQTPX67ILxxDy48R/ItvIG2bDw+2LK8LkCovAqIDzzqykK8sS7guxmJbjujacU75wu7PORMszs04dM7ETjvvFTk2DtlE4+8OJbnO/7qUDplE488k/TWO1zvHz0bL848cuKdPLLopzx9JHW8/OUQO0UQCrv925y8GSoOOyrq9LzudWK7yQ12vGb/pjzfAHQ8mmhyPInLJzwRQuO84JefPMyp4boge408Q3lePI9JNzzhjSu9pijNPBkqDjy3rG88V0QAPBPPGrzNY6m8Q2C2u/aKnbygkRU8PeImPJESM7vBgLI6KAhRPAUADLxuUCY5wJSaPG5Gsjyc9Sm75CmXOzAJJDsp20C8QlECvFAWnbzfoRO9bMPuvH3oMLwlAxE9IzqVPPliTbw9v4q8VZ6gPPaKnbxVt8g8NpGnvIQgCLwNpne8V4q4PLes7zuwH6w8ZxjPO/6ujDsn1gA9Fo4iPFs1WLtLyl28ssULPUxhCToS2Q47YMfPO6UZGbsXerq8yse9uziWZ7wS2Y65wJQauiKKQbzVzdA90EVNPBPowrufm4k6FmsGPDd9vztcErw81LQoPGuRnrvFHJ68v9rSPMQ/urs0pQ+9AHgIvARp4Dty4h080fUgPPACGjprqsa7xfmBPKUywTs3ZBe8CufvPMyp4TzjM4u5ya4VukgL1jyyAVC8q3SMO3lv4bz/A3k8nuHBuwnYO7yKt7+8qcQ4OzH/r7vWoEC83qsHvZP0VjuFUti7XvTfOyqVCD2yAdC8NPr7uyDabbwnzIy74NPjO4FhALzc4os8\"\n + \ }\n ],\n \"model\": \"text-embedding-ada-002-v2\",\n \"usage\": {\n \"prompt_tokens\": + 2,\n \"total_tokens\": 2\n }\n}\n" + headers: + CF-RAY: + - 936f93666e9d7e01-GRU + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 27 Apr 2025 16:07:57 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-allow-origin: + - '*' + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-model: + - text-embedding-ada-002-v2 + openai-organization: + - crewai-iuxna1 + openai-processing-ms: + - '59' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + via: + - envoy-router-787f678775-4x2zc + x-envoy-upstream-service-time: + - '39' + x-ratelimit-limit-requests: + - '10000' + 
x-ratelimit-limit-tokens: + - '10000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '9999998' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_9bb18ff3e7da9b6d8406f6361cbf5497 + http_version: HTTP/1.1 + status_code: 200 +version: 1 diff --git a/tests/tools/cassettes/test_search_tools/test_directory_search_tool.yaml b/tests/tools/cassettes/test_search_tools/test_directory_search_tool.yaml new file mode 100644 index 000000000..6f3fd2d58 --- /dev/null +++ b/tests/tools/cassettes/test_search_tools/test_directory_search_tool.yaml @@ -0,0 +1,544 @@ +interactions: +- request: + body: '{"input": ["This is a test file for directory search"], "model": "text-embedding-ada-002", + "encoding_format": "base64"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '119' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.66.3 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.66.3 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.9 + method: POST + uri: https://api.openai.com/v1/embeddings + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": + \"embedding\",\n \"index\": 0,\n \"embedding\": \"CtRHuxbenjwIql+8vF1VvHnqhrwbOSI9wKNkuL+xpbxWmXG8Mk/qvBiBpjt7jQ48ujNtOzbNojzazIa7rbD2OytRXzwaq468YrGJPO2O/rq00bY5TQagu5aaLrsP4JO8nTSOu5VwRrySuEo8jFYUvSR2CTveEpY60Qi/PFjmDr2m1aC70aQTvfP+9Tp6eBo8mmeePM1ehLwbssE85rMou1pmYbtTi4u8SrJPvHnqBr0AHkG8fPjsu5d3eTzklwG9EnXaOtEIvzyfwqE7xCEdPF6s8Lxs4K87GZaau1EpejzGS4U7S0DjPFdKurrKCrQ7BGTQvD/nVLxYX668Rx0JPHed6Ts6E7K8AsHIPGS4vDvUXI86+jSqOhxOljzwaa87Z4Usu06UMzxIqxw87j9HOk0GoLxfXbm7GZaaPHaI9bsGjrg8tV9KvAuaBLyBdiU8pJbEO5uRBroi0wE8Evw6PaswJL0FFRm83MV4u9Orxjx63EU8SRb7u8DbjbxCwgU7qreEPBxOljxrtse7Kl8gvdGkk7zDcNS7ItOBO5Npk7hpBX+88AUEvELChTwHHMy8pPpvu8CjZLv12aa8C9v6O/d8Lrzv8A+9pjnMOzKHE7oT7vk5X+SZvOLRxLxT77Y7/48tPIxrCD2SP6s8lpquvNIdszu+AN07ENLSvJ4mTb2uYb+7MmTevHWBQj3AP7k8YZyVPBOtgzxIiOe8xZq8PLTRNrwmGRE8T6knvfxzhrwqXyA8cMITPcT+Z7svl268HxuGOzqaEjzddsE7vnl8vEnAEDxBily8IZvYOzPdfbt/sGg8E+75u7M14jueEdk7Z+lXPNzFeLyxGbu8vF1VvDe/Ybq+efw8C9t6u+DKEb0r7TM9oXodPYxWlDwCJXQ8KCBEvNzFeLw/Coq8DrarPLvkNb23JQc8gqCNvMMMqTvJ9T8836CpPGVpBTsaiNk7qP8IvMuDUzs1HNo8oWWpPGfpV7x/sOi8r3YzvFuQSbzx90K8gn1YvLeepjx4Ob48eDk+PKFlKTwJWyi/3f2huyeE7zqCBLm8f9OdPOuqAD1pBf88K4mIPBOKTrz6mNU6OHAqvL55fDxWQwe8NqptOxfQXbyv71K8a8u7Oix7x7w+vWw8s1iXPK8SCLu9I5I8nop4O3X64buqG7C7KJljOzqvhjw5hZ68syDuPHX64Tvv2xu9pcCsuzFdq7txGP47aJogPYgQBTyH2Fu8cCY/OwUVmTzuKtM8Ck3nvHuigrv12aY6RN4sPLEZO7vddsG7kcYLPboz7btDUJk8iiwsvOY6iTyzvMI7B7igPGbUY7xcLB47USn6u1ntwTy8ckm8X125PHDCkzyXNgO8StWEPC2Quzv+AZq8jV3HvMn1vztKss+8hbw0PCenpDo0Kpu8RVfMPH0N4bq7SGE7cmUbPPU90jzQFgA9KdEMPcsfqDpsZ5C8hCDgO/M2nzygUDW54JLovGmvlLyPhy89r1P+OkCDqbwHlWu7fpv0uxQY4jsEh4U8//NYu8JG7Lx0j4O7lXDGO2RUEbvei7U6dqsqPHYP1jym1aC8plyBu0yNADwCJfS7We3BO+Tta7sBrNS7+R+2u7VfSjwFecQ8g7WBu0CYnbyNwfK8Of49vFZDh7qu/ZO8RGUNvSUEHTzchAK8SsdDPJpEabyAPvw8rxKIulJ2FzyCaGS7XCyePPy0fLvrDqw7EmDmu3uNjroJOHO7w3DUuy4JW7yijxE9h3SwvDxSjjwlBB030jKnPFC+mzxdM9E7+R+2vPhu7bzd2uw78ZOXuu4/x7uA/YU8YYchvOT7rLxIJDw8zwEMvYosrLu12Om8FJ/CPDsoJrwiFHg8plyBvBC93rt2q6o7uBfGvBudzbslaEi8JNo0PMJ+FTysWoy8hq7zu2MqKbz8XpI7P+dUvLdm/TwwSLe7WfsCvc3Crzs56ck7QIMpvP8rAj1/TL07HCthuyJMobxIiGc3ScCQO3g5PjpUGZ+7TjCIPIbmn
Du7gIo8eDm+Osl8oDwzFac8NI5GPLeeprxO+F454JJovFUuEzxuHwy8X+SZu5xu0bv5CsI86UhvvFQZnzy4kGU77Y5+PGb3mDtf+Y07F1e+OwYqDb108y47mkTpvPiRorzkdMy8Z4UsPJNpkzuDtQE8USn6vECYnbzUXA88j4cvPCcL0DwznIe84lilO82f+rx4K/078AWEPB4GkjycCqY8QGB0ubaJsjx41RI8PcutPBs5ojzYoh66y4NTvLZ0PrzeJwo8w5MJO80m27mKLKw8j2T6uiM+4Dzp8oS7HGMKPXTzLrwwwVY856XnPHN6Dz2YoWG8ExEvPJAVwzxKTqQ7FDuXPNRcj7xEQtg8Kl8gvGj+S7yLQaA7RmzAPCg1uDyDtYE7PWeCvC0sEDtAg6k8GojZPIZKyDwIRjS8XVaGPNTAOjwPyx89Oq8GvZCxl7zibZk8jM8zvDqvBr1g60y8dquqOsuYxzw494o5cCa/PKlqZzx+vik8OelJO5385DwBl2C8pSRYu+2Ofrwse0c8/yuCPAV5xLuQsZe83MV4vFg8eTwJW6g7w5OJu2ghAbxQNzs8rv0TPLNYl7z4bm076w6sPNIdM7ohm9i81U5OOkx4DDxLQGM81mPCO8WvsLtgDoK7aRNAPd4SlrxQm2Y8Hs5ovOTt6zvc6K27hVgJOzDkizv8XpK8RN6su27n4rvL/HI7gMVcvK8SCDzeEhY9C5oEPU+Gcrwkt/+8N+KWvMA/OTzYBko8HE4WPW91djwawAI5IkyhvIu6P7zgtR29IhT4u+sOrDtO+F481FwPvPH3Qrwryv67iZ4YPKdOQDztsTO59T1SO0V6gbuqf1u8U4sLvT0vWbvo3ZA7Ka7XOsZLhTvKkRQ8e2rZu/AFhDwxOna879sbO5+fbLwEZFC8UNMPPYdfvDzYGz4944KNPJ6KeDx41RK7nibNO9rMBjyuxWq8UwSrPHTzrrsFFZm6XqxwvJR+hzySPys8YvL/u67F6jt3nek7P9LgvAm/UzzeEha81bJ5O8MMKTxomqA8K4kIPHEY/rv97KU8RVfMvPo0Kr3v25u8rsVqvPXEMjyMVpQ7m/WxuyGb2LzP3ta8U4uLvEERvbzXFIs7Jn08O+JK5LzTD/K83OgtOQjNlDySPys8EpiPuzhNdToBzwk7ZUbQPKsN77tf5Jm8K4mIPK92MzxXrmW7si6vPEgPyDyQsZc7KSf3OyTaNDyMVhS86vk3PGo9qDxbnoq8NT8PPbgsurwjYZU8WomWPHaWNryKyIA8mKHhuISnwLqJAsQ7W3tVuSI3LTw49wo8ulaiO8jLVzxBdWi7OnddvPdnOjzGKNC6jyOEuxKYD7xxGH47JhmRO7zW9DsqXyA9dYHCu6QP5Lyij5G7pcCsvBgIBzzvVDs82Bu+O5tZXTyuYT+8rbD2vI4OkLzrqgC8kLEXvePmOLx0jwO9t54mvTDBVryKkFe8ym5fvNVxgzw8trm8i7o/vDMVJ7tN42q8hq7zu4xrCLzSHbO8z97WvGLyf7sear07nhFZvJCxlzy5QS48nOfwO+/bm7xZ7cG8bdJuvA2hN71SU2K8/DtdPKUkWDxt9SM8tnS+POty17sryn47jFaUPEYIFTzWY0K75nv/umtSnLtkuDy8urpNPCBxcDy4F0Y7si6vPOZ7/7yyyoO7/nq5PLO8Qju4LDq7KJnju/KoC73C4sC8VzXGu7VfSrxry7s79K8+vBgIh7wy6z49BxzMO/MhqzzU1S68n8KhPDuM0bxhnJW7ptWgOwjNlDpWmfG89WCHPBmWmrw1HNq8PvUVu2dwODxdQZI8JQQdPO0V3zuNSNM80jInPHiyXTqwi6c6TGrLulntQbv+Fg68tG0LvX43ybyjHSW8oFC1OxW0NryPAM+7asSIPMbEJLzuP8c7X+SZu+nyhDyheh09Sk4kPCBxcDzKkRQ9GIGmu6qikLzIZ6w8KeaAvG31I7y5yA49plwBPZ4R2bw7ocW8C9v6O/XZpjumOUw80Y+fvH/TnbzchAI9/LT8PDdGQrwAgmw8dOVtvbuAijxIiGe7eWOmujFdq7zlJZU8Jy4Fu5rgvTw9yy29aJogPZ6K+DstLJC8cRh+O7vktbv8cwa7WXSiPFQZH7xoIYE8e6KCOsjujLu5yI48nAomO0gPyLztsbO7X9bYOmcMDT0gqZm8VS4TvOrkw7v7rUk7HCvhu94SljvSHTO8VBmfO5tZ3bsRbqc6gxmtPP56OTsAupU8NbiuvMC4WLzxcOK706tGvG80gDwXbDK8Cb9TvGZbxLwJv1M8p2O0PAQAJTxDtMQ6b3V2vJd3eTyEp0A9nOfwvJxu0bvjgo0706tGvC4J27yEIGA8YZyVu0pOJL3ei7U7Rx0JvQuFkLvWeDa9wn4VO3Tl7Ty+eXy7KeYAPEkW+zvvuOa54KdcPIBhMT0mGZG8Oq+GPBdXvrzqXWO8u+Q1PErHQzwiTKE7ldRxvNRcDzyPZPo7n8IhvWkotLy8ckk8aJogPAHPiTztFd+77IfLvBW0tjrlJZW7UUyvO/cDDzyKs4w87Y5+u3z47Ly1X8q8YZwVPEO0xLvaInE8k2mTvHhOsrvW3OG8K+2zvOOCDblQsNq6PUTNPLMg7rwGB9i8wkZsO70jEr1lv2+7XCwevBs5ojppBX87YYchvI1dR7x41ZI8Qa2Ru4f7kDy0Sta7L7qjvGdi97oriYg8Kl8gPFDTD7v3Zzq8c3qPvCxmU7vFNpE7KeYAvBfzkjz4kaK73GFNu1/kmbo+4CG8419Yux5qPTzwBYS736CpvEMt5DsBrFQ8J4RvOpgoQjzibRm8R3PzO8Jb4LtgDgI80aQTvdtaGrz817E7IjetvBPueTyBixm9p07APBkPujx2iPU8vQ6euxudTbt2Mou6rmG/vJuRhrxoIQE6e6KCvKUkWLo5hZ68+jQqPAYqjbxNBqA8NjHOvPH3QrxZ7cG8pp33u0GtkTvlJRW9E60DvftJHjt9DeG7eLLdOVWnMryH+xC8KCDEvOhWMD2cCiY8Lh7PvMWaPLw+4KE8O6FFPFYgUroIzRQ8TFVXPiKwzDylRw08y6YIPX2pNTx9RYo7tNE2vODKEbwAuhW7CDHAPI4OkDwJ4gi7C9v6PETJuDr8tPy7ZUbQu3rcxbxdHl28+G7tvHRszrx4TrI8ZUZQvAajrDu4LLq76oCYPC30Zrz7rcm81bL5O0eWKDy75DU8g5JMvOuVjLthnBU8prLrO3uiArtOMIi6WXQiPGiaoDsIMcA8tOaqOz71FTxDUBm9Z3C4vNmUXbuyp846rbD2uuZ7f7vXFAu9vnl8PE4bFDwE6zC82bcSvMhnLDxHHYm8+rsKvKDsCbwW3p48lpquOyg1uDrHUjg8QGB0vCggxDzcxfi7bufiPIqQV7xMaks8LRecvF/B5LuH+xA9XR7du4DaUDxQsNo6+G7tO+TtazrgtZ28fQ1hvAm/0zxMjQA8iFH7PODKkTy5yI683XbBvPZSRrxcCem89T1SvH6b9LxOGxS8krhKvDj3Crr1
oX28tNG2vPgYg7ryqIu8Draru4O1gTxhAEE9C2Lbu8fZmDwRS/K7huacu9kwsrw/bjU9gy4hPXG00rsy6z68ox2lPDaq7Tt2qyq74bxQPKLzPLvRj58806vGvD69bDy6us27SRb7O/fgWTsW3p67IrBMvGfp17t/sOg7etxFO1ueCrs0Kpu7mVKqPP1lxbwaAfm6GZaavP56ObxNBiA8mVIqve2Ofrufn2y8AzpoPNOrRjy8csm7ztcjO6MdpTvmsyg7M919vTQqGzwaqw49pPpvPBmWGjoYCIc7CnAcvL4VUby2EBM8Bz8BvAaOOL0BrNS7UNOPvEtjmLyzWJc8cMKTOSTvKD1O+N6800ebvNZ4Nr0TJqM8Sk4kvCrDy7zI7ow75JcBPeazqLuQFcO8ExEvu2S4PL5BEb08m3wSPcwRZ7s8Uo48W54Ku7Mgbjz817G7S2MYPCM+YLvc6K24jyOEvNeNqrywi6c7ujNtvKSWRLxzV1o8UJvmu70jEj3Q80o7lPcmO5XUcTppBf87AkipvOPmuDq/KsU7A09cvBoB+Tu+FdG7Qp/QvCTvqDvzNp88xOlzPNMPcjxaiRY75SUVuyCpGTyoeKi8L7qjOha7abua4D084KdcPH0wFj2k+m+8c3qPu11Bkjy3Zv08ldRxvPdnOjyyQyO8uLOauwCC7LxKx0O79T1SOnEY/jzazIY88X6jvKnxxztEyTg8oFA1vLIuL7wxOna8rmE/vKSWRLzhvNC7OhOyvOQQobvNSZA8tnQ+vNKWUjyEQ5U7Oq8GO1FMrzw8Uo47MEi3PLTRNjvB8AG9m3ySPPhubbyay8k8D0S/uywCKD0p0Yw8/nq5PNkwMjxrUhw8w3BUvLEZu7ruP8c7ulYiO9Z4tjw1Pw+80Y+fvPhubbzchII7xox7PHuiArzYGz67dfphvBMmo7wqXyC84UOxvL6csbziNXA844INPRzHtToJW6i78yGrPKsN7zzzISs85Nj3vHwbojzVTk48XAlpPC+XbrzpSG88NI7GO7clB72+OAa7vYc9OylKrDsaJC47dGzOvB1Vybri0UQ8clAnPCx7x70upa+7m5GGPDFyHz0cK+G892e6PEeWKDoyZN48n8Khu7LKg7bchAK8qzAkvI+HL7zk7Wu8GXNlvMP3NLs494q6bdLuvJuRBr01o7o8djKLOq79E7ui8zw8ExGvvDj3irsznIc72TCyvEk5sDyvEog8h188vH2ptbpJnVu8qQY8vOWJwLyCaGS84+Y4PE4wCL0hm1i8isgAPaMIMbzzE+o7mdmKPGmvFDthh6E7B5Xruroz7TstkDu8xP5nPGMcaLo8PRq8rv2Tu8pu37u4kGW8GquOPCt0lDzxfqO7qNzTPFsXqjwIRjS8OpoSvGcMDbw/Coo8YHItvH43yTxnYne85O3rOVLaQrpZ2E08jwDPPOTY9zlCOyW84C69PKBQNbxjP507TI2AOrgXxjtHHQm9BOswvbnIjjzP3ta8aSg0vLG1j7wtFxy8fiLVuzfiljv+AZo8xZo8vK92szu9Dh484C49vYBhsTu9IxI7wltgu5xuUby0Sta8jFaUPEKf0DvRpBO8huYcvPM2nzzoQTy91v+WvJJUn72SVB88CtRHunp4mrxF0Ou7jwDPuxbeHryUW9I6nhFZvPxzBj0zALO8tdhpPAaOuLvBVK07doh1PKnxR7z8tPw8VpnxO8jujDu0SlY7lxNOPJaarrzwBYQ8gD58PIZKyLyv79I8wwwpvQV5xLsnpyS8B7igvJCco7uIUfu8vSOSvHSPAzw6E7I7N79hPPMT6rtQvhs87IdLO3E7s7nzISu8xihQvSggxDqF0ag7RVfMvB8bBjm8ckm8UNOPuyI3rTwFFRk8eeoGPTSOxjukD+S8dyTKvLCgmzwpJ/e7Mus+u56tLbzlJZW7QXVoOzPd/TxF8yA8lzYDPUgPyDx9DWE8TpQzvPKoC7zhvNC800ebPKBQtbzzIau8+JGivLclhzzouls8m3ySPK5hvzwYXvG8pau4u8OTCb1ryzs9eLLdPMw0HDybkQa97bGzPE+ppzw+9ZU8iRc4OrXD9bjyqIs6+aYWPGghgbzP3lY7JLd/PDaq7btnYve8QsKFvGKxiTzq+be7f+gRPbtrFj1cLB48urpNPG/8VrxIJLy8eCv9u1oCNjxaAra8CM0UvR1VyTsw5Is6bfUju5I/q7sNBWO8zZ/6PKDsibw6EzI8XboxupXpZbyoQP+885pKPBSfwrvTJGY8QJgdPf+PLbz5phY6OHAqPMwR5zyrqUO8UtrCPODKETuuYb+7MdZKPFJ2lzlt0m68AB7BvMFpIbybWV2806vGvD0v2bxUGZ89djKLPEV6Ab2qohA7p8dfvFqJljwGjrg8oFC1PNGkk7z1YIe8GF5xPDYxTry3JYc8hq7zu6KPkbzcbw485JcBva3TK7wVUAs9UtpCPOG80Dtg60w8jGuIu0RljTzk2He8YWTsO/DNWrrD9zS8u2uWvPSvPrwpSqw8/NexPH6+KbwAHsG7RMm4uktjmLtDUBm8y4NTPOuqAD1nDA08ZeKkOp4RWTyPAM+8PcstvF6s8LwYgSa8Muu+uyVoSLz3fK67\"\n + \ }\n ],\n \"model\": \"text-embedding-ada-002-v2\",\n \"usage\": {\n \"prompt_tokens\": + 8,\n \"total_tokens\": 8\n }\n}\n" + headers: + CF-RAY: + - 936f92b30c267df3-GRU + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 27 Apr 2025 16:07:28 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=nFZbdbFah.LWbzeW.glnLPLT8LbiE2gQXhAnfko3dOM-1745770048-1.0.1.1-6X7_GmSlrhT2JDG3UI.GdG197sz4YerSq59cGRFhchAip2X4Az27dMYcavJW.noLsarkBrxKgf7B5SZg7354p8ZOH9VBHq35KlZ6QavVyJ8; + path=/; expires=Sun, 27-Apr-25 16:37:28 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=.vAWcVjI11dzJOYj038IwLPbCQXQ1.tBpWmDu6Xt46k-1745770048727-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-allow-origin: + - '*' + 
access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-model: + - text-embedding-ada-002-v2 + openai-organization: + - crewai-iuxna1 + openai-processing-ms: + - '78' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + via: + - envoy-router-97cfd68d4-7qqkm + x-envoy-upstream-service-time: + - '51' + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '10000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '9999989' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_b2ab62724f2840722a52cfed5dd64580 + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"batch": [{"properties": {"class": "App", "version": "0.1.126", "language": + "python", "pid": 35099, "$lib": "posthog-python", "$lib_version": "3.9.3", "$geoip_disable": + true}, "timestamp": "2025-04-27T16:07:28.073953+00:00", "context": {}, "distinct_id": + "5303ea6e-a423-419e-a71c-3a0f0eaaaa16", "event": "init"}], "historical_migration": + false, "sentAt": "2025-04-27T16:07:28.576735+00:00", "api_key": "phc_PHQDA5KwztijnSojsxJ2c1DuJd52QCzJzT2xnSGvjN2"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, zstd + Connection: + - keep-alive + Content-Length: + - '453' + Content-Type: + - application/json + User-Agent: + - posthog-python/3.9.3 + method: POST + uri: https://us.i.posthog.com/batch/ + response: + body: + string: '{"status":"Ok"}' + headers: + Connection: + - keep-alive + Content-Length: + - '15' + Content-Type: + - application/json + Date: + - Sun, 27 Apr 2025 16:07:29 GMT + access-control-allow-credentials: + - 'true' + server: + - envoy + vary: + - origin, access-control-request-method, access-control-request-headers + x-envoy-upstream-service-time: + - '37' + status: + code: 200 + message: OK +- request: + body: '{"batch": [{"properties": {"class": "App", "version": "0.1.126", "language": + "python", "pid": 35099, "$lib": "posthog-python", "$lib_version": "3.9.3", "$geoip_disable": + true}, "timestamp": "2025-04-27T16:07:28.073953+00:00", "context": {}, "distinct_id": + "5303ea6e-a423-419e-a71c-3a0f0eaaaa16", "event": "init"}], "historical_migration": + false, "sentAt": "2025-04-27T16:07:29.624095+00:00", "api_key": "phc_PHQDA5KwztijnSojsxJ2c1DuJd52QCzJzT2xnSGvjN2"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, zstd + Connection: + - keep-alive + Content-Length: + - '453' + Content-Type: + - application/json + User-Agent: + - posthog-python/3.9.3 + method: POST + uri: https://us.i.posthog.com/batch/ + response: + body: + string: '{"status":"Ok"}' + headers: + Connection: + - keep-alive + Content-Length: + - '15' + Content-Type: + - application/json + Date: + - Sun, 27 Apr 2025 16:07:30 GMT + access-control-allow-credentials: + - 'true' + server: + - envoy + vary: + - origin, access-control-request-method, access-control-request-headers + x-envoy-upstream-service-time: + - '28' + status: + code: 200 + message: OK +- request: + body: '{"batch": [{"properties": {"class": "App", "version": "0.1.126", "language": + "python", "pid": 35099, "$lib": "posthog-python", "$lib_version": "3.9.3", "$geoip_disable": + true}, "timestamp": "2025-04-27T16:07:28.073953+00:00", "context": {}, "distinct_id": + "5303ea6e-a423-419e-a71c-3a0f0eaaaa16", "event": "init"}], "historical_migration": + false, "sentAt": "2025-04-27T16:07:30.646962+00:00", "api_key": 
"phc_PHQDA5KwztijnSojsxJ2c1DuJd52QCzJzT2xnSGvjN2"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, zstd + Connection: + - keep-alive + Content-Length: + - '453' + Content-Type: + - application/json + User-Agent: + - posthog-python/3.9.3 + method: POST + uri: https://us.i.posthog.com/batch/ + response: + body: + string: '{"status":"Ok"}' + headers: + Connection: + - keep-alive + Content-Length: + - '15' + Content-Type: + - application/json + Date: + - Sun, 27 Apr 2025 16:07:31 GMT + access-control-allow-credentials: + - 'true' + server: + - envoy + vary: + - origin, access-control-request-method, access-control-request-headers + x-envoy-upstream-service-time: + - '28' + status: + code: 200 + message: OK +- request: + body: '{"input": ["test file"], "model": "text-embedding-ada-002", "encoding_format": + "base64"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '88' + content-type: + - application/json + cookie: + - __cf_bm=nFZbdbFah.LWbzeW.glnLPLT8LbiE2gQXhAnfko3dOM-1745770048-1.0.1.1-6X7_GmSlrhT2JDG3UI.GdG197sz4YerSq59cGRFhchAip2X4Az27dMYcavJW.noLsarkBrxKgf7B5SZg7354p8ZOH9VBHq35KlZ6QavVyJ8; + _cfuvid=.vAWcVjI11dzJOYj038IwLPbCQXQ1.tBpWmDu6Xt46k-1745770048727-0.0.1.1-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.66.3 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.66.3 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.9 + method: POST + uri: https://api.openai.com/v1/embeddings + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": + \"embedding\",\n \"index\": 0,\n \"embedding\": 
\"MriavBo/HbyzL4C8J0aGvA0LObyTrcU8NvT2vLBR6ryxjEi8dTacvMRTrTwGPdo6DkYXOwUCfLonc4G7WAsGPG+VODyUyQu8k9rAPHzJnLxQECy8+yQ8PDnSjLwOoI07kp9ivEp9q7zbg2k8lMkLvdYMr7v6vGK7iMKiPAels7w3qOO8UC/EvCGzBbxzOe66GTE6O3CEA70Fie08b3YgvEPOZDzc60K7+TVxPKEciLxcKMq8Mz+MuRgjVzz0Csq8mcc3vDYh8jxRS4o7q1M+un+MarzdJiG8jtyUu9BMMzyxq+C71oU9vACq2TsOc5I7Xb0evYjCortCR3O8kzQ3PNEAoDuOKKi8KijsO8lRWTxPqFI7GCPXu6WTwjwdTn48pZPCuxq4KzzJnWw8jaG2ux4C67zQ8ry75wNhPGqXDLvV8Gg84Y94vPli7LuTNLc7V9CnPLITujsRKP07vErwPBQGk7vLQKQ7Q1VWPONAkzwr3Ng8egfNO2N9GrwtBgI7XUSQPMM35zyIo4q8T07cvH+M6rsI4JG8oIezu9DyPLwb8wm9ZNeQO4yT0ztnjPu7LdmGOwWJ7TseAmu81ZZyPHaQkry/zo+8yOn/O7cfSTx2F4Q8iByZvCuvXbxzDHO79iYQPGOqlTwee/k8j5ABvTKqNzwZBD88e5whvG6H1bwxQl47lLsovBMXSD048aQ8bi1fPLmVBTxPe1e8E0RDPMmd7Lz72Kg8OGqzvMjpf7yxuUM8i1j1PAhng7pYC4a83tqNvNdmJTy4ALE8JVe7Oj0cTDztPGu8mi8RPKeviLxnjPs8B9Iuu+85Gbv5NXE8jXS7O+mmGLs5pZG8DEnpu3wjkzyzAoU8EpBWPOpaBb2UFR88IeAAPYcONjxvoxs8mtUavHtCq7wlSVg6RAnDPBsgBb2kd/w7WAuGu+7DXDw31V66bdNoPJJy5zsAXka8sPfzvJ/TRjzKjDc9cznuPNh0iLyw93M7wxhPPPk1cbxJQs27DVdMvEWeFz2CXJ064Y94PBKCc7wBPy6/sYxIvPZTi7pjqhW8reiSPAE/Lj1YCwY9jMBOO0Mo27xP9OW6sea+uzZ76DyZxze84+acvFCJOjvFraO8gBNcPK1CibxiFUG88GYUPVAvRDzFYZA8SvY5u2qXDDzt4nS71gwvPO08a7vJney8dO3aPO7RP7q5tJ28GuWmuw2Ex7se1e+7V/0iPfAagTtnbWO84UNlvEkV0jzttXk8LEQyvD07ZLogHrE8GYuwOw1XTDqYX167Ks51PNQ8fLsfEE66lMkLPIijCjxRxBi8emHDu2m2JLyOVSO8z11ovH/Y/TwNCzm8e7s5PLnCgDwOGRy8BYltuQVccjyHWkm80S2bvEk0ajxqapG8pZPCPA6/pbvwGoG8Vu+/PHYXBLyHaKy80PI8vIDm4Dzq0xM97Yj+O5RCmrz0ZEC73jQEuypV57qNzjE6Z4z7vHtvprt7b6Y8ULa1u1dXmbxbR+K7ytjKvKXAvTt2FwQ9jq+ZPLnCgLzXZqU4YrtKPJn0srtWlUm83Z8vPD9lDT2TrUW8Ks71O28cqjsIhpu8n0xVu0S9r7samRM8SY7gO8+3Xjwsca08jjaLOmIVQbzpeZ28h1pJu0LtfLwUUqa8Y8ktvXz2FzyScuc77zmZO/wyH7zRLRs9oA6lO1uT9TxbR2I8dEfRO24tXzwGEF877Q/wvFYcO7yaAha81itHuj3C1TsqzvU8ghAKvWqXDDy5DpS8EoLzPMMYz7vq05M82HSIvGOqlbwlV7u7yyEMu4jvHTwILKU7je3JvDFv2bxibzc81Gn3uojQBTxENr68pHf8u4ZM5ryScmc7/F+avCoobLzwwAo6IYYKvbhaJ7uaTqm859bluj3vUDz0KWK8NwLaO3AqjbwrvcC5JmWevIK2EzyIdo+6JmUevdBrS7qw9/O7J3OBvOg+vzwGl1A8UFy/u7zDfrxpLzM7mW1BPJUjgrzFYRC8iEmUPB57+bs5pZE8hh/rOrHY2zx6rda7vu0nOqtyVrz8Mp88bxwqvNV3WjxkMYe8qr5pujOZArsIZ4M8j5CBu8nKZzv6Q9Q8hgDTOwx25Dz2zJk7c5NkO2rxgrvjXys8V6MsvXqt1jtaZnq84iTNO3BXiDwxnNQ7293fvEvXIb2BezU8DuwgPHZjlzyL/v66JdDJO7D3c7xC7fw7pigXO595ULvoMNy64GL9u6evCLoT+C887ZZhPLimOj10wN88lMmLOXtCK7xzZmk8Tm30O+85GbvFrSM9ZAQMvCaENjw+/bO8SY7gPAWJbTzXkyA7AvMaPDeo4zzjQJO80dMkO+Gd2zuUnJA877KnPEwSgLzeB4k83fklvILjjjxb7Wu8amqRPPzmCz2khV+87sNcvFHxEzwrNs88nh/aPIHVqzyCiZg8XnGLu+8MHroMo188yX5UvBoSorlLuIk8jAxivCr7cLxYCwa8f19vuytjSjyYBWi6MVDBPFyvOzxY3oo82HQIPW92oDxXV5m6m1yMvOGP+Lwlo048m7aCuu/+ujqxX027w2TivHM5bjwBi0E8V4SUPHR0zLsdTn67Qhp4PF2Qo7yymqs71+2WPN2fLzx1gq+7sJ19PB62V7xRPac80GtLvENV1rxw0Ja8oA6lPGrxgrzvOZm87bV5vOotijx62lE7ps4gPSfsj7pQAkm8Z+ZxPA04NDp/X288YyOkvIjCortaZvo8aYkpPFYcO7wUJau87h3TvLnhGDzdU5y6Jr8UPXAqjTy+DEA8Ks51vMRTLbzXZqW8DhmcvB6odDwIOgi5vma2O4e0v7zXOao8rIC5O2/CMzwTREM8H+NSPAhZILy/VYG77bX5u/oWWTpc3La7+hZZPHyqhDw5S5s8lLsovJQVHzz5rn887wyePPt+Mrob84m8jGbYPDb0djyyQLU86cWwPNxyND3UaXc8RcuSPGQxBzzJflS8sm2wPKZ0qrusjhy8Mriau3z2F7y8SvA7PiovPFEejzxENj48nh/avIJcHTzLxxU7cFcIvLHmPjq3TMQ8LVKVPLgtrLyTNLe7HgLru7MvAL3XGpK8Q87kvNLhhztLqia8rLsXvPaABr0mvxS96aaYvKDCkbzqWgU6gagwOyBLLLybtgK9XnELvGTXkDwhWY+7V1eZOr7ArLsg/5i7GarIPCGGCrwZMbq8AH1eOjhqs7kaEiK80MXBPNwYvjwSr+67jGbYO+Bi/bvkbQ4712alPCVJWDvDkd28UALJPA0LObxEkLQ6lJwQPJkTS7yzL4A83Bi+uB8QTrygDqU774WsvC1SFTx89hc7Hqj0O2ghUDxpL7O8SiM1vAbEyzyYjFm8q3JWO+O5IbxzDHM8mH72O6A7ILyIdg89V9AnvJ8AQrxq8YI6/OYLvZOOrTs2Tu06e0IrPAiGmzyyIR28M5mCvFWH5ruy9CG8rK00vJLM3TvE+ba87Q9wvNbfs7yG09e8FNkXvB57eTxjyS087TxrvMttn7xL16E7VpVJvMoFRrzt4nS81XfavNh0CLzuw9w6y
Z3svN3MKjyzL4A7Vnaxu4GoML0VjYS8yuatuvtRN73DkV28pP7tO10XFTz1Rag8nh/aPC0Ggrv8QAI8bdNoOk4T/rs+hCU8nwDCu+g+P7yU6KO8qybDOksEHTzpeZ08fKoEPU97V7g2Tm284GJ9PLDK+Drh6W67nsVju9XwaLwYb2q64vfRO+fWZbxwKg08cISDvI0axbsCTRE9+rziu4ECJzyfpku5gdWrPKUM0bzwGgE8yl+8vMNk4rsYb+o6AKpZPKWTwryybbC8fFCOPHXcJTviUcg82wpbvNDyPDvj5pw57tG/PA5zkryUbxU7Jf1Eu+l5nTuhHAi7COCRvDgeIDtXsY85EtxpPHbqiDvgYn28B0s9u3xQDrwrkEW5CLMWO1ZJtrsf8TU9Ya1nPMW7Bj0gLBQ9Griru2e5drw+dkK6OBA9u3x9ibzF2p48qybDPLMChbzccrS8v0eePJ8tvTysrTQ8gdUrvGnjn7sYb+o8dr2NPFE9p7zEcsU6etpRvfxfGjuCEAq8mgIWvAG4vLx62tG7JmWevKVmxzynrwi9Hi/mPEmOYDw+/bO8ZNeQO/kIdrzUPHy80bQMPOeparx0wN88y8cVu9AfOLyIdg88Ak0RvPt+srwCeow61+2WN3qA2zzud0m9aRCbvEJ07jsVYIk89N1OO2OqlTsOoI28AnqMvMhw8bnQxcE7mZo8PA04NDqmRy88qr5pvFU7U7xutFC8P96bvNuw5Ls/vwO7UZcdvEk0aryl7Tg7H5c/PFejrDtdkCM8iyv6vOmmmDy5aAo9OB6gvFyvuzve2g08uACxO0JHc7wHeDg8VmjOu1HEmLygh7M86tMTvbc+YbwC8xq9vu0nvBic5TzvWLG7VnaxuxKv7rsZMbo7ThP+Oo6CHjxq8YI2joKeO/atgbwHSz26cP2RO3sVMLthNFm77h3TOuep6jvFBxo7WDgBvdQ8fLw2e+g7LCWauquf0bsgHjE7Er3RvO+yp7z0Vl285wNhPNwYvrlWHLu8rK00vFUOWLxeywG9H/E1PO8rtrz03U483HK0vMx7grl7nKG8PZVavGN9mjyxMlI89b62O2kvM7x1Npy8tz7hu4LjDr290eG6gmqAO/Qp4jvdrZI8DTg0vGN9GruAx8g8Z4x7uxpsmDygtC68Q6/MvLeY17s9wlU8Hi9mO3WvqrsFXPK8CCwlPO/+ujvkmok7jAxiPOHpbjx/jGo6jXQ7vPYmELwbIIU8uHm/uxl9Tby5woC8k1NPvAAxS7wRKH08zz7QvOrTEzm90eG8IKUiOzb0drxRSwo7n1o4vSVXO7zJney7b6Mbvb7ArDzgYv27BQL8OfVFqDxWaE48+dv6u7nCgLvRAKA8CLOWvD0cTLwgHrG67Q/wvO8MnrxnbWO6pnSqPPsFpLy3xdK7bxyqvB7Vb7zK2Eo8UZedOxNxvjw4xCm81R3kvBoSIrrn1uU7s9WJPGlcrrsOv6U8DNBavJScED3vK7Y87eJ0u1FLirsamZO4vbJJPOmmmLziq748+kNUPvRWXTzpTCK8aQI4PR7V77v8jBW8cFcIPGk9Frit6JK77qTEPDHJzzwT+K88dHRMO44oqDogpaK7RAlDPAf/Kb2IHJm8jUdAvMNFyrx6rVY87/66vLFfzbvQTDO78O0FPcW7BrwzEhG8s9WJvBKC8zx8yRy56Gu6vLPVibw9aN87gG1SPGReAr04ajM43EW5O/SDWDwhswU9iKOKuis2Tzz5CPa8LHGtO2m2pLxPe1c8SRXSPO2W4Ts+0Li84RbquwfxxjwlKkC8aVwuu8NFSjyTrcW5T3vXO4YtTjt0wN883HI0vKeCDTvqWoW8+TXxu/vYqDy88Pm8zHsCPR9qxLw2Tm07IVmPvKoY4LvIcPE7v3QZvHx9iTy5lQW8lLsoOpjY7Dt1r6q8ZASMvBVgCT0T+C88b5W4PGpqkTzQTDO8ZxNtOwLUAjyMhfC8XILAvLD387xXsY+73OvCO88RVbx/BXm6LVIVvdAfuLw5LIO8RBemvHvotLvhcGA89UWovF1EkDyYMmM8xCYyPKtTvrwBP647wzdnPNcaEjuCiZi7uIciu2dtYzun3IO7RXGcu9BrS7yzAoU89q0BvfwynztVh2a8Qu18PD8Llzxp4x+04zKwvDhqMzw2x/u7DkaXPIyya7qMwM676Gu6O59MVTmzAgW89iaQvLgtLLvUPHw8/F8avUwSALxzOW65ps4gPT6jPTzcRTm79INYvOqHADsgeCc7rRWOvFzcNji4eb88/DIfvCr7cLxRPSc8yfdiPDOZAruzAgU9XRcVOtEtm7xLi4669RitvCBLrLwMKlG8duoIPL1YUz17byY7w0XKvLN7E73Q8jw8XNy2vGeM+7wSr268DbFCPRIJZbylwD28K2PKu25oPb6rn9E8vaTmPHucoTtd6hk8xTSVO/Q3xTzkmom8mfQyPEVSBDxvwjM8EVX4u+otiryqGGA8sCTvOsshDDx7u7k7COCRvEMo2zxhrec8yhOpPD79M7ysB6s7yZ3su1dXmTsVjQS63HK0vD1o3zwa5Sa7aKhBvC2si7sMo188v84PPCQcXTz7fjI8AFDjutGmqTsYb2q8BS93OxlQ0jsr3Fg7XeoZPVyCwDppAji7sH5lPErJPjwAMcs80S0bPHyqBD3ifsO8ejTIPD5XqrxaOX+8sYxIvFuTdTwtUpU72KGDvNEAILx/MvQ7fH2JOhgjV7ysYaG8YuhFO0uLDjx/MnS8ANdUvHwjk7yCiZg8JpKZvFFLijxXhJS8SbvbvO08azzeNAS8dTacPGEHXrwC8xq9aKhBPFtHYryGLc47h4fEu+7wVz10occ7XChKPPk1cTwO7CC6ZDGHvJoCFjt1Nhy8aS8zvAhnAz2kK2m8YkI8vOoAj7wM/VU7UqUAO2e5drxnE+07sPdzvJ7FY7y938S7ThN+vO0PcLxQ1c07v84PPe9YsTzuHVM8OaURPSBLLD2U6CM8FWAJvVejrLsH/6k7vjk7PF0JMjykWOQ83cwqvLBR6rxk15C8AtSCO8hwcTxpAri7sPdzuQUCfDz2zBm7sm2wu0uLjr0tBoK81XfaPHaQkj3pphi84vfRPMshDDv7fjI9yVHZO5u2gjw+V6q7htNXvI2htrymoaW8avECu+gRxDvKXzy8pKT3u/sFpLxJFdI8cP2RvNzrQrxwKo08dM7CvB1OfrxuaL07JSrAvPmu/zz1vjY8Mqq3vBNEQzkUBpO8bmi9PICazbx8IxO8iNAFO91THL2MZti84RbqPA/6g7ykpHc8piiXPLLHprt7Qqu8bmi9O9dHjbw3tsa51itHPCaxMbwmZZ68GdfDOkJH8zqbXAy80B+4ukk0ajw5/4e7BQL8PC1SlTx/BXm8AH3evFHxk7wg/xg74xOYvGfm8TwHpbO7H5c/u17LgbwlV7u7fCOTPIDHSDuIHJk51ivHPAz9VbxRaiK7E/ivvFt0XTvWK0e9fH0JvRQzjjxpXC683a2SvNG0jLxKfau8ULY1OsO+2Dy9WFO4ddyl
u11jKLuMhXA8CDqIvCcZizoxnNQ8hkxmPKYatLy/KAa9aT2WPACq2TvRpik8Z4z7u2e5djy+GqO81Dz8vAJ6jL1E3Mc8RUQhO+hd17sfakQ70MXBPIdayTtVDli6GyAFvIH0QzxMEoC83HI0O+otCr3qAA+8YdpivA3ePbygwhE92KEDPW4ORzyGTOY7xa2jPHu7ubxpArg7BYntO1vta7wf8bU81ivHu61CCT08Dmm8ARKzvJp7pLlw/RG9K+o7vNLhhzz0Cko7ycpnvCB4p7vQHzg8CA0NPHZjF7vW/ku8RZ4XvZ95UDtEF6a8FDMOvNvdXzyCtpO8buHLu/nbejwSY1u7DCrROyX9xDtq8YK8kp9ivORtjjqngo28ps6gPHa9jbweidw7MZxUvHUJoTwORpc7Vkm2PBmqyDzYdAi8CA2NPIhJFDtOQHm8418rPB6o9LzVd9q8rIA5vDjEKTwldtM8YdriPIKJGDwGatW8avGCPCoobLvWWMI8H2rEPLHY2zwHHkK9RfiNPPWfnjy4ALE8ucKAuzH2yjrXRw26RGO5OEu4Cb2CL6I7S+WEO+SaCbugh7O8ejRIPC0Ggjt0dEw8lOijPLjTtTz0g1g8abaku43OsTsrY8q8vdHhuwFsKbzIQ/a8lG8VveLYubpJFdI8s04YPNQ8fLsOcxK8LBe3PIK2k7weqPQ7CA0NvBlQ0rstBgK9da+qPPpwTzxFUoQ8Yo7PPAIgFryfAMI8ZAQMO5gy47v7q627y8cVPI42Czz1RSi8gi8iO5L5WLnu0T+8+9govIHVK7vpH6e5Xb0ePCXQSbz1n549RXGcPMjp/7tpXK470VoWPD/eGzya1Ro86Zi1PAceQrynVZK8v3SZPDnSjLutQgm8c2ZpvIyy67wHSz08b3YgvKEciDz8Mp+7ROqqPBmLsDt6gFs7ExfIPN2tkjw5eJY6sMp4Oh57+Tu8HfU6v1WBu0OvzLzVHWQ7Wjl/POOMprvc68K8w+vTPMl+VLwYI9e6ucIAveSaCTxjnDK4iNCFPIFOOjzFrSO9yyGMvEu4ibtWlUm7Ks71vL+hFDxnjPu7\"\n + \ }\n ],\n \"model\": \"text-embedding-ada-002-v2\",\n \"usage\": {\n \"prompt_tokens\": + 2,\n \"total_tokens\": 2\n }\n}\n" + headers: + CF-RAY: + - 936f92b4cd887df3-GRU + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 27 Apr 2025 16:07:33 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-allow-origin: + - '*' + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-model: + - text-embedding-ada-002-v2 + openai-organization: + - crewai-iuxna1 + openai-processing-ms: + - '162' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + via: + - envoy-router-7bbfccd4b9-5rlz8 + x-envoy-upstream-service-time: + - '98' + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '10000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '9999998' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_5414bfd96cbd16d84a01f68e994a38f2 + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"input": ["This is a test file for directory search"], "model": "text-embedding-ada-002", + "encoding_format": "base64"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '119' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.66.3 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.66.3 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.9 + method: POST + uri: https://api.openai.com/v1/embeddings + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": + \"embedding\",\n \"index\": 0,\n \"embedding\": 
\"CtRHuxbenjwIql+8vF1VvHnqhrwbOSI9wKNkuL+xpbxWmXG8Mk/qvBiBpjt7jQ48ujNtOzbNojzazIa7rbD2OytRXzwaq468YrGJPO2O/rq00bY5TQagu5aaLrsP4JO8nTSOu5VwRrySuEo8jFYUvSR2CTveEpY60Qi/PFjmDr2m1aC70aQTvfP+9Tp6eBo8mmeePM1ehLwbssE85rMou1pmYbtTi4u8SrJPvHnqBr0AHkG8fPjsu5d3eTzklwG9EnXaOtEIvzyfwqE7xCEdPF6s8Lxs4K87GZaau1EpejzGS4U7S0DjPFdKurrKCrQ7BGTQvD/nVLxYX668Rx0JPHed6Ts6E7K8AsHIPGS4vDvUXI86+jSqOhxOljzwaa87Z4Usu06UMzxIqxw87j9HOk0GoLxfXbm7GZaaPHaI9bsGjrg8tV9KvAuaBLyBdiU8pJbEO5uRBroi0wE8Evw6PaswJL0FFRm83MV4u9Orxjx63EU8SRb7u8DbjbxCwgU7qreEPBxOljxrtse7Kl8gvdGkk7zDcNS7ItOBO5Npk7hpBX+88AUEvELChTwHHMy8pPpvu8CjZLv12aa8C9v6O/d8Lrzv8A+9pjnMOzKHE7oT7vk5X+SZvOLRxLxT77Y7/48tPIxrCD2SP6s8lpquvNIdszu+AN07ENLSvJ4mTb2uYb+7MmTevHWBQj3AP7k8YZyVPBOtgzxIiOe8xZq8PLTRNrwmGRE8T6knvfxzhrwqXyA8cMITPcT+Z7svl268HxuGOzqaEjzddsE7vnl8vEnAEDxBily8IZvYOzPdfbt/sGg8E+75u7M14jueEdk7Z+lXPNzFeLyxGbu8vF1VvDe/Ybq+efw8C9t6u+DKEb0r7TM9oXodPYxWlDwCJXQ8KCBEvNzFeLw/Coq8DrarPLvkNb23JQc8gqCNvMMMqTvJ9T8836CpPGVpBTsaiNk7qP8IvMuDUzs1HNo8oWWpPGfpV7x/sOi8r3YzvFuQSbzx90K8gn1YvLeepjx4Ob48eDk+PKFlKTwJWyi/3f2huyeE7zqCBLm8f9OdPOuqAD1pBf88K4mIPBOKTrz6mNU6OHAqvL55fDxWQwe8NqptOxfQXbyv71K8a8u7Oix7x7w+vWw8s1iXPK8SCLu9I5I8nop4O3X64buqG7C7KJljOzqvhjw5hZ68syDuPHX64Tvv2xu9pcCsuzFdq7txGP47aJogPYgQBTyH2Fu8cCY/OwUVmTzuKtM8Ck3nvHuigrv12aY6RN4sPLEZO7vddsG7kcYLPboz7btDUJk8iiwsvOY6iTyzvMI7B7igPGbUY7xcLB47USn6u1ntwTy8ckm8X125PHDCkzyXNgO8StWEPC2Quzv+AZq8jV3HvMn1vztKss+8hbw0PCenpDo0Kpu8RVfMPH0N4bq7SGE7cmUbPPU90jzQFgA9KdEMPcsfqDpsZ5C8hCDgO/M2nzygUDW54JLovGmvlLyPhy89r1P+OkCDqbwHlWu7fpv0uxQY4jsEh4U8//NYu8JG7Lx0j4O7lXDGO2RUEbvei7U6dqsqPHYP1jym1aC8plyBu0yNADwCJfS7We3BO+Tta7sBrNS7+R+2u7VfSjwFecQ8g7WBu0CYnbyNwfK8Of49vFZDh7qu/ZO8RGUNvSUEHTzchAK8SsdDPJpEabyAPvw8rxKIulJ2FzyCaGS7XCyePPy0fLvrDqw7EmDmu3uNjroJOHO7w3DUuy4JW7yijxE9h3SwvDxSjjwlBB030jKnPFC+mzxdM9E7+R+2vPhu7bzd2uw78ZOXuu4/x7uA/YU8YYchvOT7rLxIJDw8zwEMvYosrLu12Om8FJ/CPDsoJrwiFHg8plyBvBC93rt2q6o7uBfGvBudzbslaEi8JNo0PMJ+FTysWoy8hq7zu2MqKbz8XpI7P+dUvLdm/TwwSLe7WfsCvc3Crzs56ck7QIMpvP8rAj1/TL07HCthuyJMobxIiGc3ScCQO3g5PjpUGZ+7TjCIPIbmnDu7gIo8eDm+Osl8oDwzFac8NI5GPLeeprxO+F454JJovFUuEzxuHwy8X+SZu5xu0bv5CsI86UhvvFQZnzy4kGU77Y5+PGb3mDtf+Y07F1e+OwYqDb108y47mkTpvPiRorzkdMy8Z4UsPJNpkzuDtQE8USn6vECYnbzUXA88j4cvPCcL0DwznIe84lilO82f+rx4K/078AWEPB4GkjycCqY8QGB0ubaJsjx41RI8PcutPBs5ojzYoh66y4NTvLZ0PrzeJwo8w5MJO80m27mKLKw8j2T6uiM+4Dzp8oS7HGMKPXTzLrwwwVY856XnPHN6Dz2YoWG8ExEvPJAVwzxKTqQ7FDuXPNRcj7xEQtg8Kl8gvGj+S7yLQaA7RmzAPCg1uDyDtYE7PWeCvC0sEDtAg6k8GojZPIZKyDwIRjS8XVaGPNTAOjwPyx89Oq8GvZCxl7zibZk8jM8zvDqvBr1g60y8dquqOsuYxzw494o5cCa/PKlqZzx+vik8OelJO5385DwBl2C8pSRYu+2Ofrwse0c8/yuCPAV5xLuQsZe83MV4vFg8eTwJW6g7w5OJu2ghAbxQNzs8rv0TPLNYl7z4bm076w6sPNIdM7ohm9i81U5OOkx4DDxLQGM81mPCO8WvsLtgDoK7aRNAPd4SlrxQm2Y8Hs5ovOTt6zvc6K27hVgJOzDkizv8XpK8RN6su27n4rvL/HI7gMVcvK8SCDzeEhY9C5oEPU+Gcrwkt/+8N+KWvMA/OTzYBko8HE4WPW91djwawAI5IkyhvIu6P7zgtR29IhT4u+sOrDtO+F481FwPvPH3Qrwryv67iZ4YPKdOQDztsTO59T1SO0V6gbuqf1u8U4sLvT0vWbvo3ZA7Ka7XOsZLhTvKkRQ8e2rZu/AFhDwxOna879sbO5+fbLwEZFC8UNMPPYdfvDzYGz4944KNPJ6KeDx41RK7nibNO9rMBjyuxWq8UwSrPHTzrrsFFZm6XqxwvJR+hzySPys8YvL/u67F6jt3nek7P9LgvAm/UzzeEha81bJ5O8MMKTxomqA8K4kIPHEY/rv97KU8RVfMvPo0Kr3v25u8rsVqvPXEMjyMVpQ7m/WxuyGb2LzP3ta8U4uLvEERvbzXFIs7Jn08O+JK5LzTD/K83OgtOQjNlDySPys8EpiPuzhNdToBzwk7ZUbQPKsN77tf5Jm8K4mIPK92MzxXrmW7si6vPEgPyDyQsZc7KSf3OyTaNDyMVhS86vk3PGo9qDxbnoq8NT8PPbgsurwjYZU8WomWPHaWNryKyIA8mKHhuISnwLqJAsQ7W3tVuSI3LTw49wo8ulaiO8jLVzxBdWi7OnddvPdnOjzGKNC6jyOEuxKYD7xxGH47JhmRO7zW9DsqXyA9dYHCu6QP5Lyij5G7pcCsvBgIBzzvVDs82Bu+O5tZXTyuYT+8rbD2vI4OkLzrqgC8kLEXvePmOLx0jwO9t54mvTDBVryKkFe8ym5fvNVxgzw8trm8i7o/vDMVJ7tN42q8hq7zu4xrCLzSHbO8z97WvGLyf7sear07n
hFZvJCxlzy5QS48nOfwO+/bm7xZ7cG8bdJuvA2hN71SU2K8/DtdPKUkWDxt9SM8tnS+POty17sryn47jFaUPEYIFTzWY0K75nv/umtSnLtkuDy8urpNPCBxcDy4F0Y7si6vPOZ7/7yyyoO7/nq5PLO8Qju4LDq7KJnju/KoC73C4sC8VzXGu7VfSrxry7s79K8+vBgIh7wy6z49BxzMO/MhqzzU1S68n8KhPDuM0bxhnJW7ptWgOwjNlDpWmfG89WCHPBmWmrw1HNq8PvUVu2dwODxdQZI8JQQdPO0V3zuNSNM80jInPHiyXTqwi6c6TGrLulntQbv+Fg68tG0LvX43ybyjHSW8oFC1OxW0NryPAM+7asSIPMbEJLzuP8c7X+SZu+nyhDyheh09Sk4kPCBxcDzKkRQ9GIGmu6qikLzIZ6w8KeaAvG31I7y5yA49plwBPZ4R2bw7ocW8C9v6O/XZpjumOUw80Y+fvH/TnbzchAI9/LT8PDdGQrwAgmw8dOVtvbuAijxIiGe7eWOmujFdq7zlJZU8Jy4Fu5rgvTw9yy29aJogPZ6K+DstLJC8cRh+O7vktbv8cwa7WXSiPFQZH7xoIYE8e6KCOsjujLu5yI48nAomO0gPyLztsbO7X9bYOmcMDT0gqZm8VS4TvOrkw7v7rUk7HCvhu94SljvSHTO8VBmfO5tZ3bsRbqc6gxmtPP56OTsAupU8NbiuvMC4WLzxcOK706tGvG80gDwXbDK8Cb9TvGZbxLwJv1M8p2O0PAQAJTxDtMQ6b3V2vJd3eTyEp0A9nOfwvJxu0bvjgo0706tGvC4J27yEIGA8YZyVu0pOJL3ei7U7Rx0JvQuFkLvWeDa9wn4VO3Tl7Ty+eXy7KeYAPEkW+zvvuOa54KdcPIBhMT0mGZG8Oq+GPBdXvrzqXWO8u+Q1PErHQzwiTKE7ldRxvNRcDzyPZPo7n8IhvWkotLy8ckk8aJogPAHPiTztFd+77IfLvBW0tjrlJZW7UUyvO/cDDzyKs4w87Y5+u3z47Ly1X8q8YZwVPEO0xLvaInE8k2mTvHhOsrvW3OG8K+2zvOOCDblQsNq6PUTNPLMg7rwGB9i8wkZsO70jEr1lv2+7XCwevBs5ojppBX87YYchvI1dR7x41ZI8Qa2Ru4f7kDy0Sta7L7qjvGdi97oriYg8Kl8gPFDTD7v3Zzq8c3qPvCxmU7vFNpE7KeYAvBfzkjz4kaK73GFNu1/kmbo+4CG8419Yux5qPTzwBYS736CpvEMt5DsBrFQ8J4RvOpgoQjzibRm8R3PzO8Jb4LtgDgI80aQTvdtaGrz817E7IjetvBPueTyBixm9p07APBkPujx2iPU8vQ6euxudTbt2Mou6rmG/vJuRhrxoIQE6e6KCvKUkWLo5hZ68+jQqPAYqjbxNBqA8NjHOvPH3QrxZ7cG8pp33u0GtkTvlJRW9E60DvftJHjt9DeG7eLLdOVWnMryH+xC8KCDEvOhWMD2cCiY8Lh7PvMWaPLw+4KE8O6FFPFYgUroIzRQ8TFVXPiKwzDylRw08y6YIPX2pNTx9RYo7tNE2vODKEbwAuhW7CDHAPI4OkDwJ4gi7C9v6PETJuDr8tPy7ZUbQu3rcxbxdHl28+G7tvHRszrx4TrI8ZUZQvAajrDu4LLq76oCYPC30Zrz7rcm81bL5O0eWKDy75DU8g5JMvOuVjLthnBU8prLrO3uiArtOMIi6WXQiPGiaoDsIMcA8tOaqOz71FTxDUBm9Z3C4vNmUXbuyp846rbD2uuZ7f7vXFAu9vnl8PE4bFDwE6zC82bcSvMhnLDxHHYm8+rsKvKDsCbwW3p48lpquOyg1uDrHUjg8QGB0vCggxDzcxfi7bufiPIqQV7xMaks8LRecvF/B5LuH+xA9XR7du4DaUDxQsNo6+G7tO+TtazrgtZ28fQ1hvAm/0zxMjQA8iFH7PODKkTy5yI683XbBvPZSRrxcCem89T1SvH6b9LxOGxS8krhKvDj3Crr1oX28tNG2vPgYg7ryqIu8Draru4O1gTxhAEE9C2Lbu8fZmDwRS/K7huacu9kwsrw/bjU9gy4hPXG00rsy6z68ox2lPDaq7Tt2qyq74bxQPKLzPLvRj58806vGvD69bDy6us27SRb7O/fgWTsW3p67IrBMvGfp17t/sOg7etxFO1ueCrs0Kpu7mVKqPP1lxbwaAfm6GZaavP56ObxNBiA8mVIqve2Ofrufn2y8AzpoPNOrRjy8csm7ztcjO6MdpTvmsyg7M919vTQqGzwaqw49pPpvPBmWGjoYCIc7CnAcvL4VUby2EBM8Bz8BvAaOOL0BrNS7UNOPvEtjmLyzWJc8cMKTOSTvKD1O+N6800ebvNZ4Nr0TJqM8Sk4kvCrDy7zI7ow75JcBPeazqLuQFcO8ExEvu2S4PL5BEb08m3wSPcwRZ7s8Uo48W54Ku7Mgbjz817G7S2MYPCM+YLvc6K24jyOEvNeNqrywi6c7ujNtvKSWRLxzV1o8UJvmu70jEj3Q80o7lPcmO5XUcTppBf87AkipvOPmuDq/KsU7A09cvBoB+Tu+FdG7Qp/QvCTvqDvzNp88xOlzPNMPcjxaiRY75SUVuyCpGTyoeKi8L7qjOha7abua4D084KdcPH0wFj2k+m+8c3qPu11Bkjy3Zv08ldRxvPdnOjyyQyO8uLOauwCC7LxKx0O79T1SOnEY/jzazIY88X6jvKnxxztEyTg8oFA1vLIuL7wxOna8rmE/vKSWRLzhvNC7OhOyvOQQobvNSZA8tnQ+vNKWUjyEQ5U7Oq8GO1FMrzw8Uo47MEi3PLTRNjvB8AG9m3ySPPhubbyay8k8D0S/uywCKD0p0Yw8/nq5PNkwMjxrUhw8w3BUvLEZu7ruP8c7ulYiO9Z4tjw1Pw+80Y+fvPhubbzchII7xox7PHuiArzYGz67dfphvBMmo7wqXyC84UOxvL6csbziNXA844INPRzHtToJW6i78yGrPKsN7zzzISs85Nj3vHwbojzVTk48XAlpPC+XbrzpSG88NI7GO7clB72+OAa7vYc9OylKrDsaJC47dGzOvB1Vybri0UQ8clAnPCx7x70upa+7m5GGPDFyHz0cK+G892e6PEeWKDoyZN48n8Khu7LKg7bchAK8qzAkvI+HL7zk7Wu8GXNlvMP3NLs494q6bdLuvJuRBr01o7o8djKLOq79E7ui8zw8ExGvvDj3irsznIc72TCyvEk5sDyvEog8h188vH2ptbpJnVu8qQY8vOWJwLyCaGS84+Y4PE4wCL0hm1i8isgAPaMIMbzzE+o7mdmKPGmvFDthh6E7B5Xruroz7TstkDu8xP5nPGMcaLo8PRq8rv2Tu8pu37u4kGW8GquOPCt0lDzxfqO7qNzTPFsXqjwIRjS8OpoSvGcMDbw/Coo8YHItvH43yTxnYne85O3rOVLaQrpZ2E08jwDPPOTY9zlCOyW84C69PKBQNbxjP507TI2AOrgXxjtHHQm9BOswvbnIjjzP3ta8aSg0vLG1j7wtFxy8fiLVuzfiljv+AZo8xZo8
vK92szu9Dh484C49vYBhsTu9IxI7wltgu5xuUby0Sta8jFaUPEKf0DvRpBO8huYcvPM2nzzoQTy91v+WvJJUn72SVB88CtRHunp4mrxF0Ou7jwDPuxbeHryUW9I6nhFZvPxzBj0zALO8tdhpPAaOuLvBVK07doh1PKnxR7z8tPw8VpnxO8jujDu0SlY7lxNOPJaarrzwBYQ8gD58PIZKyLyv79I8wwwpvQV5xLsnpyS8B7igvJCco7uIUfu8vSOSvHSPAzw6E7I7N79hPPMT6rtQvhs87IdLO3E7s7nzISu8xihQvSggxDqF0ag7RVfMvB8bBjm8ckm8UNOPuyI3rTwFFRk8eeoGPTSOxjukD+S8dyTKvLCgmzwpJ/e7Mus+u56tLbzlJZW7QXVoOzPd/TxF8yA8lzYDPUgPyDx9DWE8TpQzvPKoC7zhvNC800ebPKBQtbzzIau8+JGivLclhzzouls8m3ySPK5hvzwYXvG8pau4u8OTCb1ryzs9eLLdPMw0HDybkQa97bGzPE+ppzw+9ZU8iRc4OrXD9bjyqIs6+aYWPGghgbzP3lY7JLd/PDaq7btnYve8QsKFvGKxiTzq+be7f+gRPbtrFj1cLB48urpNPG/8VrxIJLy8eCv9u1oCNjxaAra8CM0UvR1VyTsw5Is6bfUju5I/q7sNBWO8zZ/6PKDsibw6EzI8XboxupXpZbyoQP+885pKPBSfwrvTJGY8QJgdPf+PLbz5phY6OHAqPMwR5zyrqUO8UtrCPODKETuuYb+7MdZKPFJ2lzlt0m68AB7BvMFpIbybWV2806vGvD0v2bxUGZ89djKLPEV6Ab2qohA7p8dfvFqJljwGjrg8oFC1PNGkk7z1YIe8GF5xPDYxTry3JYc8hq7zu6KPkbzcbw485JcBva3TK7wVUAs9UtpCPOG80Dtg60w8jGuIu0RljTzk2He8YWTsO/DNWrrD9zS8u2uWvPSvPrwpSqw8/NexPH6+KbwAHsG7RMm4uktjmLtDUBm8y4NTPOuqAD1nDA08ZeKkOp4RWTyPAM+8PcstvF6s8LwYgSa8Muu+uyVoSLz3fK67\"\n + \ }\n ],\n \"model\": \"text-embedding-ada-002-v2\",\n \"usage\": {\n \"prompt_tokens\": + 8,\n \"total_tokens\": 8\n }\n}\n" + headers: + CF-RAY: + - 936f9336cc417e0a-GRU + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 27 Apr 2025 16:07:49 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=EO3qaPuy2laM3xDGRwHtVhJMUVrBq0C4x5BxYYC8dT0-1745770069-1.0.1.1-kOylsOMvWlUF5owqqiIUziYDoC1f8vVA4C7C9em_s1Gdawqe_C0R5yIfCxJzf9.q9LZJQyCGp8L2rJaFzDF0Nk2pkv2v.tT.uQTRlmCgzwY; + path=/; expires=Sun, 27-Apr-25 16:37:49 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=52fi4.4bJilzZrvgAS3YttTnBjtEe8pVmM0VbBM5jis-1745770069782-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-allow-origin: + - '*' + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-model: + - text-embedding-ada-002-v2 + openai-organization: + - crewai-iuxna1 + openai-processing-ms: + - '39' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + via: + - envoy-router-97cfd68d4-nw6rt + x-envoy-upstream-service-time: + - '28' + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '10000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '9999989' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_f9ca57dbb69b376529e9c874f44dba39 + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"input": ["test file"], "model": "text-embedding-ada-002", "encoding_format": + "base64"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '88' + content-type: + - application/json + cookie: + - __cf_bm=EO3qaPuy2laM3xDGRwHtVhJMUVrBq0C4x5BxYYC8dT0-1745770069-1.0.1.1-kOylsOMvWlUF5owqqiIUziYDoC1f8vVA4C7C9em_s1Gdawqe_C0R5yIfCxJzf9.q9LZJQyCGp8L2rJaFzDF0Nk2pkv2v.tT.uQTRlmCgzwY; + _cfuvid=52fi4.4bJilzZrvgAS3YttTnBjtEe8pVmM0VbBM5jis-1745770069782-0.0.1.1-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.66.3 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + 
x-stainless-package-version: + - 1.66.3 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.9 + method: POST + uri: https://api.openai.com/v1/embeddings + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": + \"embedding\",\n \"index\": 0,\n \"embedding\": \"MriavBo/HbyzL4C8J0aGvA0LObyTrcU8NvT2vLBR6ryxjEi8dTacvMRTrTwGPdo6DkYXOwUCfLonc4G7WAsGPG+VODyUyQu8k9rAPHzJnLxQECy8+yQ8PDnSjLwOoI07kp9ivEp9q7zbg2k8lMkLvdYMr7v6vGK7iMKiPAels7w3qOO8UC/EvCGzBbxzOe66GTE6O3CEA70Fie08b3YgvEPOZDzc60K7+TVxPKEciLxcKMq8Mz+MuRgjVzz0Csq8mcc3vDYh8jxRS4o7q1M+un+MarzdJiG8jtyUu9BMMzyxq+C71oU9vACq2TsOc5I7Xb0evYjCortCR3O8kzQ3PNEAoDuOKKi8KijsO8lRWTxPqFI7GCPXu6WTwjwdTn48pZPCuxq4KzzJnWw8jaG2ux4C67zQ8ry75wNhPGqXDLvV8Gg84Y94vPli7LuTNLc7V9CnPLITujsRKP07vErwPBQGk7vLQKQ7Q1VWPONAkzwr3Ng8egfNO2N9GrwtBgI7XUSQPMM35zyIo4q8T07cvH+M6rsI4JG8oIezu9DyPLwb8wm9ZNeQO4yT0ztnjPu7LdmGOwWJ7TseAmu81ZZyPHaQkry/zo+8yOn/O7cfSTx2F4Q8iByZvCuvXbxzDHO79iYQPGOqlTwee/k8j5ABvTKqNzwZBD88e5whvG6H1bwxQl47lLsovBMXSD048aQ8bi1fPLmVBTxPe1e8E0RDPMmd7Lz72Kg8OGqzvMjpf7yxuUM8i1j1PAhng7pYC4a83tqNvNdmJTy4ALE8JVe7Oj0cTDztPGu8mi8RPKeviLxnjPs8B9Iuu+85Gbv5NXE8jXS7O+mmGLs5pZG8DEnpu3wjkzyzAoU8EpBWPOpaBb2UFR88IeAAPYcONjxvoxs8mtUavHtCq7wlSVg6RAnDPBsgBb2kd/w7WAuGu+7DXDw31V66bdNoPJJy5zsAXka8sPfzvJ/TRjzKjDc9cznuPNh0iLyw93M7wxhPPPk1cbxJQs27DVdMvEWeFz2CXJ064Y94PBKCc7wBPy6/sYxIvPZTi7pjqhW8reiSPAE/Lj1YCwY9jMBOO0Mo27xP9OW6sea+uzZ76DyZxze84+acvFCJOjvFraO8gBNcPK1CibxiFUG88GYUPVAvRDzFYZA8SvY5u2qXDDzt4nS71gwvPO08a7vJney8dO3aPO7RP7q5tJ28GuWmuw2Ex7se1e+7V/0iPfAagTtnbWO84UNlvEkV0jzttXk8LEQyvD07ZLogHrE8GYuwOw1XTDqYX167Ks51PNQ8fLsfEE66lMkLPIijCjxRxBi8emHDu2m2JLyOVSO8z11ovH/Y/TwNCzm8e7s5PLnCgDwOGRy8BYltuQVccjyHWkm80S2bvEk0ajxqapG8pZPCPA6/pbvwGoG8Vu+/PHYXBLyHaKy80PI8vIDm4Dzq0xM97Yj+O5RCmrz0ZEC73jQEuypV57qNzjE6Z4z7vHtvprt7b6Y8ULa1u1dXmbxbR+K7ytjKvKXAvTt2FwQ9jq+ZPLnCgLzXZqU4YrtKPJn0srtWlUm83Z8vPD9lDT2TrUW8Ks71O28cqjsIhpu8n0xVu0S9r7samRM8SY7gO8+3Xjwsca08jjaLOmIVQbzpeZ28h1pJu0LtfLwUUqa8Y8ktvXz2FzyScuc77zmZO/wyH7zRLRs9oA6lO1uT9TxbR2I8dEfRO24tXzwGEF877Q/wvFYcO7yaAha81itHuj3C1TsqzvU8ghAKvWqXDDy5DpS8EoLzPMMYz7vq05M82HSIvGOqlbwlV7u7yyEMu4jvHTwILKU7je3JvDFv2bxibzc81Gn3uojQBTxENr68pHf8u4ZM5ryScmc7/F+avCoobLzwwAo6IYYKvbhaJ7uaTqm859bluj3vUDz0KWK8NwLaO3AqjbwrvcC5JmWevIK2EzyIdo+6JmUevdBrS7qw9/O7J3OBvOg+vzwGl1A8UFy/u7zDfrxpLzM7mW1BPJUjgrzFYRC8iEmUPB57+bs5pZE8hh/rOrHY2zx6rda7vu0nOqtyVrz8Mp88bxwqvNV3WjxkMYe8qr5pujOZArsIZ4M8j5CBu8nKZzv6Q9Q8hgDTOwx25Dz2zJk7c5NkO2rxgrvjXys8V6MsvXqt1jtaZnq84iTNO3BXiDwxnNQ7293fvEvXIb2BezU8DuwgPHZjlzyL/v66JdDJO7D3c7xC7fw7pigXO595ULvoMNy64GL9u6evCLoT+C887ZZhPLimOj10wN88lMmLOXtCK7xzZmk8Tm30O+85GbvFrSM9ZAQMvCaENjw+/bO8SY7gPAWJbTzXkyA7AvMaPDeo4zzjQJO80dMkO+Gd2zuUnJA877KnPEwSgLzeB4k83fklvILjjjxb7Wu8amqRPPzmCz2khV+87sNcvFHxEzwrNs88nh/aPIHVqzyCiZg8XnGLu+8MHroMo188yX5UvBoSorlLuIk8jAxivCr7cLxYCwa8f19vuytjSjyYBWi6MVDBPFyvOzxY3oo82HQIPW92oDxXV5m6m1yMvOGP+Lwlo048m7aCuu/+ujqxX027w2TivHM5bjwBi0E8V4SUPHR0zLsdTn67Qhp4PF2Qo7yymqs71+2WPN2fLzx1gq+7sJ19PB62V7xRPac80GtLvENV1rxw0Ja8oA6lPGrxgrzvOZm87bV5vOotijx62lE7ps4gPSfsj7pQAkm8Z+ZxPA04NDp/X288YyOkvIjCortaZvo8aYkpPFYcO7wUJau87h3TvLnhGDzdU5y6Jr8UPXAqjTy+DEA8Ks51vMRTLbzXZqW8DhmcvB6odDwIOgi5vma2O4e0v7zXOao8rIC5O2/CMzwTREM8H+NSPAhZILy/VYG77bX5u/oWWTpc3La7+hZZPHyqhDw5S5s8lLsovJQVHzz5rn887wyePPt+Mrob84m8jGbYPDb0djyyQLU86cWwPNxyND3UaXc8RcuSPGQxBzzJflS8sm2wPKZ0qrusjhy8Mriau3z2F7y8SvA7PiovPFEejzxENj48nh/avIJcHTzLxxU7cFcIvLHmPjq3TMQ8LVKVPLgtrLyTNLe7HgLru7MvAL3XGpK8Q87kvNLhhztLqia8rLsXvPaABr0mvxS96aaYvKDCkbzqWgU6gagwOyBLLLybtgK9XnELvGTXkDwhWY+7V1eZ
Or7ArLsg/5i7GarIPCGGCrwZMbq8AH1eOjhqs7kaEiK80MXBPNwYvjwSr+67jGbYO+Bi/bvkbQ4712alPCVJWDvDkd28UALJPA0LObxEkLQ6lJwQPJkTS7yzL4A83Bi+uB8QTrygDqU774WsvC1SFTx89hc7Hqj0O2ghUDxpL7O8SiM1vAbEyzyYjFm8q3JWO+O5IbxzDHM8mH72O6A7ILyIdg89V9AnvJ8AQrxq8YI6/OYLvZOOrTs2Tu06e0IrPAiGmzyyIR28M5mCvFWH5ruy9CG8rK00vJLM3TvE+ba87Q9wvNbfs7yG09e8FNkXvB57eTxjyS087TxrvMttn7xL16E7VpVJvMoFRrzt4nS81XfavNh0CLzuw9w6yZ3svN3MKjyzL4A7Vnaxu4GoML0VjYS8yuatuvtRN73DkV28pP7tO10XFTz1Rag8nh/aPC0Ggrv8QAI8bdNoOk4T/rs+hCU8nwDCu+g+P7yU6KO8qybDOksEHTzpeZ08fKoEPU97V7g2Tm284GJ9PLDK+Drh6W67nsVju9XwaLwYb2q64vfRO+fWZbxwKg08cISDvI0axbsCTRE9+rziu4ECJzyfpku5gdWrPKUM0bzwGgE8yl+8vMNk4rsYb+o6AKpZPKWTwryybbC8fFCOPHXcJTviUcg82wpbvNDyPDvj5pw57tG/PA5zkryUbxU7Jf1Eu+l5nTuhHAi7COCRvDgeIDtXsY85EtxpPHbqiDvgYn28B0s9u3xQDrwrkEW5CLMWO1ZJtrsf8TU9Ya1nPMW7Bj0gLBQ9Griru2e5drw+dkK6OBA9u3x9ibzF2p48qybDPLMChbzccrS8v0eePJ8tvTysrTQ8gdUrvGnjn7sYb+o8dr2NPFE9p7zEcsU6etpRvfxfGjuCEAq8mgIWvAG4vLx62tG7JmWevKVmxzynrwi9Hi/mPEmOYDw+/bO8ZNeQO/kIdrzUPHy80bQMPOeparx0wN88y8cVu9AfOLyIdg88Ak0RvPt+srwCeow61+2WN3qA2zzud0m9aRCbvEJ07jsVYIk89N1OO2OqlTsOoI28AnqMvMhw8bnQxcE7mZo8PA04NDqmRy88qr5pvFU7U7xutFC8P96bvNuw5Ls/vwO7UZcdvEk0aryl7Tg7H5c/PFejrDtdkCM8iyv6vOmmmDy5aAo9OB6gvFyvuzve2g08uACxO0JHc7wHeDg8VmjOu1HEmLygh7M86tMTvbc+YbwC8xq9vu0nvBic5TzvWLG7VnaxuxKv7rsZMbo7ThP+Oo6CHjxq8YI2joKeO/atgbwHSz26cP2RO3sVMLthNFm77h3TOuep6jvFBxo7WDgBvdQ8fLw2e+g7LCWauquf0bsgHjE7Er3RvO+yp7z0Vl285wNhPNwYvrlWHLu8rK00vFUOWLxeywG9H/E1PO8rtrz03U483HK0vMx7grl7nKG8PZVavGN9mjyxMlI89b62O2kvM7x1Npy8tz7hu4LjDr290eG6gmqAO/Qp4jvdrZI8DTg0vGN9GruAx8g8Z4x7uxpsmDygtC68Q6/MvLeY17s9wlU8Hi9mO3WvqrsFXPK8CCwlPO/+ujvkmok7jAxiPOHpbjx/jGo6jXQ7vPYmELwbIIU8uHm/uxl9Tby5woC8k1NPvAAxS7wRKH08zz7QvOrTEzm90eG8IKUiOzb0drxRSwo7n1o4vSVXO7zJney7b6Mbvb7ArDzgYv27BQL8OfVFqDxWaE48+dv6u7nCgLvRAKA8CLOWvD0cTLwgHrG67Q/wvO8MnrxnbWO6pnSqPPsFpLy3xdK7bxyqvB7Vb7zK2Eo8UZedOxNxvjw4xCm81R3kvBoSIrrn1uU7s9WJPGlcrrsOv6U8DNBavJScED3vK7Y87eJ0u1FLirsamZO4vbJJPOmmmLziq748+kNUPvRWXTzpTCK8aQI4PR7V77v8jBW8cFcIPGk9Frit6JK77qTEPDHJzzwT+K88dHRMO44oqDogpaK7RAlDPAf/Kb2IHJm8jUdAvMNFyrx6rVY87/66vLFfzbvQTDO78O0FPcW7BrwzEhG8s9WJvBKC8zx8yRy56Gu6vLPVibw9aN87gG1SPGReAr04ajM43EW5O/SDWDwhswU9iKOKuis2Tzz5CPa8LHGtO2m2pLxPe1c8SRXSPO2W4Ts+0Li84RbquwfxxjwlKkC8aVwuu8NFSjyTrcW5T3vXO4YtTjt0wN883HI0vKeCDTvqWoW8+TXxu/vYqDy88Pm8zHsCPR9qxLw2Tm07IVmPvKoY4LvIcPE7v3QZvHx9iTy5lQW8lLsoOpjY7Dt1r6q8ZASMvBVgCT0T+C88b5W4PGpqkTzQTDO8ZxNtOwLUAjyMhfC8XILAvLD387xXsY+73OvCO88RVbx/BXm6LVIVvdAfuLw5LIO8RBemvHvotLvhcGA89UWovF1EkDyYMmM8xCYyPKtTvrwBP647wzdnPNcaEjuCiZi7uIciu2dtYzun3IO7RXGcu9BrS7yzAoU89q0BvfwynztVh2a8Qu18PD8Llzxp4x+04zKwvDhqMzw2x/u7DkaXPIyya7qMwM676Gu6O59MVTmzAgW89iaQvLgtLLvUPHw8/F8avUwSALxzOW65ps4gPT6jPTzcRTm79INYvOqHADsgeCc7rRWOvFzcNji4eb88/DIfvCr7cLxRPSc8yfdiPDOZAruzAgU9XRcVOtEtm7xLi4669RitvCBLrLwMKlG8duoIPL1YUz17byY7w0XKvLN7E73Q8jw8XNy2vGeM+7wSr268DbFCPRIJZbylwD28K2PKu25oPb6rn9E8vaTmPHucoTtd6hk8xTSVO/Q3xTzkmom8mfQyPEVSBDxvwjM8EVX4u+otiryqGGA8sCTvOsshDDx7u7k7COCRvEMo2zxhrec8yhOpPD79M7ysB6s7yZ3su1dXmTsVjQS63HK0vD1o3zwa5Sa7aKhBvC2si7sMo188v84PPCQcXTz7fjI8AFDjutGmqTsYb2q8BS93OxlQ0jsr3Fg7XeoZPVyCwDppAji7sH5lPErJPjwAMcs80S0bPHyqBD3ifsO8ejTIPD5XqrxaOX+8sYxIvFuTdTwtUpU72KGDvNEAILx/MvQ7fH2JOhgjV7ysYaG8YuhFO0uLDjx/MnS8ANdUvHwjk7yCiZg8JpKZvFFLijxXhJS8SbvbvO08azzeNAS8dTacPGEHXrwC8xq9aKhBPFtHYryGLc47h4fEu+7wVz10occ7XChKPPk1cTwO7CC6ZDGHvJoCFjt1Nhy8aS8zvAhnAz2kK2m8YkI8vOoAj7wM/VU7UqUAO2e5drxnE+07sPdzvJ7FY7y938S7ThN+vO0PcLxQ1c07v84PPe9YsTzuHVM8OaURPSBLLD2U6CM8FWAJvVejrLsH/6k7vjk7PF0JMjykWOQ83cwqvLBR6rxk15C8AtSCO8hwcTxpAri7sPdzuQUCfDz2zBm7sm2wu0uLjr0tBoK81XfaPHaQkj3pphi84vfRPMshDDv7fjI9yVHZO5u2gjw+V6q7htNXvI2htrymoaW8avECu+g
RxDvKXzy8pKT3u/sFpLxJFdI8cP2RvNzrQrxwKo08dM7CvB1OfrxuaL07JSrAvPmu/zz1vjY8Mqq3vBNEQzkUBpO8bmi9PICazbx8IxO8iNAFO91THL2MZti84RbqPA/6g7ykpHc8piiXPLLHprt7Qqu8bmi9O9dHjbw3tsa51itHPCaxMbwmZZ68GdfDOkJH8zqbXAy80B+4ukk0ajw5/4e7BQL8PC1SlTx/BXm8AH3evFHxk7wg/xg74xOYvGfm8TwHpbO7H5c/u17LgbwlV7u7fCOTPIDHSDuIHJk51ivHPAz9VbxRaiK7E/ivvFt0XTvWK0e9fH0JvRQzjjxpXC683a2SvNG0jLxKfau8ULY1OsO+2Dy9WFO4ddylu11jKLuMhXA8CDqIvCcZizoxnNQ8hkxmPKYatLy/KAa9aT2WPACq2TvRpik8Z4z7u2e5djy+GqO81Dz8vAJ6jL1E3Mc8RUQhO+hd17sfakQ70MXBPIdayTtVDli6GyAFvIH0QzxMEoC83HI0O+otCr3qAA+8YdpivA3ePbygwhE92KEDPW4ORzyGTOY7xa2jPHu7ubxpArg7BYntO1vta7wf8bU81ivHu61CCT08Dmm8ARKzvJp7pLlw/RG9K+o7vNLhhzz0Cko7ycpnvCB4p7vQHzg8CA0NPHZjF7vW/ku8RZ4XvZ95UDtEF6a8FDMOvNvdXzyCtpO8buHLu/nbejwSY1u7DCrROyX9xDtq8YK8kp9ivORtjjqngo28ps6gPHa9jbweidw7MZxUvHUJoTwORpc7Vkm2PBmqyDzYdAi8CA2NPIhJFDtOQHm8418rPB6o9LzVd9q8rIA5vDjEKTwldtM8YdriPIKJGDwGatW8avGCPCoobLvWWMI8H2rEPLHY2zwHHkK9RfiNPPWfnjy4ALE8ucKAuzH2yjrXRw26RGO5OEu4Cb2CL6I7S+WEO+SaCbugh7O8ejRIPC0Ggjt0dEw8lOijPLjTtTz0g1g8abaku43OsTsrY8q8vdHhuwFsKbzIQ/a8lG8VveLYubpJFdI8s04YPNQ8fLsOcxK8LBe3PIK2k7weqPQ7CA0NvBlQ0rstBgK9da+qPPpwTzxFUoQ8Yo7PPAIgFryfAMI8ZAQMO5gy47v7q627y8cVPI42Czz1RSi8gi8iO5L5WLnu0T+8+9govIHVK7vpH6e5Xb0ePCXQSbz1n549RXGcPMjp/7tpXK470VoWPD/eGzya1Ro86Zi1PAceQrynVZK8v3SZPDnSjLutQgm8c2ZpvIyy67wHSz08b3YgvKEciDz8Mp+7ROqqPBmLsDt6gFs7ExfIPN2tkjw5eJY6sMp4Oh57+Tu8HfU6v1WBu0OvzLzVHWQ7Wjl/POOMprvc68K8w+vTPMl+VLwYI9e6ucIAveSaCTxjnDK4iNCFPIFOOjzFrSO9yyGMvEu4ibtWlUm7Ks71vL+hFDxnjPu7\"\n + \ }\n ],\n \"model\": \"text-embedding-ada-002-v2\",\n \"usage\": {\n \"prompt_tokens\": + 2,\n \"total_tokens\": 2\n }\n}\n" + headers: + CF-RAY: + - 936f93388d697e0a-GRU + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 27 Apr 2025 16:07:50 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-allow-origin: + - '*' + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-model: + - text-embedding-ada-002-v2 + openai-organization: + - crewai-iuxna1 + openai-processing-ms: + - '132' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + via: + - envoy-router-75c844b786-xxzqk + x-envoy-upstream-service-time: + - '61' + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '10000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '9999998' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_5d278e154a0358a46c53ec740679883c + http_version: HTTP/1.1 + status_code: 200 +version: 1 diff --git a/tests/tools/cassettes/test_search_tools/test_json_search_tool.yaml b/tests/tools/cassettes/test_search_tools/test_json_search_tool.yaml new file mode 100644 index 000000000..2e509ef4a --- /dev/null +++ b/tests/tools/cassettes/test_search_tools/test_json_search_tool.yaml @@ -0,0 +1,300 @@ +interactions: +- request: + body: '{"input": ["\"test\": \"This is a test JSON file\""], "model": "text-embedding-ada-002", + "encoding_format": "base64"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '117' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.66.3 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + 
x-stainless-package-version: + - 1.66.3 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.9 + method: POST + uri: https://api.openai.com/v1/embeddings + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": + \"embedding\",\n \"index\": 0,\n \"embedding\": \"idNbvKMk2jw8C1A8s7SLOnOLqLwZzpU8cwpbvGXq5LwcqlY7ue3avIEq0TwjELA8wsFUOmZAw7wDMAS8trs7PLZl3TwVmlu8SteCPOuihLxnWiA6HwflO6/WL7wjZg48fxKPuIiQKrzKlc47HCukvFRCcbvOcyq8lBeRPBPW3LwTq228u12WvIHUcrsvwoW87I/XOi+XlrxK1wI9h+TtOl2XODyDb5271vFFO39m0ry097y7MvQkvK/WL7uB1g29LOQpvLZlXTxFNSg8djz6PFZczryVhTG7YmQCvIQILbwOimC8h+RtPARztTyVL1O7a7euuoNErrs2p5G84DPgO33NQjy/EAO8xAahuzkEoDdDmn28ZP0RvKpfxLs2pxE8f+cfu2I5kzyv1q88zwy6O7sHuLyj0Ba8FnEHPDMfFDoTq208Tl8AvU93wrzY4LM8+x1uOi/ChbmKqoe8BB3XPIYiirylE8i8R6NIu+YrmTwN8dA850NbOqas17sCWdi5dfnIPFGRHz3PYH28zh3MvLnt2rvN8lw8hnZNvItsa7y7B7i7kGQkO3jXJD27B7g75iuZPCCIsjt/vDC8QgMJPYRccLycFN+8pWkmvOyPVzz4bBw8+OvOvMKYgLxVGR08a+IdPJvRrTweGPc8Z9nSvLFE0Lq2kMw8bmqbuy8WSb0XXtq8p1gUvIn+Sj1RuvM8KksaPF0YBjxDcam7/o0pPYfk7bxiDqQ8nOnvvOv2RzrYilU88ocQPffTjLtnrmM8IfZSO7uIBTx4rLU7eNckuwHASDxKgaS6ECNwu7YRGjvAKEU8tuaqux5u1bz6MBs9/uMHPYqqBzu4qim7DRxAPOy6RjwgCQA9Er4avK0SsbzwQsQ8FwoXPRI9TT2B1g28KnYJvKykkLzuU1a8K47LPAa25rw1pfY88JgivOS73Tw5rsE8/TdLPJkNrzuspJA8+EGtvJWFsTznbso802nIPNadArw4QKG8YuO0O6BIGbztOxQ9evEBPISH3zxRO8E8sW8/vByq1jvTvya/t/5svLJeLTz9DNy8o/nqPFXuLT0xBbc7Hm5VO3Gcurx9TPU7T80gvBn5BD3xsGQ8ch0IvSM7H7zw7oA7Zy+xPNiK1bxWCAu9Hhh3PB8y1Doa5tc8wsFUvDtyQDuNsTc7YmQCO8zY/7sg3hC7SteCPBXwuTuyiRy9F4nJuy8WSTyNW9k7C9kOPaeDAzzFHuO8KFwsOx4Y9zzAfiM9eu9muuftfLutZvS6qgsBPOLOirwakPk7tmXdPOZWCDsSkys8nthdPFYIizySp1W8kigjPXxforslftA7DZvyvOshtzy2EZo7FwqXPEorxjz/0Nq7s7SLPKq1IjwbZyU77BClvGQogTufcW28jbG3PASeJLyVsKC8HsQzPL66JDo3lGS8VG3gvE8h5Dxsz/A8/3yXPJzAm7sv69m70fsnPKoLgbsVRpi82voQvFOp4bw5g1I8Jak/ukA9b7zAfqO7HCukOyCzoTtb07k8qt52u3O2l7tdlzg6r6vAPPKHkDtKrBM8IaB0PEDpKztJEwS7tpBMO4fk7TyfRv47zANvPG5qm7uX81E8i0MXOx5DZjzz9bA8zC7eu0KCu7yvLI68JX7QvE6zw7zMBYq8W6hKvTlY4zsZItm7Zb/1PIGrHrzfRo27X1u3O4GALzuer4k7rywOOY+gpTtJkrY8HkPmu5UE5LxJvSU7vaDHu5/HSzynWBQ9NDfWvMXzc7pTgI27l/PRPLDu8TtOXwA9lVrCvHX5yLv/0Fq8PWGuu6q1IryEXgu7HkNmvNhhAb3ClmW7U6lhuqbXxjrWm2e81nKTu01dZbyNsTc56qDpvC+XlrxnhY870VEGvf+l6zu5mZe8uACIu16vejwPYYy74qMbvMFrdrzgCPG5Z67jucl9jDzJUh28LDqIvKo01bwNm/I7BJ6ku3xfIrxHzrc8mGHyPKdYlLzF9Y67sgjPPO7997v1D468sW+/PP+l67vCmIA8Gjy2PAHASDyXdB+5rRIxPFihmrxz3+s7ojeHvHFxyzyBgK87VRmdPItuBrwrOG08Zb91vGo2YTv6Wwo7WpAIPKOlpzyV2XQ8vrqkupUE5Lxzi6i7cAMrvSiyirpHzre8klH3OrKJnLx1T6c8KvU7vVuoyrzRUYY8qHBWPNMVhTlr4p28kqdVPOft/LviTT25UPgPPOft/LpuPyy8oeEou6XoWLw2fCI9ptdGOqeDAz1PzSA8JX7Qu7Du8Tw0uCM8RJwYPHZnaTz2/OA8Bo2Su/96fDz4lXC8LVLKPAID+jtAP4o87OU1u7TMzTyBKtG8hFxwPEdPBTxfW7c8U1WePLR4CjxTqWE8MQW3u7aQzDrkZ5q8hDMcPHqbozwSPU076nX6vFplGTvrogQ9ojcHPTJIaDxAFBu8iDrMPAvZjjwcgQI9OGuQvIe5/rzKlc677WaDu6POe7xnWiC8jYbIu4gPXbx6GtY4C1jBPI7JeTyoR4I8IfbSPF7a6Tzr9se7Tl8AvO5+Rbxz4YY7NtIAvMlSnTv4bJy7T3dCu9ZHpDug8jq7RJyYu4lUqTttppw7u1t7PMPbsbwLrh88IxAwPZ4DTTs3lGS8hAitO9n4dbwjELA8GfkEPBKTKzyxxZ272U7UPHY8ertIZ0e8vcs2PDvInjuV2XS8GrtoPLYRmjv8SF28tMzNu5zAmzw7csA6vuP4OWI3+Lrqdfo8DuC+u/P1sLzKwL28/6eGvMyvqzuIu5k71p0CPZWFsTxH+aa7zsmIu2lH8zlTgA29h7l+uyrKTDvzIKC82xJTvKBIGbt4rLU70Xpau1G6czx/ZlK8+jAbObf+bLzx29M7CRP1vDlY4ztrDQ28SROEOwGV2Txs+t88HkPmvFXuLTzsEKW5EHlOOyzkqbzBa3a8Wg+7PGuMvzwoXCw8zAWKuQoVED0XNQY5E4B+Oy+XljwQI/C74LStPJIoI7yseSG6MnNXu4tDlzvYYQE8Zb/1u9/wrjyc6e88dXoWvAMwBLyoR4K8nOuKvBOAfrzM2ho9TJsBuw1HrzrA1AE8AgP6uCM7n7wqdom8Oa7BunyKkbwJE/W8lGvUvINELr2fce28xw3RvIHWDb1bqEq8T3fCOwilVLz2/OC81p0CvHWlhTzM2P878dvT
Oj23DLyKqge8ZRVUPLf+bLxJkja8csepu//7ybrKwD27X7GVPGJi5zzJfQy8+EEtPAJZ2DwBwEi8cscpPErXgjzTlLe7FzWGPNhhAbxzCls6U1UePIuXWruI5gg8ajbhO040EbzWm2c805Q3vEzvxDxPd8I7in+YuqdYlDy9TIS84k29Okxwkrs2pxG8faLTu6P5arm30/07yuusu6WUFbx1T6c8rWb0vBmjpryqijO8fk4QvQOvNjyfx0s8jbG3PPxI3TuehJq7yhacvIZ2zbxM70Q8qEeCvJ5Zqztg9Ea82iWAvC2oqLxs+t+8Gc6VuqWUFTxe2um7o857O1plGTy3/uw4f2bSu8CpEryUlkO88O4AvTEFN7zkZ5o82fj1u10YhjxhSiU8nBRfO334Mb3mKxm8if7Ku+1mA71fMMi8F17aPMrrrDsF9II8sO7xPNEmF7lpSQ68TojUO7AZ4btPojG7iA9dOp+cXLyxRFC8Cb8xvB8y1DtWMV+6ysC9PMWfMLuVBOS8EE7fO0IDCbx83tQ7/6Vru3rGEr1Fiwa8/9DaPKYCtrwtUkq76d6FvE/NoLwCLmk81nB4vOyP1zzuKoI7CRP1PJyVrLxWXM68R01qvG2mHLzVrpQ5Q8XsO6W/BLrOyYi8X7GVPDJIaLtkUdU8VRmdu8wFijyb0a07oyTaPIqqB70XM+s8rHmhvEfON7yvVWI8UbrzvLluqDrizoq84LQtO92CjjxwV+67faLTO3X5yLtgHzY8cFfuO0qBJDvwF1U9X1u3uv96/DkvlxY9bFC+PF6v+rzwmCK6g5qMu5QXkbw9tww9JCjyO0WLhrvCQiK8E4D+PM3yXDuVBGS725OgvGF1FLsx2kc8QgMJPKAdqrwnw5w7quARvfjA3ztbKRi8pgK2ux5DZrymLSU87BAlveKjGz2XSTC8CCYiPRqQeTxDmv28djz6O0xFI7xt0Qu94ngsPJhhcjzsEKU8hiIKvC3+Brv6MBs8Xq96O6BziLzMBYo8o/lqO00y9jzu1KO8mntPu3IdCDqNhkg8u1t7vNtosTwmmC28Lf4GvRLpibweRQG70xUFPXM1yjiLGKg6CZRCvPd9rjruU9a6z+HKvJJ85jvgCHG7YaADuse5Dby7iAW8meI/u1tS7LsvbKc7tKFevJfI4jzHuY08Wg+7vJRCALyID928p4MDu2bBEL3OyQi7hF4Lu8DUgbxbfds40uj6vG4UvbxJ6BS9j6AlPC3+Bj0S6Ym880uPPMzaGjwXCpe8NI00vEckljzOSLu8/WK6PP+l67w78w27ltsPPVqQiLwzHxS9c2A5PJQXkTtpHp88CCaivLO0C7zyMbI8poFoPEJXTDzCQqK7VggLvKEMGLyEXHC8CNBDPDzg4Dvm1bq8z+HKvJKn1Ttrt666hN09PPGFdbybJ4y7bpUKvXWlhbwrOG28aUmOuzLJtTwoh5s8fLUAPKgck7z1OOK8u12WO4iQKrza+pC8aR6fvJd0HzxFtNo8faLTu0zvxLvDWuQ8a4y/PN8bnjxhoIO7iqoHvclSnbyhDBi8u9zIPP03yzt+TpC866KEvEdPhToBQRa7Q/BbPA8LLjz1OOK7+GycvA82nbtsUL48VrIsvLtb+7uLwkk7J+4LvZ6vCTvDWuQ5VG3gO1YxXzyE3b28ARanO2daILz/Jrk71dkDvUC+vLtyx6m8cAOrvJTBsjy5Q7m8klF3O+kHWjzzIKA8qt72OncTpjv1Y9E8xAahvCtj3Lqcaj084DNgvIi7mbxH+aa7cK1MPPjAX7z+jSm8tE2bvLO0C7zEBqG8oyRau8CpEj2lE0i9pZSVvIe5frpUQnE7t/7sPCXUrry+jzW8PLVxvJzAGzyS/bM8aw0NvPBtM7xRvI46myeMPITdPbwySOg7bFA+Pl8F2Ts+pN88qgsBPTudr7wtfbm6AZXZu85zqryW2w+7iLsZPbjVGLt6RcU85lYIvCaYLTuyCE+8j8uUO+YAKr0Nm3K8aw0NvZL9M72QZCQ6eS0DvY91Njwl/x28Ai5pPAMwhLzOyYi7UbrzO27pzTzufsU8FUYYu9p5w7wI0EO74F5PPF6verwO4L687LpGPOfvlzyl6Ng8/6cGu1sn/Txz4Qa9RQo5PFYIi7yVWkI8XtppPK1oDzyIZTu7a+IdPGTSojwynka8kiijvGsNDT3tZgM8oyTaPEAUm7wt/gY968tYuzudL7w0YkU8r1VivCX/HTwB6ze7YmQCPaWUFb27sVk8tPe8vJcewbzCF7M8yussPK+rQDwl1K67HwflO4706LtOXwC8/riYvKI3Bz34wF+7J+6LPK0SsTxR5WK8jvToOxQBzDs7R9G8ekXFvIg6TL3ieCw8in+YvCH2UrxuP6y7xXTBvI70aLwSaLy7/PSZvFuoSrx2Z2k8thGavFoPO7s7csA8lbAgPHBX7ryyiZw83aviPINELjzTPtm7w7DCu4MZP7yj+Wo7wH6juiqf3TqmgWg8AcBIvI70aLzI0U+8u4ZqvE93wjvVrpS8pb8EvORnmrpdlzi8o/lqPEckFrqgcwi9XzBIO60SsbsbZyU6DZ2NvP+nBrxRuvM8Ai7pvMpBizu30328evGBPLFEULwJ6iA8Yjf4OoReCzyNsTc8qMa0vJmMYTvKwL08IaB0u77lkzzWnYK8VELxOkejyLzI0c88hAgtu/vy/rtdl7i8cZw6vV6verw+z868zAWKvL7lkzwy9CS7yuusO2TSIr3RUYY8lYWxvJSWQ72e2F27+JXwPGD0xrzKat+8YMlXvAY3NL7bPcI8wCjFPL7lk7yyiZw6U9TQO+qg6TyGIoq85tU6u+Q8qzw3lOQ8bHutvPIxsrznGge8jVtZvPKHEDyB1HI8UyovOnIdCDt3aYQ8bpUKPDZ8Irz7HW48g0SuvHBZiTuQZKS4fUx1vBWa2zyQZCS8CwJjvF8FWTtzCts7//tJPC9BODwaPLY8FzUGvJRCADxOs0O8QD8KvJXZdDyOSse8ChUQPc83KbsUgpm7StVnPEqBpDznxKg8rWiPu1EQUjw0YsW8HhoSu1E7Qb3A/dW7Yg6kOo2xtzxxRty7VEJxvF3CJzy99qW7nq8JvQRztbzgCPG8l58OPN0BQbyvAZ+7pb8EOlYIC7wvl5Y6Msm1vJp7TzyyiZy8lOyhO/iV8Dung4M7ECWLvFaHPbxCgru8xXTBPBStCDxONBE9Vt2bvIuX2jwaPDa8zzcpPJnivzt8s2U8AwUVvAZiozy0du87IaIPPJJ+AT2iNwc8rRKxu/0MXLw5rkE7SGdHOxwrpLsxMCY98yCgvFRtYLx2klg5fN5UvEmSNryc6W+7evEBPWx7rTw4axC8nBTfPEdPBT0xW5U8IAkAvXoa1jxz4YY80xWFO4ahPDx1Tyc86V24u3ktA72q4JE8VG3gu6U+tzz1DfO76yE3vGkeHzs1pXY8LVJKPCMQsL1rt668kGSku7HFHT3kkgm7faLTPGFKJTxHo0g9kOPWO4kpOjuwGeG8T6IxO69V4rwyyTU6+OtOvOf
vFzvdgHO8I2aOuuYAKr0ESMY8hAitvPb8YLzKFpw6OQSgvGzPcDyj+eo8gxm/vP/7yTxCrao7LdOXvGxQPjy3/uw7MTCmPO1mA715LYO8u1v7O/hBrbzio5u7274PPU/NILs7na86Rd/JPHxfIruJKbq7p1gUPUXfybv/fJe8IaD0PL7lE7w5LXS8byx/uxyBgjt1ztm74s4KPB8y1DyfRv4696gdPVj13TxRkZ+8dSQ4u7aQzLw0YkU8jdymvI2xNz0LAuM7lbCgPIZ2zbxdl7i8vrqkPMqVTjsLAuM7yussPKre9rvmVoi5ZwTCu1MqLzyLQfy8vuUTvJL9MzwZTci82IpVu1FmMDtWXM68iDpMu17a6TxdbMk8Nnyiu9XZg7yehJo7G5IUvXY8ejt5LQM90VGGu8VJ0rxKgSS96qBpO766pDwyyTW7Q0Y6vHiBxjnYitW8cK3MvAm/Mb1sz/A8bulNPOJNvbwhog+6tuYqPEpWtbzFdMG78yAgvEA/Cjy25iq70xNqPNLo+rzbPcK8Hu+iOgnqoLufnNw83xuePByq1jsx2ke8vHVYPByBgrw0uKM8baacuPMgILyoxrQ8W1JsvBSCGT2mVvm8Aeu3vLuG6juUlkO8Tl+AvNMVBTxp86+6cXFLPGzP8DvYYYE8XcKnO6v407ojOx+9HFYTvYFVQLw04xI6eIFGvRZxhzzHjh69TrNDvILD4DxC2Jk5qrWiPL3LNjzOcyq9Kp/dvD97i7u2Zd27Mh35u1MqrzmcP867r4BROt/FPz1FtFq82+fjPIZ2TbozSgO8achAPOWqS7wLWMG7i26GPORlf7z6BSy8QldMvN0BwTzTPlk8QD3vPHhW1ztOCSK8zh1MPPhsHDs2UTM9Vt0bPS0nWzsNnQ290uh6POy6Rjy+OVc858SovJdJsLrazyE8hnbNO8wFCryNhkg8lGtUvBu9gzxHTWo6st3fvIMZv7tK1ee7qyPDPPqvzTw+Ja06/6cGu28s/zqeLjy8a+KdvMAoxbzDWmS9dpLYvDudLzy+Dmi6g5qMvD4lLbxFi4a89Q1zPMwuXrwXCpe6tHZvu5pQYDyEXHC8cIJdO+AI8Trk5ky7vrokPUA9bzuqX8Q8zANvO6YCNrtn2dK8ypXOO0qBpDuxxZ084QqMPB7vorxHTeq7DXKevCWpv7s0uKO8wpiAO4g6zLu4AIg9Jf8dPGcvMb3mACq8a7cuPcpqXzx5LYO6SRMEPfjAX7xKrBO8NtDlPHMK27yw7nG7Bov3vIiQqrzMWU07zANvvH4jITyCw+C70xWFPLkYSjo5rkE9uNUYPB8HZTtFtNq6if5KOxDPrDwO4D684F7Pu5+c3DvVrpQ8TgmiO7aQzLzTlLe8kiijuW3RC7nKQQs77TsUvYQIrTsbZyW82DYSPKeDgzxKrBO9E6vtvHdpBLx1JLi6L8BqO/oFrLu9TAS9\"\n + \ }\n ],\n \"model\": \"text-embedding-ada-002-v2\",\n \"usage\": {\n \"prompt_tokens\": + 11,\n \"total_tokens\": 11\n }\n}\n" + headers: + CF-RAY: + - 936f93430d8e7df5-GRU + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 27 Apr 2025 16:07:51 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=ZWmwH7qy_Do2gMLxa1sSsrEo.85HbT2vglvD1Dwg1Zs-1745770071-1.0.1.1-WFNWy52G66A4oGmHOWFAlhnFBFbZJ31LnUNvi7bwKg2R2anwH7wnxAc.zA9GMIYExcRah3uIl5KRt723DyGt5EZ60XcQksxVd2co80t2i.g; + path=/; expires=Sun, 27-Apr-25 16:37:51 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=u7YNKY8LlLPo_cstP53bpHP1eV7pP._t2QByCJYNkyk-1745770071796-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-allow-origin: + - '*' + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-model: + - text-embedding-ada-002-v2 + openai-organization: + - crewai-iuxna1 + openai-processing-ms: + - '93' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + via: + - envoy-router-5f4895bd76-796jv + x-envoy-upstream-service-time: + - '61' + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '10000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '9999991' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_69bfa1db5b89ca60293896c5f37d0d8f + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"batch": [{"properties": {"class": "App", "version": "0.1.126", "language": + "python", "pid": 35168, "$lib": "posthog-python", "$lib_version": "3.9.3", "$geoip_disable": + true}, "timestamp": "2025-04-27T16:07:50.287520+00:00", "context": {}, "distinct_id": + "5303ea6e-a423-419e-a71c-3a0f0eaaaa16", "event": "init"}], "historical_migration": + false, "sentAt": 
"2025-04-27T16:07:51.445161+00:00", "api_key": "phc_PHQDA5KwztijnSojsxJ2c1DuJd52QCzJzT2xnSGvjN2"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, zstd + Connection: + - keep-alive + Content-Length: + - '453' + Content-Type: + - application/json + User-Agent: + - posthog-python/3.9.3 + method: POST + uri: https://us.i.posthog.com/batch/ + response: + body: + string: '{"status":"Ok"}' + headers: + Connection: + - keep-alive + Content-Length: + - '15' + Content-Type: + - application/json + Date: + - Sun, 27 Apr 2025 16:07:52 GMT + access-control-allow-credentials: + - 'true' + server: + - envoy + vary: + - origin, access-control-request-method, access-control-request-headers + x-envoy-upstream-service-time: + - '44' + status: + code: 200 + message: OK +- request: + body: '{"batch": [{"properties": {"class": "App", "version": "0.1.126", "language": + "python", "pid": 35168, "$lib": "posthog-python", "$lib_version": "3.9.3", "$geoip_disable": + true}, "timestamp": "2025-04-27T16:07:51.347055+00:00", "context": {}, "distinct_id": + "5303ea6e-a423-419e-a71c-3a0f0eaaaa16", "event": "init"}, {"properties": {"class": + "App", "version": "0.1.126", "language": "python", "pid": 35168, "$lib": "posthog-python", + "$lib_version": "3.9.3", "$geoip_disable": true, "data_type": "json", "word_count": + 7, "chunks_count": 1}, "timestamp": "2025-04-27T16:07:51.676881+00:00", "context": + {}, "distinct_id": "5303ea6e-a423-419e-a71c-3a0f0eaaaa16", "event": "add"}], + "historical_migration": false, "sentAt": "2025-04-27T16:07:51.852107+00:00", + "api_key": "phc_PHQDA5KwztijnSojsxJ2c1DuJd52QCzJzT2xnSGvjN2"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, zstd + Connection: + - keep-alive + Content-Length: + - '812' + Content-Type: + - application/json + User-Agent: + - posthog-python/3.9.3 + method: POST + uri: https://us.i.posthog.com/batch/ + response: + body: + string: '{"status":"Ok"}' + headers: + Connection: + - keep-alive + Content-Length: + - '15' + Content-Type: + - application/json + Date: + - Sun, 27 Apr 2025 16:07:52 GMT + access-control-allow-credentials: + - 'true' + server: + - envoy + vary: + - origin, access-control-request-method, access-control-request-headers + x-envoy-upstream-service-time: + - '24' + status: + code: 200 + message: OK +- request: + body: '{"input": ["test JSON"], "model": "text-embedding-ada-002", "encoding_format": + "base64"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '88' + content-type: + - application/json + cookie: + - __cf_bm=ZWmwH7qy_Do2gMLxa1sSsrEo.85HbT2vglvD1Dwg1Zs-1745770071-1.0.1.1-WFNWy52G66A4oGmHOWFAlhnFBFbZJ31LnUNvi7bwKg2R2anwH7wnxAc.zA9GMIYExcRah3uIl5KRt723DyGt5EZ60XcQksxVd2co80t2i.g; + _cfuvid=u7YNKY8LlLPo_cstP53bpHP1eV7pP._t2QByCJYNkyk-1745770071796-0.0.1.1-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.66.3 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.66.3 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.9 + method: POST + uri: https://api.openai.com/v1/embeddings + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": + \"embedding\",\n \"index\": 0,\n \"embedding\": 
\"0EYcOgYS2DtbrqU5m0ufuxBuYLyMS108PFJ6vCHKybu9GFa8cDfEvFOtdTuNTKw8JbLZuiX3/rxUaR87CW7/PPGH6jwt+K669rXuPBn6Kby3pF27SpdhPF3Fp7z5zg47PFJ6O6vsLbyc14e69FqWvIhjzbtWgCG8x3RePGnzjLyo1Ny8VwwKvENS2zsAzqA65ytivJh5wrw0PoQ8X5eEvJoFqzuzdlk82ixvOlOt9byUG/276819O343hjuhBYy8tL2cu8akHzxv8c88CFmbPMG7QLyI2QK9CW7/O/LN3juA2NI7KIWFvJSRMjzV/uq7k0s+vNIXKrrD0fO8DhOIPMYvOTyDqi+8/xH3uwEUFbxdxNi7PIMKu046TDxzT5U8ZAlfvLilLLpDDTY9+MxwO13Fp7tMJJm8vl5KuwLlIrzVdKA8izWqvPZwybtcfuQ8ZpaWu6x3xztBgc289aAKPYfX5Ls+aXy8P2rLu8C6cTx8qs48HSdfPBk/T7yWHRs7OCXFvEg8CT2l7Ru7mHlCvHOVibzGpB+7IIRVvCsnobyojzc7g+9UvG/xTzykMfK7Fm3yOrxIFzyEez281OkGPS7IbbwDcLy7+lhZvDo8Rzwwmso7xqQfvfoTNLywpHy8YjjRPED2szygBD092qKkvJZjDzxn3Ao8cDfEu8kBlrxcfuS8/xH3u+3k/zxA9eQ8/obdPIu/dDwghNW7pe2bPGYgYbyy7I48x3TevGvEGryQ7Xg71kTfPMxeDDwHE6c8ldcmOz5p/Dz/h6w8EvpIPM9EfjsQbmC78oi5u0iBLjxRl8I85hWvPKx2+LzoLLE8Sg2XPHvajzyrpjm8cU13PPvjcjy566A8j2Lfu7LsDr0LthE7aTfjPM51Dj2O10W73ouDvBbjp7zxQ5S8Fm3yPGaWlrz75ME8VfWHvPxv2zy/pY08gJOtPB3iObxRUc67dfDhvClWkzyMBek88oi5PDwN1TrRjJA7N9/Qu/nNv7yqGyA8UZbzO8UZBj1uIZE83LkmvA2HH7xdxSe/kO5HvFndFzyI2YK8PiRXPVcLOz2pkIY7otYZPAMrF71cOo669OTgultpAD2NB4e7DlfevOpDM7sNEjm7HOHqPDs9Fr1eUMG7QLEOPce60js5sF48VTlevB0nX7pPgY+7Dc2TOur+DbzR0gS906MSPTKxzDupGtG8hpHwu53sazx4CDO8fTY3PZDuR7rj/d28cU33Ol2AgjzickQ9wgIEvMi7Ibs8Uvo8ScbTO343hrxVOd485qBIPOJzk7yzu368zC38OltpALsrbMa8vNLhOyDJerxgIU87AuWivDSDKTwcnMW8BogNPIQ2GDxuq1u8agjxOuuJJ7zToxI8G8wGvVuuJTwi4Py6BUIZPO+2XLvPRc285hUvOi8PsTykMfK8XDk/vFaAIT1v8p48ajmBPBbi2LxCDGc7BLawO6ql6jtqw8u8SceivCgQH7wvD7E8awqPu26r27xqOYE78okIPOwUQTyFfAw9YCFPPLDVjDuzd6g8bSDCPIrwBDwBWgk7g2WKPCux6zx0ZUi8EvrIO7Mygzwi4Pw70dKEPCb5HLuw1Qw8fKpOu1YKbDyhBYw8eQmCvM4vmrwBnt+52y4NvDHhjbyV1ya99SskvWtOZbw3mqu7wgKEO3lOp7xFI2k8h9gzOo8eCT3XiyI8TCSZuyVttDwRtFS6txoTvZE0vLtV9Qe5VfUHvAATxrvlz7o8p0nDvJE0vDwYtDW8Dp3SPDs9FryhBYw8HJ2UvKoboDxRlvO7yQEWvKx2+Luy7A48gWNsvKpgRb30Wha8GPnaupcy/zvN6aW8IYUkvOr+Db0gyXo7+c0/vOBckbwCKkg8SDwJvdsuDTwwmkq8UMeDvGLzKzxY23k8SlK8O0tTi7zE05G7rHfHOPoTtDyjYbO72uiYvExo77s6PMe7rr4KOR5t07pPxeW7AirIPK4DsLwRtaO8rXiWPK6+CjpIPAm8c5UJPSRs5bteUEE85lpUPCIRjTxI9hS8D+PGPDDf77xuZrY7VsVGOzeaKzzZodU7A3A8PLyNPDxRUh088YdqOzYPEjxpODI8G8wGu2yU2Tw2D5I8z0T+O9os77vvcTc8x3UtvTyDCrvXitO8yxfJO65JpDsEcYs7TGk+vbtHyLwoD9A7szE0PEIMZzy4YAe8AuUiO9ZEX7w0PgS8K2zGPK/UPbyZvzY6WJcjvBub9rymM5A89SpVvE/FZTwWKM07MzzmuxWds7ue7Tq7EkA9PIR7vTypX/Y8ujGVO8IChDzbc7K87Z/aPDUN9DtqCPE7cX4HvCWy2TwP40a8KZs4PFOtdTyzdyg9AZ+uPMwtfLz7nxw8qI83vGXabLuwGrK82y4NuklR7TxzlYk6k8Cku2chsDxa8ns8UdznPOm4mTyy7I66wLrxPGp+pjwls6g8DRI5vAaHPrwRtaO8Qw4FOc2kADxQC1q8l6g0vKiOaLg7PZa7CeS0PBvMhrqmeDU86LZ7PKTszDzJRwo5XlBBvKa9Wrw+3zE8KibSunupfzwZP0+7znWOvBApOzo8DVW8s7t+u10Kzbs3JHY7FZxkPMB1TLxyk2s8RiUHPdJcz7sUVnC7unY6vDL2cbwiVrI8bqvbPM9FzTtv8c+8fPBCPJzXB7wGzbK8YjhRvHY2VrsbVlG8lh0bPDCaSjo/aku8VK5Eu66+CjyUG3077FlmvDAQgLzjuYc8OWu5u5TW17zIAEe9+lmou5g0HTtuIRE6R/XFPKiOaDyjG7+7E4YxO5ft2Tqi1pm80heqvCxtFbyrp4i8DPyFO1XE97tkxQg9KA9QvOm4GTx18OG4ie7muo6SoLzyzq07NcjOu1v0mbv85RC8U611PK14ljwf+bs8Spfhus+7Ajw+37E8jAa4uYBOCLwgyfo7KVVEPJvWODzkzms8h9gzvIwF6TwlboM8saXLO4rwBD0z98C8+ylnPEj2lDsZ+ik83f8aPC8PMTyo1Nw73ENxvDIngjwHV308uneJvHNPFby2jqq6s3covDZUt7x98ZE8LsjtuuGiBb11qu27zrozvGWVx7wnPxG8OoFsvCJV47t3fMq8Lg7ivDIngrxA9rO88HKGvClWE739cKq8ECm7uzYPErzR0bW83ENxPMlHCj2tvTs8GYXDPDmwXryzMgM8DhMIPOqI2LzCR6m8/vvDOm6rWzt2e3u86S2APAVCmTx5Tie8FVgOPKJLADy9GFa8lRzMutx0AbyENhi8ohpwum5mNrxwfTg806MSO7q8Lrzpt8q7aX1XuWfcCr2lMkE8dqwLuTCaSruikCW5sjDluzDfbzwGh768XgucO72OizydYqG7ID8wvJ95I7yMBjg8Lg7ivB/5O7yIHqg8Fp4CvR/5u7yoSpI67aApvd9a8zyw1Yw8B84BPEg8CTk7x2C8pjMQvNHRNTvLGBg8YNwpPB8+YTzgXJG8oY/WOWk4srz15S+8uesgu56olTwCKfk7N5qrO06wgbwRtSO8xulEO0NS27wE+9W723LjvPLOrbzGpB886
OcLvPnODj0iEQ089J+7u0+AwLxzlQm8KBCfvB0orrzE0xG8Z9yKO+3kfzzLGJg8TrABPb8v2LxPO5s7BLawO+UUYLvWRS470IvBPCuxa7xwwl28EbUju6unCLyZv7Y6DhOIPOaf+btrCcC8Dp3SO6/UvbwSP+46urwuO0MOhby4pay5DRHqOwRxCzzeiwO8MieCvLzS4TpUaZ88H7SWO8G7wDs2DxI8Jz+RPHF+h7w8Unq8szIDvYrvNbxxfge8qEqSuyKbV7wnhDa81OkGPeO5BzztWrU85c+6OwIqSDxaI4y7wrwPPV+Wtbx1qzw88xSiuqTszLsy9vG6iWQcvC6El7pXxpU6oQWMu1PeBT18qx28lRzMOtV0oLzjQ9K6pTLBuqIa8LqjYTM94rg4PDYPkjwDcLw8AysXPFhRr7zPu4K8e2RavNy5prwInsA8vl5KPGo5gbhQx4O8wLrxPNosbzw/asu644j3u3Qf1DvJAZY7j2OuO0wjSrynSHS6H/k7vTJspzxorMm8qqVqvKka0bt7ZFq8NckdvSxtFT1XUGC8h9izPKXtGzw5bIi8MJpKPO3kf7wx4D68NQ10O6Qx8jul7Zs8SpfhvGM5oLsF/KQ8p76pO1iXo7zhooU8ke8WPCIRDT1dCf68GLS1u65I1Tt7qf86ohrwO/pY2Ts8DqS80EYcvAaHvrwvygu6Rq/RPIdNmjvtn9o7dqyLuz1TSTsYtYS8Z2ckvTAQALxO9aY7mXoRPOktgLzCAoS7NlNoPLnroDzOurM7Y3+UvIKp4DxTaNA8njLgvOuJJzwz98A6vEgXO1Tz6by21J482ugYumN/lLzbc7I8bducvL+ljbzsWWa8hHpuPPSfuzuZepG8jpKgvFFSHTwUEcs77uYdvKLWGbxnZtW8zi+aPEH3grztW4Q819BHOujni7zWRF+8S5iwO4IfFro1yE48asNLvM+7Ar3toCk8w9HzO6LWGbukpli8BHGLu43W9rt+fCu5a07lPLUDETxyk+u8/8xRu9BGnDygA+67dB/UPOr+DbzCvI+8GoYSve2f2jqDZYq8szG0vOEtHzx7IAQ9C/u2O+m4mburp4i8W2kAuoR7vbxBPCi8PMivvPByhjxBPKg8G5t2O98WnbydYiE9dGVIPFPeBT3CAoS84nMTvXQgIzynSPS7V1DgPFryezwtPiO8WNv5u4R6bjyfMy88/bXPPNOjkjzvttw75yvivG3bnLyHk447TjpMvE2u47v9tU+7GYT0vO1bBLw696E7OjxHPOUUYDze0Ci8uOrRO/SfO7wsswk82Ba8vHoeZjt8qs68GLS1vKmQhjz2te4681nHu+uJpzxQC9o7RmqsPM9E/ju+o2888f2fvAu2EbqVkgE8/SuFvGdnpLx+N4a86CyxOyz3X7ydYqE7b6wqvOr+DbukMXK8QPazu5QbfTyzu/68h5MOvDn2UjzDF2i76OcLulfGlbxdxNg5ivCEvKSmWLwY+dq7wkbaOqNgZLznK+I6JfjNusxejLwFQhk8vNJhPlx+ZLsy9vG6szIDPTmwXrzrRIK85p95u9X+6rveirS85M5rPPC3KzwCKXk8an4mPH8HxbuwpPw6IuB8O8tdvbwhysm8rHZ4vGyUWb0ay7c8lEyNu+FxdTscnZS7iu+1PJwcLbzjiHc8QGsaO7MxtDyiGvA8fjcGvE5/cbwKb047ivCEPLMyA702VLe84SxQOnIJoTyl7Rs9ivCEPBj52jwjVwG9xBi3O3tkWrw1yM67nu06O9os7ztEU6q7JW6DO/oUAz28jby8tY3bu+ktAD1tH/O5++TBPNmhVbwVnTM9BUKZu3oe5ju6MRU7TrCBu6y87DuD8CO8CnAdPX43Br32cRg8izUqvbilLLvvtlw7IMn6u1ojDDuPYl+5HOFqOxRW8Lq/pL68LG2VvCIRDT0ZP088hHruPHyqzjzh5yq8mXoRPMui4jyWYsC8DIZQvLO8zbwdJ188F7Pmu6x3x7xOsAE7eQkCvBQRy7x98ZG7j2MuvO4rw7zu5p08pnmEOqdJQzxo8W489eWvu3mS/bxpOLI8FijNOXIJoTwVWI68oY/Wu4V8DLwQbuC5G5v2u6umObwTQQw7GhDdvFbFRrx8Nei7VTotvDxS+jtF35K88xQivBL7F7xb9Jm8JvmcOxVYDr1qOYG8ABPGPK6NejyuSaS8yUa7vI6SILyUG308m5ETvZoFqzx3fZm8VK5EPOm4mTtt2xw70l2eO4JkOzway7c7ZyGwOnQfVDw7gjs864mnvEVpXTyawIW8jdb2O+Jzk7xvNvU8B84BvHFNd7ze0Ki8VwwKvXc3pbwXbsG7LoSXuytsxjwS+5e8g2WKvD4k17w0PoQ8JCgPvPUqVb0VEho6JW20PDlsiLyOHGu8Z9yKu0xpPr4ebdM8LLMJPUMOBb0oEJ87iqoQPFFSHT1wfOm860QCPPwqNjwF/CQ99FqWu7aOqrxmlhY8OOAfvBmFQzxiONG7JCgPPNrnyTwJKdo8/fp0O5MGmbsg+go8IYWkOQxBK7yikCU8HeOIvBi1hDzRjJA7GPnaO9MtXbwwmxm8MFWlPLFgpjp7ZSk80qJDusrSIzzcdIG7ID8wvBVYjjxvrCq8FMylPNu4Vzzd/xo7Ut22O9PnaDyikCU8iWQcPJZjDzyQ7Xi8RJjPO9OjEr2TwKQ70hcqOjBVpTwe+Oy6etlAvHTarruSNQs8aTdjvAVBSrxhaBK9kXlhPKSm2Lsx4Y28hkzLu3mSfbyHkw6636DnvImpQTxRUc683ENxu/ByhjyzMTS8Rq9RO0zeJLzpLQC9asPLPA2HnzsS+sg78HKGvJCo0zyiGvC7pe0bPKvsrTtbaYA8p76pvJ6olTwbVlG8T8Vlu6Z5hDxIPAk7suwOvGDcKbxgIU87zOhWO+Zbozo7x+A8ccMsvC0+IzvR0gS8v6Q+u8rR1LvPACg8CnCdPCrg3TxgIh66zrqzPCc/ET2r6948Fp4CvfzlEDzu5U68CvpnPP+HLDzJi2A9GoaSvGchsLzaLT488YdqPDJspzyRNDy8sRsBvaMbPzzl0Ik8ttSeOw+eob2D8CO9oL+Xu48dOj1uIRE8rHdHPZK/1Tq6djo9O8dgvFGXwjybkMS8hDYYvCGFJLywGeO6+lkovLUDETz0n7u8O8dgu/ufHLzoLLE8IlXjvDqBbLzVupS8NIMpvHgIM7w8Uno8eMMNvUH3Aj2ATog85lrUu20f8zv7KWe7Fm1yPOr9vrw5azm8q6eIuk1qDb15Tqe8+lkoPQEUlbuteJY8nezrO33xETx6Hma8yxfJPAT7Vbwpmum7xzAIPbzS4bv3t4y85dAJvfjMcDxYlyO6O4K7uiKb1zxQDKk8c5UJPfFCRTwF/CS8UMeDvOJyxLyReWG8NQ10vNsuDT3CAgQ8BxMnPe7mHb0ebqK8sWCmPFojDLw0PgS81HPRPOSJRrzE0xE8Wq3WvHkJAjwx4Y28Hm1Tu8F2mzxeC5y8cX4Hu1w5P7x3fZm82BY8PGBnwzzsWWY8ajmB
vAaHPrxkxYi6kDPtvKSnpzx5kn08fPDCO+P93bymMxC9cpPrOssYGDzULiw74KE2PDmxLTzrRIK8xV4rvAluf705a7k8qZCGu866s7wLtpG7Y3+UPO7lTrynvqm6L1RWPB5tUztL3VW8xRmGO3tlqbwahhK96819N8DrgbwbzIY8vqPvPGisSTtgIh48lJGyPKiOaLwahhK8fwgUPGTFCLx3fRk82ixvvFPeBT1gZ8O7a8QavZi+ZzzeiwO9N99QvBmFwzwVWA68Y37FvCgQHzvfoOc7Jfd+PKV35jvPu4K8Ail5vPiHS7y2SQW8Ztu7vJi+Z7qQM+28TGk+u83ppTtvNvW5Wd0XPHIJITwj4cu8iqoQvXF+h7xZ3Re8yowvPCJV4zvpcqU8Zts7PIqqED0SQD28dCAjPOBckTuzMTS7jQeHPC8PMbwkKI86w9FzPGk347wbzIa81f5qvDHgvjwFQpk8E0EMPOktALuIHdm8S1MLPR5uIrwF/KQ8afOMPGM5oDxCx0G9p0h0PB5uojybkEQ8U611vDDf7zsUEUs7jdb2O9CLQbzK0dS7vEiXux747Due7To75EShOj4kV7yqYMU7zaQAPXRkeTzPRP47e6l/vGyUWTwyJ4K8aPFuvAcTp7wInkC9rQLhvODmW7qo1Fw8Yq6GPPa2vbx+Nwa9Vws7PDzIr7qLe547Q1Lbu2BnQzynSHS8QLGOPFndlzooyio8Lw+xPFkivTtJUe08QTtZPPiHyzv2cRi9TGk+PMi7ITsHV307wbvAOot6z7suyO28JvmcvPH9H7yaBSu8HJzFOxj5WrwE+9U9tL2cPLBfV7zHMAi6uKUsPKUyQbuD79S6Fm3yPF+XBLzFo9C8YGfDPDhqarwUEUu8Tn9xvAfOgbwGzbK7cDiTuv5BuDtN9Fe6tY3bO/9Chzqrpwg9Jz8RPKQx8rlrTmU5cQhSu13E2DzNpIC86ohYO1qtVrypX/Y8PFJ6O+r+Db25deu8xulEO/wqNrxYllS814siveXQiTvyzV46AVqJPKFKMT1rCg+9Am9tvPoUgzxi8yu8z0VNPMhF7Lu5MEa8\"\n + \ }\n ],\n \"model\": \"text-embedding-ada-002-v2\",\n \"usage\": {\n \"prompt_tokens\": + 2,\n \"total_tokens\": 2\n }\n}\n" + headers: + CF-RAY: + - 936f9344eed57df5-GRU + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 27 Apr 2025 16:07:52 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-allow-origin: + - '*' + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-model: + - text-embedding-ada-002-v2 + openai-organization: + - crewai-iuxna1 + openai-processing-ms: + - '196' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + via: + - envoy-router-5b456cc969-csbxc + x-envoy-upstream-service-time: + - '104' + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '10000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '9999998' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_e2376f4641b02bc9b096c5b33fd91f9d + http_version: HTTP/1.1 + status_code: 200 +version: 1 diff --git a/tests/tools/cassettes/test_search_tools/test_mdx_search_tool.yaml b/tests/tools/cassettes/test_search_tools/test_mdx_search_tool.yaml new file mode 100644 index 000000000..914ee947f --- /dev/null +++ b/tests/tools/cassettes/test_search_tools/test_mdx_search_tool.yaml @@ -0,0 +1,255 @@ +interactions: +- request: + body: '{"input": ["# Test MDX This is a test MDX file"], "model": "text-embedding-ada-002", + "encoding_format": "base64"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '113' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.66.3 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.66.3 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.9 + method: POST + uri: https://api.openai.com/v1/embeddings + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": + \"embedding\",\n \"index\": 0,\n \"embedding\": 
\"iL7iuzpnFj1kbQ69KZQLvFdJhDkojug81ggMvRzvEL0a2Ii8mHzGvMjc/TyxQ2o8d1kCPJoWoDsXPM67EvVUPItanbsgnpG8frcDPfHidbywrVK7WwlqPIQP4ju+VC68DTEtuwdrpDueRhG7bngvvXdq5zq5IPs6PZNFPchfT7xOZlC864oXvW507bvJYbA8NzmGujY3pbq6pa08dlchPCkRujxJITi8geFROs2RIbvd4zs6wOwmOuKUHTsrqbK851jFO6FwXzu18mo8YD2dPFD857zGNQE8skXLPIBNm7vA/+y6FzxOPKHxT7tWRyM8Hhf+u+KUHbwZU9a7frcDPDt6XLxXXEq8MxzbunInMLtJpIk8nlf2O1da6Tz1AYI85UOeOiRNErzGx9Y7QcHVOrVxerwPySW8VK+qO9FAorwa1ic8DTMOvETvZbwaV5g6fJ6aPOdYRTuB4dG7I0sxPVB92LwCuGG8v2u2PLcL1DzzaYk7XaFivDplNTuD+ro81fEDPNRusjznWMW8NzkGvV852zualxC85UMevcSubbxJoii9uqUtPJiAiDuRIge8bOC2PIg/U7ppsia86gXlOuB9FTzPOv+80L+xOtA+QTu1cfo7Eve1vKMMmrxIjYG8XSLTu361Ij1nGi47KRE6vGzeVTwqKMI8FqiXuqlRMr2vGZy8L9kjvAj9eT2BYGE8VsgTvKrl6DyPCR69asdNPQQ9FL10PNc8RF0Qvfzekrnp8h48pSOiPM86fzxYYIy7ZQFFvFp1sztqSD47qM5gvCurkzvz+968tF40vCLG/rwdgeY7abSHux+YbrxwEKg8hRFDPPFSAbx3WYK82J6jvCiQybsv24Q8bF1lPHKk3rwm5Qo8HO2vPDc5hjz4LbG68uRWvHoIgzqHKqw8fTSyPO0gr7wTeoc8N8tbu9ighDwZwYA7JvbvO1p3lDrcT4W8xTG/vNRsUbsswLo8SSE4PM+qCrwfnDC88ua3PBDgrbw55ES8k0r0vO0z9TwtQww9ldEHO6J2Aru2dx2/TE1nvP71mrvjKrW8CIDLPEo23zw9EHQ8a8kuO6pm2bzT6X+8D8sGOlsJ6jybqHW79HzPvHjvGTsIbYW8egYivC5YM72JwMO8cA5HPRL3tbur6wu8+kQ5u2eZvbrtM3W7zykavIWUFDxueK87oN4JPYN7qzs0IB292KCEvCRNEjzHSMe7FI8uPaBdGbxj6ry8b/sAvELFFz1BRKc7uiS9vNPr4DsVkY88RXSYO9Pr4LwxcZy6rhXaOnsZ6DuB47I8AzszPOB9FbzOJdg7HYNHPARQ2rtKt8+7YVSlueuKFz2Yfqc7abSHOyb277ou1eG8zzp/PLiMxLwMrtu8MPCrvEq3T7z1gJG8zZMCPfHRkLzwTr+7N0xMPIR9DDwADSO8xTMgOtmzSj2p1IM8PpcHPUxN57v2l5k8caJ9PGpG3TywrdK8KH0DvQKlm7wTegc9VsYyvAh+ar2BYOG6GdTGunmBbztE7+U83MyzuzW007mulOm8x0yJPDjPnTxoruS88E6/PNvI8Tzed3K8rYOEvKhP0Tw/qs26dD44PIzwtDrOJzk86gVlvJoWoDwsP8o8V1rpvMngv7ygXRm9J/ySvGNpzLzJ4D+8jXElvVjfmzv4rEC7ZG0Oux4Xfryf2sc8asdNPHKooDz2Fim7HYPHu2tKnztMzPY8RwyRvGHTtLrRQgM75sQOO0Rw1juh8c88LcKbvHyeGjy0XNM7kJ81OxF0ZDtxI248E4vsuxYlxryStr07+seKPDFxHDxj6Fu8yF3uvI+KDr01M2O7G2w/vFD+yDti1ZW8ZP/jOy5U8bsc75A8EvXUui1DjLwbbL88sK+zvCAfArvf+GK87jXWvB0EuDzYnqO7g/o6vIWUlLwkTZI7OuiGu0Ru9TvGNYG8yF/PvMwOUDxt83w7xJ2IvCooQry0XjS7EnbFuwRO+bxVQ+E7cqRePEb1iLwUj648uA+WPG507bti5nq87aOAu1p1szz98dg8vL4WOxN6h7v1k1e7EeKOvDdMzLrBgN28TVOKPOuKFzsne6I8nscBvaWkEjzJ5AG7DbKdPM4nuTxsXeU7AJ94PDjPnbx9MHA8qdKivG713TrS1rm8A7yjuj6Vpjy03UM8+sWpvNTtQbzSV6o7jobMOr9rNjzYHxS8rpRpvIYVhbyQnzW8FI3NuuQsljl3WQI8xbQQPDc5hry1cXo8SJ5mPH9J2TyyR6w8hI5xvLmhazoH6rM8u7pUPO0zdTuBYOE7vD+HO55EsDzpcS66qdKiPAboUrwZ0uU7HO+QPJ3DvzzRwZK7O3y9O2vJLrziJvM8YuZ6O3yvf7t9s8E8XI6cu1AAKjxkbQ48abQHvEmiqDzPOv+8nkYRvbHCeTyaFqA82B8UPCmUizwl4Ug87zmYPOIVDjsBJKs8DC1rO3/MqrwCpZs8beIXvFKU4LxvepC7AA8EupK4HjsYPi+8iD3yO09qEryoT9E8Xzu8PJoYgTwI/9q6WvYju4nCpLxVQ2E8SI0BPY4HPbxWxrI6xbQQvKz+UTwCJow7O3rcPAdpQzx5g1A7Mgc0vNJXqrx7mti4v2u2PA/JpTxi1ZU67TN1vP1ySTqMb0Q8kjcuvD6VJjyLWLy7lEzVPPYWKTtBQsY76O7cvLVx+js9EPS8dtawvNRusrt9MtG8tFxTPHwfCzwtwpu80DxgvLs7xbvQPGA81ocbPB6FqLucLai8D8sGu8MaNz1fuOo802rwPB2DxztXXEq5MYRivDlj1DwLHAa98dGQu5j/lzx47bg7hywNu+yfPrsPySW8eQLgO9ieIz3HSig8jgc9Oyom4bwDusK70lVJvLLIHLy14YU8XqcFPG3zfDu5o8w8tfJqOsp217srKqO7beKXPNRusrxbC0u88E6/PDjPnTzdYks8SI0BPB+Y7jzczLM8T+sCurPbYjzRQgO8BE55OW50bbvVg9m6NCCdvMUzoDxTmoM8LlTxvOSthrsgnpE88uRWvGYWbLyuF7u8QC8AvH0yUbu8z/s76wmnPJbmLrxa+AQ7YVDju7w9Jr1mlfu85sSOvIP8mzyTSvQ6xTG/vCssBL36xwq9geMyvLHC+bxTq2i7SzyCvNicQrz2Fqm8HO+QO3yemjwMLWs8ldGHPNFAorup1AO8DbKdPBi9PrxbiHm8+cNIPGcYzbuZEt47uqWtO2xd5TznXIe7HYNHOluIeToVI+W8XI6cu+0grzxfvKy8IshfPGzgNjoqpfC6eQD/OzhQjryi9RE83E2kvGWAVLxwDke8eHCKO7XyajwhNCm7teEFPG3z/Dwmdf+7dVF+vOuItjshNKk7pSFBvC9aFL383LG6SaQJvNq3jDq4DbU8tfLqvH61Irysf0I7paKxvBi/nzxv+wC7+sUpPBDgrTuCZgS9yNx9vLZ3nbyV0Yc7PRB0uvLkVrviFQ69T3t3vHoEQb05YXO83E8FOpyumDxt4pc7G2h9vGtKn7y8Pwc8i1i8uwCfeLrqB8a88VCgvPrFqby0YJU7S
7uRvCurEzzbS0M8wYK+PCd5Qb2qZtm8MPArumm0B71E72W7GECQPH0w8DzTWYs80UIDPedaJjxepaQ8u7rUO5XRBzxjacy76XOPu3oIA7yLWLy66XGuuyXjqTwfmG66YL6NPLJHLLpjacy8+scKux4X/ruM8LS8uiS9OvHi9bxt4he82KAEPNxNpLyFEyS87J3dvCI2irxN0hk9NbLyu8uL/jvYnqM8qE/RPHESCbzFsE66zqRnu8KEH7xdo0O8IkfvPDrmJbwI/fm8JnX/PDhQDrwb6049gWQjPEs8AjxNUak8OWPUui/Zo7x6BEE8HG6gvObEjruHKMu7d+tXvWFSxLvjqyU8hA9iPOoF5TtSFzK8+sWpPMhd7rxv+R+8y/uJOlsLy7qmNmg9uJAGPHXBCTzfedM8xTG/OSoowryala88PZPFvAVSu7tArg89PH4ePVny4bxeJhW8AA8EvO43N7x3WQI9NrgVPGvLjzwQ4C28XqUkPLgPlryrahu8HgYZvfcYCj3+9Rq6c7vmugAgabyvmCs8h6k7PIUTJDyHqxy9KqdRPInCJLwIbQU7nkSwutPYGjx/S7q7q2qbPKFywLrPqgo9rYOEub5UrjxbCeo86G/NO7w9JrwjSVC8h6ucPEJGCD0Wpra8hROkuxamNrkn+rE80tRYuusJp7oofQO9OWPUvK2BI7xdJLS8HHCBPHqHEr3TavA7p7uauwK44TkEPRS8eO+Zu2/5Hzyxwnk8RG71vPBOP7xeN3o79ZF2PIDOCz07fD08EeKOvH/KyTyvmow8AA+EvMhdbrwML0y8RXQYvFwPDb1O50A87aMAvMQt/btArg88yNz9vMnioDqcrDe9hZQUvKtqmzyMb8S7b/sAPEiNgbwdg0e8TNA4vFQs2TzqBeU7Q9hdPFMZE73WCAw8AI4TvOVBvbuOiK07ZQFFPDni47tLPAK7sK8zvB+azzwx8oy78EzePLAsYjuBYsI47SIQvdYGK7tyqKC7n1u4PA9bezzob827BFDaPHoIAzwDvKO8CP35O0PcH73Mj0C83M4UvbEyhTxALwC96G9NvAsapTxSF7I7xjUBPdgdM7yeRhG9YL6Nu3kA/7zNkwK9luRNu1dJBDsXuXw8uztFvE/rArx0Pjg9SaQJO2/7gLx1wQm8y/kovB+cMLshs7i8AA0jPWcarrymtfc6f8rJuzFxHDwaVTe8jOxyPBFhnjyqZHg8mHxGvJ7HgbwtQww859k1uuKUnTsNMw68lFCXvHVT3zu4jEQ8UH+5Oiqn0buOiK28wH58PA/ca7wTDN05dtTPvEigxzrChgA8CYSNvDEDcjxyJU+8twvUO/DNzjwlYjk9HYNHvHyv/zsDO7M8mH6nvCCeEbznXAc7iUG0vMGCPrwlYjm8+C+SOxF05LpzKRG9ssTavD0S1Ts0IJ28xbBOvAK44TsitRm9hywNvLw9JrqkjYq7PP8OPHmBb7u3C9Q8WnUzvQbo0jtOZtA8hZDSunoEwbtbiHk8BE75OwsapToDvKM8kJ1UPikRujwxA3K8FA4+PSLGfjxCRog8dlehu/nB5zuNcSU58ua3PGT/Yzy0XFM8xJ0IPEAtHzu8z/u7owhYvIWUFL3toZ+8O3rcvDY1RL0hs7g8mH4nvMSubTumtfe89pW4PJ/Y5ru7O8U6WfJhPEzOVzwF06u6j4oOvG73vjqlI6I8yvXmPDjNPDvuNVY7xJ2IuzrmpTv1Euc8icKkPBrYCD01oQ29KyyEOgbVjDvJYTA8MwkVPRto/btwDse6NbLyPLVxejx5AP+6s9tiPPmwgjytAhQ7XiYVvCPMobyjDJo8ChhEO9q3jLwR4g6859m1u1uMuzwzigW9YuZ6PKBdGbyXaQA8x0wJvT0Q9DvfedM7JWQavComYTuYfie8Xjd6vARO+bs2uJW8DKx6vKMI2DwEvgQ8p7saPaUhwTzI3l68dEAZvOwezrwG6NK820tDvTHyDL2lIcE7eQLgvPWR9rw66Ia7l3plvJAgJr3Gx9a85UOeuy1DDLuaFqA8bN5VO0XxRjww8Cu7BVScvK2Bo7x0vyg9YVLEO3bUTzx0Pji6XaHiOtYEyjxO5d86xTE/OxUSgDtXXMo7TdKZuyZ34LsIbYU7NbJyO7w/Bzy/6sW7MG+7vOMo1Dq8PSa8/naLPGPqPDsneyK8prdYO+ZWZLxSmCK8h6k7O+Mqtbz5sII8hZBSvcW0ELtWRUK7xbQQPZ5EsLxNU4o6MfKMPEu5MD2fWzg86G/NvI8JHrzLehk8GL8fumC+DTtZc9I7sK1SPLLE2ry4jqU8yF1uu4DOi7tTKni8wxwYvL1STbxInuY7TVGpu/pEOT1ueK+8iL5iu7gPFrw5Y1Q8gWDhODKGQ72QHsU7BFDaPIzs8rv1gBG8fJ6avMMaN75qRl08joitPHVTX7wMrHo8bnivvAbo0jzyZce7yeQBPPWTVzvKdte7H5ywOxL1VLzkLBY8x0yJvAO6QjwOx8Q6VC46vCb2bzyvmKs7CH7qPAS+hDe4D5Y8ZYK1vCmSqrkIgMu8YdFTu5boDz1jaUy8QC8AvQCOEzyUTNW7dlXAPJTNxbnZMto7ncHeu3jtuDvnXAe8h6m7vIcsjbtnHA88RO9lPPUBAj1mFmy5Dkg1uE/rgjz+dgs9owhYPC5U8Tv8X4O8TVGpPE3SGb0Nsh08pSHBuwhthTyeRDA83E+FvKY26DxDWU68abImvD6Xh7zn2bW8HgYZPLVxerzRU+i72JxCOu605bzPOv883nfyukcKMLnS1jm80L1QvKIGdzyPCR48dcGJPLw9pjww8Ku8+sWpPLAuwzwwbzs89HzPvCK1GT0IbQW7MXGcPLAuQzxxI+46ogZ3vGeZvTtj6rw8YVSlOaMMGrsyiKS8h6k7PLLEWry5oWs8sUPqOxhAkLxv+4C7l/l0PDlh87sswLq7rH9CvLqnDrzDHJg83M4UPbGxFDx7GWg71QLpPMniID2raLq7BE75vA0zjjtpMTY8KI5oPF43ejzQvdA8tviNOqY26LyPio68/nQqParnyTx7G8k5Y2nMvCiOaLzHSii7lM3FurZ3nb2FEUO8ofOwPIYVBT2bKWa8HxnfPLb4jbywr7M8Y+ydOwh+6jwehai8SzyCvF6lpDtJIbi8dL1Huzz/DrzutkY8iD3yvAboUrzT6f88nK6YO9zMs7tormQ8oXDfvF6lJLwn/JI7egYivQO8ozyMb0Q8lM8muyK1mTvxUgG7SjZfPIg/U7z6RLm7D1v7vGNrrTqlI6K828pSPD8p3byPig46xTMgPTBvOzwccAG9LUGrPEC/dLw/qk28z6gpPFr4hDxfuOq6CG0FPHS/KLyqZlm8/fHYu7kiXDtIjQG6W4j5OxYnJzzkLBa9IB8CvSAfgrxpMbY4fbPBvC/ZIz1Q/Gc7jwkevI+b87phUkS88VKBPHjtOLwnecG8XSLTPFfbWbtHCjA7nkaRvMMat7vwzc68YL4NvQO6QjxbC8u8YlYGvRDeTLx/yOi8ncO/vJoUvzwqpfA8x0jH
vGpG3TtY3bo8K6sTvWWEFjrIX088cA5HPI+bc7yala+8uaPMO5Ac5Dz6RDm8rH1hOnhuKT0v24S86fC9vDt8vb3hkNs8VDAbvOKUHbzRwRK8Q9q+uwoW47vGx9a7UP5IvFECizwXu928CpdTvLRgFb2aFiC828hxPF87vLx0vcc8rpTpO4YVhTsAn3g8/nSqO7Z1PLyM7HI7KibhPPFSgbvEL948qVGyu2eZvTxpsqa8UxmTuWccjzuXaYC81GxRvA0zDj0QX727+cHnu1jfGzzcTSQ8MYTiO5K4nrx0QBm89ZF2vfva0Du14QW85UG9OS/ZozvUbjK9RwwROq+aDDxOaLE8tGAVPRnUxrpwjda8uaPMvIpWWzy5IHu8vtWePFIVUbyulko8mH4nvHjvGT0lYrk8cSPuPOqEdDxSlkG7w5nGPBQOvry5oes7c6oBPAsapbuxQ2q4ziXYuuqG1TyalxA9MO5KPbGxlLwfmO67MPArPMD/bLyjCrk84RHMPPWT1zyIPfK8nULPPJK4nrudQs882jacvIURw7qnOEk4aTG2u3Q+OLzpcS68JvjQu2ebnjvNEhI8FZGPu5GhFrkXuXy8freDPDjPHT1epSQ8/NyxO6Ylg7wT+Ra90lXJvMdKqLqM7PK8d1kCvcEDrzwP3Ou7WwtLvAVUHDxYXiu8U5oDPQ9b+7y03cO5Uyr4ujFxHLv83pK8zZGhPEcMkbuDeUo84qfjPAM7sztqRPw7fTJRPGFSRLztIpC8KZQLPcQtfTs1tFM8nlf2PMt4ODx10m68DK5bvJK2vbuJQbS8IjaKPMWyrzsYvb49B+qzOyf8krztIK+85K2Gu252zjwrKqM8V9n4PMKGgLwPW/u8nKw3PYcoyzp0QJm89P0/vE9qEr25Ilw8vtWeO3hsSDtWRUI8/fM5PPLmt7qvmgw9L1oUPDBtWjvA/+y8K6myOw/LBj2SNc25VDCbvJqVr7wLm5U86fKePL/o5LwxA3K8DTOOubPb4rzNEhI859k1vMbHVj2Gp1q6yWGwOe62RjwXOu286O7cO5srx7ynOiq9kB5FvDY3pbsPy4a8\"\n + \ }\n ],\n \"model\": \"text-embedding-ada-002-v2\",\n \"usage\": {\n \"prompt_tokens\": + 11,\n \"total_tokens\": 11\n }\n}\n" + headers: + CF-RAY: + - 936f936a5c107e1e-GRU + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 27 Apr 2025 16:07:58 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=utmyQFzT_wcpHa9.mgJCTUKMEwjaKO1KUN4w4FESXA8-1745770078-1.0.1.1-c__HC5oqY30dc8uUgateYwWXyd5rkkLT_sv7FaglerEzNk2yyURMruWVkA12xyL7Frj5cXci33jdwdr8.yO6MRk_jssq5iAvJP3Aq.SVfyE; + path=/; expires=Sun, 27-Apr-25 16:37:58 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=mD0miBgD.AKaxJ3xiOsfjrDskazLjoHBA1QrHecqrX0-1745770078037-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-allow-origin: + - '*' + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-model: + - text-embedding-ada-002-v2 + openai-organization: + - crewai-iuxna1 + openai-processing-ms: + - '61' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + via: + - envoy-router-5f4895bd76-h5k2k + x-envoy-upstream-service-time: + - '41' + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '10000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '9999991' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_3609b88991fc31a1bcb94c34547d9451 + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"batch": [{"properties": {"class": "App", "version": "0.1.126", "language": + "python", "pid": 35168, "$lib": "posthog-python", "$lib_version": "3.9.3", "$geoip_disable": + true, "data_type": "csv", "word_count": 9, "chunks_count": 1}, "timestamp": + "2025-04-27T16:07:57.041070+00:00", "context": {}, "distinct_id": "5303ea6e-a423-419e-a71c-3a0f0eaaaa16", + "event": "add"}, {"properties": {"class": "App", "version": "0.1.126", "language": + "python", "pid": 35168, "$lib": "posthog-python", "$lib_version": "3.9.3", "$geoip_disable": + true}, "timestamp": "2025-04-27T16:07:57.605978+00:00", "context": {}, "distinct_id": + "5303ea6e-a423-419e-a71c-3a0f0eaaaa16", "event": "query"}], "historical_migration": + false, "sentAt": 
"2025-04-27T16:07:57.928462+00:00", "api_key": "phc_PHQDA5KwztijnSojsxJ2c1DuJd52QCzJzT2xnSGvjN2"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, zstd + Connection: + - keep-alive + Content-Length: + - '812' + Content-Type: + - application/json + User-Agent: + - posthog-python/3.9.3 + method: POST + uri: https://us.i.posthog.com/batch/ + response: + body: + string: '{"status":"Ok"}' + headers: + Connection: + - keep-alive + Content-Length: + - '15' + Content-Type: + - application/json + Date: + - Sun, 27 Apr 2025 16:07:58 GMT + access-control-allow-credentials: + - 'true' + server: + - envoy + vary: + - origin, access-control-request-method, access-control-request-headers + x-envoy-upstream-service-time: + - '39' + status: + code: 200 + message: OK +- request: + body: '{"input": ["test MDX"], "model": "text-embedding-ada-002", "encoding_format": + "base64"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '87' + content-type: + - application/json + cookie: + - __cf_bm=utmyQFzT_wcpHa9.mgJCTUKMEwjaKO1KUN4w4FESXA8-1745770078-1.0.1.1-c__HC5oqY30dc8uUgateYwWXyd5rkkLT_sv7FaglerEzNk2yyURMruWVkA12xyL7Frj5cXci33jdwdr8.yO6MRk_jssq5iAvJP3Aq.SVfyE; + _cfuvid=mD0miBgD.AKaxJ3xiOsfjrDskazLjoHBA1QrHecqrX0-1745770078037-0.0.1.1-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.66.3 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.66.3 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.9 + method: POST + uri: https://api.openai.com/v1/embeddings + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": + \"embedding\",\n \"index\": 0,\n \"embedding\": 
\"3EwWvB29uTzJK8K8xZmHvJvFJLzh4cw8qYAVveo22rz4ZYi8mR+YvIMPmjwBjfk7LdiVPDnWq7u8LSy8mO7/O7gN6jtjTZ+71Gz9PBQii7z9EY23QD+LPEIrOTwdGqm8454nu4i7Hjwu25E7N3bAvDcwn7xSGHk71CZcPaK6RrvMSAi9bFwLvUNFg7oT8fI7sV7pOpS5NDuNIYI8LTWFuwEBtzx+TEe8ll9Buy44AbiJG4o7H8A1uza5ZTwZcaC8JGw6vNQmXLtjfPI8gWkNPKJ0pbwXKIO8m65WPLC72LtF1EE8oi4EO2Dqt7vExd67nxE+vCfPobzslkW8MCSvPOP7lrw+yNG7DELyu48NMLxq5VE8TJqQPOLK/jxqWY88k/zZOij+9LumHa48dYP8ugXw4LpCcdq8xyjGO4YVEryRbRs8WPUVvA8Cybt1sRg7/PdCPMyOqboBu5U3/6BLPKLRlLybIpS7BlDMPEV3UjxtAhg80oBPvEyDwjpvBZQ884tnuv+gSzze29S8P2vivBG/I7z7a4C6Z5m4vNPgurvnpB+94oTdOxEFRbwfwLW7jVBVPBFLZryYHJy8ZZa8PCvVmTyBry69AQE3vB29ObzGDvw5Nlz2vPO5A7yrbEO8PDmTO1OmADyDPm08pyAqPIEMHjwreKo8ZVAbu87XxryD+Eu8NS0jO5T/VT3Av2Y8TD2hPIxNWTw8lgK96pPJPNfmsrybxSQ97bCPvJgFTrytWHE8xcjaPK3MrjzFgjk71Z0VujTkhTyZfAc8UEMZOcyOqTvXcvW8stWivJ0lEL1s6E080/eIO4NVO7wSwh87xH89PNnS4LpAy028A2EivEQX5zubIhQ8X6SWPIVB6bwEZJ678Z+5PL52yTpZOze7imGrvFz7DTxujlo8NxnRPFIY+bw05IU818/kut4hdjxu68k7UUYVPPtrALxMmpC87FCkO0r0A7tam6I82KONPOXnxLskstu5QkKHO+9WHLvGDny83iH2vFU1vzxjk8A8Z/anu8XfqLvOkSW/fDJ9vE3J47sgIKG88fwoPccoxjykYNM7XorMOvH8qLwpXmC8hfvHOc132zzsCoO8TJoQvdTJ7Ds7k4a8oi6EuaDOGL3Nd1s7A2EiPdPguryxXuk6fOxbu9fmsjp8YJm8+2sAvc40trsgw7E76pPJPLFeaTzNvfy8Xv6JvHb6Nbwd1Ie8H3oUPfgIGby3alm8bjFrPEnauTw1ihI9hFi3vB0aqTvl0HY8b2KDPKbXjLwEwQ08JMkpvDWKEjx4oMI74/uWO6Oj+LmtzK671UAmu2LW5bse1wO7iEfhuzt8uDxqK3O88ULKO5EQLDxH1z28Qs7JOza55bxhp5K86JBNvODe0Ls3MB88C3COPHyP7LuuiYk7GFfWO7bEzDyIAcC8+gsVPLvkDj3op5s8qMO6PItkJ7xHek48qcY2PBqg8zw5eby8uT4CvdLdPryNrUQ9V5WqO+kzXr3l58S7PDmTOTUto7qrg5E8NYqSuzmQijx4ifS8iBiOPJGc7jyiLoS8Qis5PJTQAj33NPC7jVDVvMKTDz2oCVw8vC0sPJ/6bzwwJC88nxG+vDw5EzxuMes8Y9nhvPO5g7xsXIu8H6nnvG6lqLxJ8Qe8gWkNvYQSlrsouNM5YjPVvA+l2bxv7sU8bjHrOaK6xjx/8tO7p32ZvLU4ijs5eTw8IU90vAtwDjyrbMM6QIUsvNijjbviJ248J3KyvGNNH7sT8fI6myIUPCQmmbvelTO8ekZPu1zN8bvKRQw8hhUSPKmAFbyNxBK7MCQvvQhqFr2YqN46/xQJvFXvnTum14y8fkzHuna0FLxzl0488D9OPPWO47zUbP08QkKHvAxC8rpjqg68knAXvWor8zz9+r67wJCTuj9r4ruyMpK8i8GWO6S9wjzlW4K8GRSxvGni1Tw7H0m86dbuu7TBULzqk0m795FfO2izgrzJcWO8+mgEPFWSLrxVTA09fOzbPNWdFbzFPJi7FCKLusIf0jwniQA8PWhmPHGrILuOlnY8ZrCGvJz0d7sMQnK81UCmPDlibjtLgEY8sC+WvGEEArzc2Fi8+q6lOwiZ6TzggWG7N9MvPEXrDzs0Kqc8eFqhvLIbxLuNIQK9A+1kuznWqzzzLng4JMkpvZYZoLkfqee6Z9/ZuXfj5zwnz6E7xTyYvFXvnbxYUoW8/kBgPJLNhjvMSAg9iRsKPN5PkrvSgE879mIMPFz7jTwuOIE8FrFJvIrtbTtTMsM7NzCfPOIn7jqbaLU8YI3IPOInbjvlilW7jVDVPIsHuLu33pY8Mlb+O9XM6DvnMGI7QOKbPLnhkrsUxRs9p9qIu2zozbmJG4o8G11OvJLNBjo2FtU7cfHBvBSuTbvQ2sK8NS0jvLyKmzz6riU8aohiPPA/zjwLEx88BZPxPEl9yru0wVA83k+SO2Htszk5BX+7vtM4u5vFpLyfV1+7eKBCvD6CMDzvnL28BZNxPBnOj7znpJ88dVQpPLfeFjzR9Aw7ARiFvMXI2rxvS7U8deDrPGxFvbsRqNW8bKKsvOH4mjuYqF66r7jcO7mEoztcQS+8qQxYPBTFm7yow7o74fiaPAhTyDytKZ48Gc6PvAK+ETy85wo8pNQQu3wy/Tpzl868TkCdPO72sDta+JG7ICAhvXHa8ztq5dG8ZzzJvK7PKrvYow29D7ynO3X3ubvcqYW7wzmcvAGNebudJZA8RzStPLU4CrwrMgm98fwoOwyf4TwSwh898fyoPEWOILztsI+7R5EcvHCRVjx1VCm8ix6GvNzY2DxCiKg7qlJ5u7dqWTsBuxW84T48vD1/tDzQ8ZA8YQQCPAatu7xF1ME7gmwJPP36Prw2XHY5IWZCPOl5fzzUbP08xCLOO8Ac1joNXLy7QLR/PIQSlryyMhI8d0DXuVqbojtgRyc8caugu6pSeTxX8pk8E/Fyu0vG5zu7KjA78+jWu/9D3LvJztI7u4cfu8UlyjyKkH48E/HyvMAc1js5M5s8H2PGvAQHL7wP63q7Tp0MvCkYP7uYS+86AV4mPJLNhrwuITO8/FQyvHzsW7xEdNa8WviRvL4Z2jxlOU077AqDvG5IuTqSE6i8FGisu59uLb212xo7KwTtvNGXHbw3MJ+8U48yPOH4mjx8Mn087H/3PEmUGLts/xu7Wd5HPF7nu7zZjD+5U+whPCT4/LvHy9Y7VTU/PDKz7TtlOU28Ui9HPEIrOTykMYC8HWBKvDbQMzy0HkC8tAfyO4M+7Tq1fis8ORxNPM7XRrzAM6Q7fAMqvad9mbybf4O8+lG2u6OjeDvF3yg7eEPTurgN6jynfRk8LTWFvGWtCjwmKRU7+w4RvHP0vbyIAcA7bqWougGN+Tv48co8zI6pvNfPZDsgICE8IiOdvP9D3Dyaq9q60Deyu1kk6TsUrs289QIhvBJlsLxJNyk8qq/oO41nIzxXlaq887kDvEw9Ib1nPEm7WlUBPDZzRDxF6w88JFXsu0CFrLxHwG883iF2vG6OWrx4WqG72uyqvL9f+7zvsws8f
AMqvA0WGz1M4DG8vRbeuaIA6Ly1OAq9ZZa8vGcl+7yWX8G7dbEYO9fP5DyRP388aFYTPbsqsDta+JE8QuWXO6+4XDwZcaC8H3qUPFmYpjtjNlG7uRBmu1FGFTz6Omi85zBiPNdDorf63Xi8oi6EPJP8WbzX/QC8rcwuPFV74LzOS4Q77JbFuyleYLzZjD+8WviRvKvJMrwDSlQ944fZO6d9GTzE3Kw7YzbRPHigwrvCNqA7IQlTvHYRBLwpL427KzKJPGeCarzJFHS8jpb2PJqrWrvbG/48tMHQO2VQmzz2qC08eP2xOiFPdLzCkw87/uNwvJ1rMbyyvlS8UUaVvMU8mLv9Vy66yeWgPJRzkzwybUy83k+SPLknNLwkJpm8RY6gOyZYaLzCH1I9dp1GO5EQLDxVe+A7y9HOO/rd+Lx6XR060DeyvDm/Xbxc+w09kbM8PeoHB7xMmpC8JIOIu+HhTLydsVI8BMENO20CmDyqUnm8mHmLu3/yU7ymqXC810Mivc6RJT0mWGi8lFzFum9ig7tnmbi7UeklPDyWgjzvnD29gfVPO+inG7zzi2c7u7ZyO6apcDtHNC28NBPZPA/reruR+d08QD8LPNnSYDxbJ+U8GG6kOysyibybUWe8RNFFPMecAz1p4tW8DJ/hO0fuizq33pY7vXPNODYWVbyfV9+8almPvMlCELytho28bV8HPCSy27wpLw07O8JZO0qXlLwBGAW9wDMku8nOUjt/w4A84IHhvAptkryjo3g8oi6EPO9WHD18j2w8XEGvvOdHMDz9EY07nSUQvG8FFLzQw3S7YUqjvDJW/ryTWUk8/7cZvP+3Gbz+QGA8D0jqvA9IarvTg8u81Z2VO/H8KLz2BR089Y7jvI0hArqEWLe8L8TDuyKAjDuOlva7PSLFPJ3IIL3f9Z48Iq9fvLXbGjqgK4g8CA2nOohH4bqowzq8PDmTvGictDzTPSq8BZPxPPeRX7yUomY6iAHAvHWaSrpZx/m6LdgVPSfPITziJ267R3rOPHVUKbuU0IK8QogoPLEB+rxj8K+8ARgFvTBq0DutEtC8NhZVvBZU2rsPdoY8K3iqPHq6DLzUbP28uA3qu/FZmLx1msq8TOAxu/XrUjwT8XI8pBqyulAsS7wEBy89CA0nPDQTWbyGcoG8pDEAubsqMDrumcG8y4stPbk+grzhm6u7qSMmvAyfYTyNUFW73/WePCj+dDzloSM8U9VTvG6O2rzg3lC8R3rOO9ijDTsGCiu72emuvK6JiTyNxBI85gEPvDfTL7sBATe8Mm3MPOqTSbutEtC7NrllvIq+Gjp/8lM8r3K7vHqjvjrCwmI7PshRO/qupTz6aIQ88Z85PMLC4juxAXo8UaOEvMCQk7zuPFI8rW+/vAZQzLsEqj+8vuoGu6+43Duwdbe8Vzi7vEWOoDxam6K80/eIvNGXnTs3MJ+8onSlt4M+7bvhm6u8sHU3O3ymOrvVnRU8bkg5veLK/rtSL0c8tX4rvNB907vSOi67m1HnO3YRBDw8OZM8iu1tPlR4ZDwIDae8VZIuPR0aKTzokE08ipD+O+eNUTwI9li7ReuPPBhX1jwaWlI6R5GcO5VFdzvzXJQ7SyPXu1WSrrwb0Yu88fyovLmEI72BDB49O9mnusmIsTvggWG8OzYXPWOqDjv8PWS7dlelPDi84TsdYEq8VR5xu5NZSbtzOt88BAevPIsehrrT94i78fwovGg/xTmGW7M85dB2PBEcEz0ddxi9VR7xuyt4Kjvh4cy7EajVPFJ16LtX8hm884vnPIZEZTzFgjm8Gc6PO6smojxzl847c/S9uxG/o7yk1JA8EWK0PGDqt7s1LaO7pNSQvMJlczxlUBu9H6nnPCsEbbxbhFQ8DJ/hvIb+wzw15wE7R5EcvKcgqjv/Wqq8qlL5uwbz3LofepS8n1dfvCAgoTxqn7A8UaMEPQz80Dxe5zu8MIEeu6Zjz7vDlou8PjwPvV6KzLyNxBI84VWKu7lt1bsRqFU77bAPvIP4y7z3kd+8FCKLu0R01jsFk/E8cDRnPN44xDzbG/67vhnavLlt1bxlOU09HwZXvL+8ajzJ5SC8btR7PMc/FLr890K88IVvPGeC6rv9EY08ucrEvH849buNIQI8YI1Iu/ZijDzf9R68riyavNf9ADwXKIO8stWiPNKAz7xXODs61Gx9PPYFHbznjVG8Ln6iu73QPLy3OwY87FAkvYZE5bm0HkC8YOq3PKFa27wbdBy6hkRlPKRg0zzl0Ha6Msq7vMAzpLv9+j67n7ROvGhWk7wr1Rk8vC2sPLJh5bzOS4Q7g5tcPHi3ELv4Trq8DwJJvJ0OwrzlW4K7+t14uyYSRz2nfZm8WTu3vPo6aDsXKAO7xMVePMc/FL07wlk8YzbRPDvZp7u2Z12829Vcu25IOb5hSqM8D3YGPa9yu7w9C3c8JCaZur7qBj0gfZC8nxG+u8xIiDs1LSO7NhbVPGGQxLwaoHM8VHhkvFFGlTyfy5w7pnodvK+4XDuZfIc74/uWPP1XrrmkvUI8dhEEvXyP7DsgICG8iu1tvNvV3Dwkgwi850ewvB8dJTwD7eS80iPgPMCQk7xJIFs87t/iu6IAaDt/8lO8eIn0vFsn5TvOkaU64oRdPELlFz2kYFM6uyowvES6dzyUFqQ82qYJPGpZjzt/leS7J4kAPUIrubyDD5o8t94WvH+ssjxAPwu6mghKuWDqtzyfV9+7f2YRvGffWbtZx/m8n26tPCeJgLwNXLy8g5tcO/sOEb0k+Pw8O3w4vBQiizw8loK8DwLJvB/ANTwt2JW7ptcMPMecAz0kD8u6tyS4O+ytkzxQoIi8S2n4vO5ToDxRRpW8S2n4PJt/A7wpLw08jfPluzfTLzyRP388+t14OxJlsDthp5K8QCi9O04pT7xaPjM8ICChPEIrObyUcxM7RhpjPCKv3ztX8pk7bKIsup0lkLxH1z081+YyPDcZ0TwP63q6rs+qPI6W9jzbG3671/0Avc6RJTxOQB07DxkXPFeVqjyUcxM9BZNxPFKMtrzMMbq80fQMPRG/Iz254ZK7wJCTvL0W3jp6XZ26lrwwvJEQrL1j8K+8gWmNPN5Pkjx6Rk876Xn/PAptErzT94g78+jWu2OqDj3npJ+8n26tvKfaiDwDp8O8J4kAuyKvX7u+dkk8ykWMvOGbq7t/ZhE9JPh8vBQLPbw+gjA7GG6kvMCQE7wWsck7FmsovcM5nDx2tJQ8QD+LuiSy2zt1mkq8DRYbPEuARryF+8e7ph0uvb9f+zsPdga9KXWuPLk+ArwNXDw8RC61PCDDsTzNGuy8H2PGPNxMlrzfUg68sC+WPGxFvTwJygG86Xl/vNzvpjsTq1G8bwUUvA8Cybu0B3K6yUKQOqEUOjxRRhW9lKJmvPPo1rsmtVe86vC4uxYlBz0u25E8RNHFu4VBabz//bq8CcqBPAu2r7xi1mW7AY35PDZc9ry0ey+75efEvJSi5rl66V+8HdSHvBG/IzxAbt68YDDZvJgcHLyGW7O8/PfCvA9fOD2K7e08uRDm
vLDSJjx7vYg8OzYXvdxMljrMMbo8uYQjPMTFXryNIYK8kT9/Ow3/zDzefmW8QBFvPEAR7zzEIs478OLevM7ulL2LwZY8bP8bvLR7r7qWAlK7psA+vHdA17t/wwC8+q6lO4a4IjzL0c68m3+DvMcoRrzEf707YQQCOriw+rw154E8f8OAPCYplTzR9Iw8H6lnuUQXZ7zH4iS8ResPPfhliDiiLoQ8e70IvCth3Dyyj4G8SZQYvFr4kTwBATc871acvG4x6zwFk/G8dT3bvDkzGzxuMes8cJFWPFWSLrxlOU08stUivZ3IIDxF64+8g5tcO6bXjDv/Wiq9yeWgur3QPLyYqF47kZzuPLC72Lr/Wqq8kMoKvSRVbDsGxAm929Xcun5Mx7t/T0M8hFg3O+5TID0o/vQ81eO2PMU8GDrQw3Q8O9mnuzIQ3bx9wIQ8Fg45PGzoTbw05AW7RLr3OXyPbDxgjcg8+2sAPbC72Ly54RI8JFVsvF6hmjrlRLQ8+GWIuxZUWjyUcxO91MnsPEw9obuYv6w8uA3qu+ekHzx8j+y7cQgQO7TYnrn+4/C7OXm8O0w9oTs154E7VZIuPM0a7DpAP4u8XEEvPC4hszyk1JA8AUfYO3oALryGuCK9tX6rvD4lwTtR6SW9/D3kvAg8ejur4AC78/+ku/oLFTy7KrA7U0mRPK214LwGZxq7Xi1du4S1prvMMbq7pmNPPCZvNjw9C/c81/2APDBqUDyEWLc8cfFBPDs2lzzVnZW8k/zZPB96FDviJ248c5fOPA+l2Tsdd5i8fWMVvEDim7xxlFK8QOKbPLQ1jrumwL49mHmLPOHhTLvz6Fa8EQVFvMOWCzw3jY482S/QPIher7udazG96KcbPcUlSjyBUr+5hUHpO9hGHr2125o8WuHDOROr0bsG89w8yUKQOyGs4ztSGPk8FAs9PIsehrypxra8mAXOO0qXFD23OwY76EqsvNRs/bwBXqY6tX6ruiT4/LzDOZy8tmddO4RYt7xnJfu7jWejvPNcFD1x8UE8+AiZO79fezzhVYq8Eh+Pu/Mu+LtvYgO9cTdjuwyfYbz37s67\"\n + \ }\n ],\n \"model\": \"text-embedding-ada-002-v2\",\n \"usage\": {\n \"prompt_tokens\": + 3,\n \"total_tokens\": 3\n }\n}\n" + headers: + CF-RAY: + - 936f936becf67e1e-GRU + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 27 Apr 2025 16:07:59 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-allow-origin: + - '*' + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-model: + - text-embedding-ada-002-v2 + openai-organization: + - crewai-iuxna1 + openai-processing-ms: + - '191' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + via: + - envoy-router-59cfcdcc4b-wnwks + x-envoy-upstream-service-time: + - '90' + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '10000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '9999998' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_e29bb7e5a6b3af41f94939adce45f5f1 + http_version: HTTP/1.1 + status_code: 200 +version: 1 diff --git a/tests/tools/cassettes/test_search_tools/test_txt_search_tool.yaml b/tests/tools/cassettes/test_search_tools/test_txt_search_tool.yaml new file mode 100644 index 000000000..18d07c0b3 --- /dev/null +++ b/tests/tools/cassettes/test_search_tools/test_txt_search_tool.yaml @@ -0,0 +1,251 @@ +interactions: +- request: + body: '{"input": ["This is a test file for txt search"], "model": "text-embedding-ada-002", + "encoding_format": "base64"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '113' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.66.3 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.66.3 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.9 + method: POST + uri: https://api.openai.com/v1/embeddings + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": + \"embedding\",\n \"index\": 0,\n \"embedding\": 
\"3OpOvMylVzwB8B28pUeTvPm5AryhrAU9O6z0u7pncrxio5+8xdiTuzJWrDt2quM8YjO2uy1SRzx0ATK5/nQ9PFCF5zs+B6i6j/42PC1Sx7wNMTC8TcMaPDnjlbsF+xQ8vcIlvMYYbrwsyUI8FkAMvZuvsrvcyiG86Au0PBBDubw4muu8dzNovFsdSLzuCAc8uiCGPEUdlrxb9gg99UU0vHpF8Tp4LNa7Ey6DOxT+c7zARJi82U/BvHD2ujwPSsu87X8CPM8AizxMyqw8Drq0Ox7mELzkcKY4JqzCuwCHRjwAh8a7T/ziPEKiNbtyWIA8cn+/vLcVD7yzmi68rZ1bO3Poljsv28u8Svj9OUzKrDs0Txo7AIfGPNZdZbqCxDY7bnRIu+frhrk9fqM8aXDjO/KjlLx2quM6vcKlPA9Ky7v8Eng8YVp1vIzMgLtXMn66uC4qO5kmrjuq2w481JQGPdvRM71K+H284leLPJarzTyFRqk8xWgqO7guqryoeUm7IoGeuzbRDD3OlzO7GFIVvTnjFbwuK4i7lbJfukatLDuSMO28sqHAPBMuAz2WhI68CU14vFWJzDvsxu67LTIaPO6Ynbzsxu6891e9u6uEQDxio587rS3yvHJYAL0w1Dk7yQM4Or/bwDxZlEM8AGeZvAMJuTuaH5w8SW95vGppUb3uuEq8+0KHvMDULj33V708cIbRO1PnrDzZKIK8q2STPD0OurwnFZo5Zj4tvcBEmLvpdIs8lYsgPQpGZruTmcS8F8kQPO0PGTwILcs6r7Z2vBvUhzySgKm8wwZlPMqMvLwr0FQ754KvukEZsbtIL588r/8gPFeiZzvzvK+8+2K0vIpKjruYLcA8TryIPJKg1ryrhEA9vrsTPWvSKDzwsbg6HibruxBDubyO5Zu8OxzePJQCHL0BYAc8LKkVvKfJBTsAZ5k8jXzEPCO65jsXWSc4nMjNvA8jjDzGGO481JSGPA66NLxHFoS8xP9SvEpIOrwKjxC8HX25vImR+jwoLrU8YLHDO5idqbv/bSu/D0pLu/ppRjzk4I+8A3kiPaE8HD2vtvY8tbPJO0Zd8Lwc7aI8bFutvKsbaTtU4Jq6ELOiusC0AbxJKI28pkABu09szLu78PY7LaIDPfpJmbsK1nw8IfGHPMN2TjwRPKc7Z1dIPLogBrzZuJi8//3BPFapeTz9m/y8akkkvPpJGbtSV5Y7V+sRPQ3BRrzaSC88iHhfu9SUBjxQZTo8IypQvJdUf7wRPKc6Rj1DO0po57tMOpY7xhjuPDVICLyl1yk8Co+QO7c1PDx6/gQ82tjFPJq2RLyaj4W7HO0iPLO62zytndu7GHnUPDpsGjujBXu8cIZRuzVotTmGX8S87Q8ZvcJWITow9Ga8XBa2PMh6MzwOmoe86z3qO9Vkdzzdww+8NfhLOYjoSDwigR49+FCrPCKBHrzqtGW8/gTUPElv+Ts3WpE7nBgKvAs/1LsyxhU9m6+yOpRyBb3PkCE7ikoOvF4ov7s7rPQ7Hnanu57h6LxiU2O76z3qO7elJbzGYZg5+eBBO4MtDj3f/Ne7HX25O1r9mjyEbei7pkABPFN3QzvfTJS616YPPLIx1zv6+dw8q/QpvGQlkrwwtIy8V6JnuxuEy7xltai7+bkCvUCJmjvqHT28kynbOjpsmrxR7r48koCpO1qNMTyNDFu8+0KHPLO62zs28bm5gy2OvGBB2rr9e0+8HuYQvHFfkrww1Dk9oPPxvAb0gjz+5Ka8U5fwPF0v0TypkmQ8ukdFvB//q7xx7yg7+9IdvCxZWbu78PY60LDOvDtliLw9Luc7i/O/vO0vRrwdfTm8Lrseu0EZsby4Lqo8o3XkvCE4dLwQQzm8atm6vOfrBjv54EG8QhKfO1v2iDvE36W7x+qcOLmXgbtoMIm7LcKwvKJVNz3L9ZO7mZYXvdm4GDzKbI+7cn8/vGRM0TxA+YO8qmulOxcQ/by0k5y7CU14PEW0PrzNfhg5L9vLPDzuDDw28bk8ANcCvPtCBzxV+TU8wT2GPLWMCrzG0YE62kgvvLSTHLwiEbW8oPNxPC3p7ztMWkM8A3mivNfGvDyvb4o6w3ZOPBBDOTsOmoc89u7lOrcVD70rYOu79q4LvXmVrbvdw4+8onx2PGE6SDxxpv47h3/xvG0L8bzKbI88xdgTPJi91jy2HCG8h39xO7QjM7yifPY7iHjfO5VCdjwrsKc8B8RzvKVnwDs7ZYg83DoLPbA/+zypAs4739wqvF6YKDtabQS779h3PK4GM7zW7fs8U1AEvLOarjwLr727TuPHPC/7+Lr5AG+7k3mXPAqPkDzF2JO8nMhNPKRu0jzOl7M7VdkIPKkCzrsIDR49yvyluygOiDsnpTC8mL3WO/bu5TzG0YG8QImavGhQtjxfIa08iAj2PHRxmzyCNCC8nuHoO1CF5zytLfI8x6HyvJWy37tu5LE79j4ivI/+try2HKG8mQYBvGm5DT1cpky6KtfmPKwUVzyrhMA8KL5LPGGqsTy2HCG8fKc2vH+SAL2l99Y88To9PKMFezsG9IK7u7CcvBBDuTyulsm7qeIgOx7mEDyIwQm8f5KAPGenhLz8Eng7AqDhPMh6MzurZJO8JYyVu9EZprfYv6o8+onzOaSOf7zwYXy7Y+P5PNkoAr1ltai7Vqn5u03DGjxfIa27OvywPHczaDyfI4G8lHIFvLMqxTt+mRK6NG9HvLYcITxgQdo8syrFPPW1HbzfTBS9IK9vvO4Ihzz1RTQ82C8UPZY75Dv2zri7TVMxvHFfkrqoUgq9FtAivBVHHjw7hbW7ruYFvLWzSbvq/Y+69CWHPOKedzzfjO46thyhu+zGbrzG0YG8ukfFvJ4qk7spJ6M7VOCaO/TcXDvbQR0854KvvAlN+Du7sBy8mQaBPJidqbwucnS8IyrQPGngzDztDxk8R8ZHPJOZxDz2rgu74RVzvOCF3DsXgGa8WHuoPEjm9LtMOpY705sYvM4nSjsxXT48ZdVVO6FcSTwKH6e68yyZvLpHxTtqSaS8tjzOuy/7+Dmkjn88YEFaPOh7Hbw9vn08ymwPvTus9LzznIK8ud5tvAfEczu/20C8p1kcvOD1xbzLhaq81bSzvIE7srwGhJm6/22rPCcVmrxJb/m87S/GO3oesjzIKve5ud7tu1BFDTsmPFm7qOkyPO0PmbujBfu8ymwPPV6YKLxXgro7R1beO5MpWzynWRy7Q7tQPHpFcTzZKIK7QjJMPNthSjwzv4O8Wm0EPcDUrrzf3Co8CQYMu4Td0bryo5S6yXOhvNBAZbx5JcQ8GovduT4HqDzJc6E79CWHPDVotTyrhEC8cV+SuxMuAzzIKnc84e4zuyWMlTrD5rc7HnYnPGjn3jo5cyw9cPY6u6SO/7yEtpI7q/SpvIDS2jv9m3w8IfGHPGVFPzxPTJ+8vKkKvclzIbzUlIa7IjHivLvwdryLGv+8p8kFvalyt7wR7Oq6q2STvFib1Tsjuma8Un7VvNw6C7zZT8G6lhs3vBCzoroCoOG8Awm5vD6XPjsVZ0s8/
90UvHJ/Pzw+lz482tjFOz53kbzbYcq8b/1Mu4VGKb3TC4K7oPPxO6uEwDz8Evg8T0wfPUXU67uQZw67vruTPP/dlLueuqk7CSY5uYS2kjukTiW8YVr1OhXXtDxUcDE8wwblPFcS0bzAtIG8D9phPL7i0jvhzoY7cRboO95z07wJlqK8SU/MO3l1ALwPSks8c+iWvIGrm7xqaVE9n2rtuoavgDyGP5c8u9BJO3cz6Lzl+aq8JbNUPKI1CrwNwca8WXQWPA2hmbyaRtu8e4eJPGvy1TzKs3s8k5nEumQlkjzl+ao891c9Osr8JTuBGwW7KScjOnuuyDpiU+O8N+qnvFkErbw8pWK8EjUVPFQASDxsO4C7sK/kO9rYRTniV4u7zidKvHRxGzwr0FQ94GWvO7D4jjybrzI9k3kXvG50SLye4Wg8tAOGvCCvb7zsNtg83nPTPDgKVbwg+Jm805uYPKFcSTz11co5I3qMvHklxLwqIJE8XZ+6PHJYAL0mHKw5zO6Bvd3DjzyPHmS8eZUtvKCzF7xA+YM8PDV5vLGo0jzCViG9ptCXPGITiTz27mW80qIqu1SQXjskA5G8lbJfPPyC4btTl3A85HAmu0XU6zvnolw854IvPJLpgLxOvIg6c3gtO52hDj0B8B29sRg8u7dV6brRyek7GOm9uw0RgztsWy28O/WeOwofJ7zlicG40TnTPNSUhrndU6a69zeQvOPnobyhrIW8ymyPuwDXAjxkJZK8WSTavCfFXbzYL5Q8VWmfPDO/AzyZlhc8imo7vC2igzwZ4is9FLeHvMXYE7vbsYa6JEp9uo9uoLwTdW88//1Bu8NPj7xUAEg8RJQRvXFfkru+uxO9dCHfOrOarjxHVl67YVp1uxniKzyeKpM51q0hPNE50zxdDyS8swqYPOq0ZbpKaOe7yJpgPGlwY7uvj7c6lxQlu03qWboOmoc7eQUXvSCvb7w28bm7ez7fPDbRDDxtxIQ64yd8vD53EbyUcgW7FxD9OV1/DTvcOgs8xP9SOs6Xs7piEwm9xI/pO2ITibwU3kY8CC3LvPIzq7tWYg29OtwDvVJXFrsA14I8+OdTu8sc07yOdbK8QalHu4/+Nr2L0xK8Rj3DvNIywboMyFg8QPkDvPh3arwsycI8c5jau2auljxa/Rq7MLSMvH2gJLweJus7xfhAPNm4mLk1aLW8BAInvEIyzDsAh8Y6K9BUO9m4mDz5uQK7zKXXO/QlhzpQZbo7PwCWu36ZErsnFZo73VOmvDRPGjsovss7JEp9u/0LZruZlhe8nDg3PBlyQrrKjLw6oLMXvftCB7z6aca7L/t4vJWy3zyspG28JAORu7Ix1zxabYQ8YVp1u1X5NbteKL88R8bHu5bLerqqayU8EexqvKMF+7uKaju8NfjLO60tcrntD5k7hUapu3gsVrwEkr28PgcovJ2hjjxbrd688LG4vAkmObp7rkg8dCFfPFVpHzuZlhe80wuCvHVqCT3ec9M8+FAruod/8bu3FY88+MCUPPHKU7ycOLc8kylbPpKg1jtJuCM8wNQuPRVHHjwtMho8HnYnvIhRILtv3R+8Svh9PCRKfTyBW987kGeOPOKedzocFGK8K9DUuQidtLyvbwq9LcKwvJuvMr3gZa86CZaivNc2JjpabQS8PyDDPDeBULyJkXq8cGaku2IztjxfkRY8tAOGvIBCRLx3M+g80jJBPEtBqLy5Ttc705uYPKr7OzwoDgg9+olzuw8jjDyCNCC9W63evJdUf7z54EG8rZ1bPJyoILxK+P28xG88vG3EhLtq+We8F1knvIPk4ztwZiS7fRCOPC5y9LsQY2Y8ljtku9MLArxR7r475mICvCienjzL9ZO8v0uqPNgvlLx5JUQ8fpkSvHDWDbwkk6c8j/42vHr+hLq6R8W74yf8uz4nVbqhrAW9oLOXvHh8kjywP3s8CU14PGUeAD2J4ba822FKvIxcFztg0fC8LcIwvK//IL3yo5S8Ue6+vM8Aizu5lwG8i9OSvBVny7yr9Km8JoWDvAofpzyxyP88cB36uy/7eDxAsFm7QalHPBrbmbzXVtM8sajSPG9ttrn/jVi7xN8lvK+Ptzsw1Lm7kPckPD2eUDsR7Go8LTKaO30QDjxwhtG7Iahdu6DzcTzuCIe7GmswvOHus7r78ko8Un7VO/vSnbjDT4+7K2BrO7YcobxCEp+81bQzvNthSryMXBc8o3XkvMh6M7ssOSy8xxHcPOKedzyaHxy8N+qnO+frhjrF2BM8xdgTvU5zXjxTUAQ9LiuIOc8Ai7xsWy08DcFGPIvzP7wEcpA7PX4jvO/Y97y1s8m6ZbWovON3uLu1Q2A7F1mnPKjpMj3BPYa8S0GovLYcIb3PkKE8Drq0u+r9D724npO7rF0BPdc2JrywP3u8+tmvu/zrOL7451M8a0KSPBxdDDtZBK08QRkxPOBFAj1Myiy8GkuDPJQCnLrZ39e70wuCvCaswrxQRY275xLGvNGpvDu3Nby7SJ+IvBT+8zwEcpA8riZgPP2bfDn6acY8mL3WvHMIxDoVRx48rBTXu4safzyiVbe4/Os4vKJVtzsgiDA86QQiOwN5ojsx7VS7VvIjPKBDrjwmHCy7lUL2u/Bh/Ll8N008x6FyPIPk4zxhGpu8wLSBvOYZWDxV+bU80am8vFkELTxNwxq8Q7vQugQi1Lxiw0w8Y5wNu+TgjzzTmxg8LaKDvI+OTTuqayU80clpO1uGn7ySgCm6y4WqOxtkHrz9m3y8RdRrvGjAn7x6jps8gTsyvNY9uDuatsQ7PX4jPHaDJDzZ31c8Ski6PLw5ITrl+Sq9QoKIPJRyBbzBzZw8wNQuvLQDBj1abYQ87rjKPNvRszzPkKE8jQxbvIEbhbx1agm8GXLCOmE6yDyGz625sD/7u/O8L7wovks8cRZoPL9r17vQiY86L9tLvNemD7vwYfy7p4DbvCrXZrzk4I87ekXxPHkFFzw/IMO6Vqn5PDkj8DwuKwg8Me1UvXHvKDwVZ0s89NxcPPdXvTrrPeo7TryIu1uGH70NEYO8ikoOPCtAvjzbQR26imq7u+kEIjy3pSU8shEquwD3r71ltai7qtuOPB7mED1KSLq8rO0XPdW0szvzLBk9W4afu73CpTzeA+o7oEMuO1NQhLzngi+832zBu418RDwQs6I7TTMEvLN6Ab1Xouc8BzRdu3pF8bt0Id87niqTvDC0jLzH6pw7R1bevLWMCj1BOV48vuLSvMmTzrpAsFm8ARfdu7IxV7xHFoS8vVK8PGJT47wyVqy82C+UPO1/grwoLrW7SbgjPNTb8rv8guG80cnpOSc1x7w7rPS6V4K6PNMrr7uzCpi8vTIPuu0PmTviVwu9fMfjOxT+czwOUV08G9SHPKPlzTyI6Mi84GWvu36ZErwls9Q7QqK1vL0yjzz4wJS85AA9O3wXoDv6+dy7rpZJPODVmDqLYym8Ctb8PJyoILwdnWY8VxJRvFDVoztOvAi93DoLvSEYxzxRzhG9GXLCvHbzjbxQhee8ud7tO4yDVrvqtGU8/XvP
u3VqCbwAh8Y7KC41vaBDLjy3Vek8eZWtugidtDqjvg690ak8PAQiVDsxhP06IqFLuzzuDLvJ4wq9+2I0vVbyo72hXEk85mICPBv0tLzjdzg77X8Cu6hSCrxnx7E7c3itvNCJjzyWhI68/20rPIzsrbypcje8rBTXupkGATtpcGM8+0IHPF4IEjylZ8A7we3JPB9vFb2zCpg8pE6lPGhQtruq+7s85+sGvcXYEzwOmoe8o76OvGlw47s7rPS8xhhuux7mEDw8pWK7FkAMPRHsarxxpn48LcIwusbRgbvv2He8TMosvSQDkbuIUaC8WQStvACHRjwInTS8IypQvBGskDzWzc47mz/JPCcVGjyeKpO8ljvkvBJVQjyWy3q8oEMuPMgqd7wRrBA84RVzO7tAszzPAIs8T0yfPF0PpLoKHye76JvKO7WzSbvJc6G88LG4PHh8Er278Ha8VWkfvBMugzzmYoI8dWqJPJIwbTwoLrW87pidPOCFXLzHEdw8Kbc5Pa19rju/a9e8+kmZPFwWNjx/koA8pE4luy3CMDtgioS8FtAiPJBnjrxtC/G7kPckuwBnmbsedqe8/gTUu4JUTTxElBE54RXzPM4HHT3sNtg6YlNju8eh8ru2rLe84c6GvOJXizwIfQe930wUvSm3uTwT5Vg8Wm2Eu+BFgrwPSsu7xP/SPPaui7zVtLM8kjBtOuMn/LtgigS9p8kFPe/Y97tltag7x4FFPSE4dLxLQSi8+HfqutthyjzpJM+7bVQbPAqPEDwg+Jm80clpPNO7xbuSoFa8N1qRvHAdejvsxu65hdY/u53BuzqPbqA9ANeCPJofnLx6jpu8BhtCvKN15DzFaKo8TcOaPKTeu7yWhI67CU34O+HOhrrK/CU88qMUvfDR5bzznAI7lUL2vObymLv3NxA7K9DUO1eiZzuv/yA8d6NRPJRyhTsxhP27U1AEu7m3LjzVHYu8Rj3Duwb0ArzJk048NN8wPNc2JrwxhP27//1BPLpncjthqrG86Au0Oseh8jzYL5Q7c5haPHw3zTxOvIi8WXQWO1kk2ruQhzu7PO6MOw6ah7sMGJW8\"\n + \ }\n ],\n \"model\": \"text-embedding-ada-002-v2\",\n \"usage\": {\n \"prompt_tokens\": + 8,\n \"total_tokens\": 8\n }\n}\n" + headers: + CF-RAY: + - 936f933c6fce7e0a-GRU + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 27 Apr 2025 16:07:50 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=yAVgFNKcy2l7Cppym.kBBGNZMWvD0zYJbXBq3jJGg4A-1745770070-1.0.1.1-JvNpysiGohLJGBruqnedD94Y4r9AHPY_.gIefUGns48V4KkyaY5gC8yad0_SwaXeXArhpipuz5eQynAK2Rawe64.qrtUlri84024pQ0V8lE; + path=/; expires=Sun, 27-Apr-25 16:37:50 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=NOl2bW7B9MHsJt0XLs1fWk8BS4vWKLsCcHDInciUQBY-1745770070996-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-allow-origin: + - '*' + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-model: + - text-embedding-ada-002-v2 + openai-organization: + - crewai-iuxna1 + openai-processing-ms: + - '172' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + via: + - envoy-router-5f4895bd76-msnvl + x-envoy-upstream-service-time: + - '100' + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '10000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '9999991' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_20f7c5a3327d4060dbc7a61f4c5c4ba1 + http_version: HTTP/1.1 + status_code: 200 +- request: + body: '{"batch": [{"properties": {"class": "App", "version": "0.1.126", "language": + "python", "pid": 35168, "$lib": "posthog-python", "$lib_version": "3.9.3", "$geoip_disable": + true}, "timestamp": "2025-04-27T16:07:50.287520+00:00", "context": {}, "distinct_id": + "5303ea6e-a423-419e-a71c-3a0f0eaaaa16", "event": "init"}], "historical_migration": + false, "sentAt": "2025-04-27T16:07:50.792604+00:00", "api_key": "phc_PHQDA5KwztijnSojsxJ2c1DuJd52QCzJzT2xnSGvjN2"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate, zstd + Connection: + - keep-alive + Content-Length: + - '453' + Content-Type: + - application/json + User-Agent: + - posthog-python/3.9.3 + method: POST + uri: https://us.i.posthog.com/batch/ + response: + body: + string: 
'{"status":"Ok"}' + headers: + Connection: + - keep-alive + Content-Length: + - '15' + Content-Type: + - application/json + Date: + - Sun, 27 Apr 2025 16:07:51 GMT + access-control-allow-credentials: + - 'true' + server: + - envoy + vary: + - origin, access-control-request-method, access-control-request-headers + x-envoy-upstream-service-time: + - '46' + status: + code: 200 + message: OK +- request: + body: '{"input": ["test file"], "model": "text-embedding-ada-002", "encoding_format": + "base64"}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate, zstd + connection: + - keep-alive + content-length: + - '88' + content-type: + - application/json + cookie: + - __cf_bm=yAVgFNKcy2l7Cppym.kBBGNZMWvD0zYJbXBq3jJGg4A-1745770070-1.0.1.1-JvNpysiGohLJGBruqnedD94Y4r9AHPY_.gIefUGns48V4KkyaY5gC8yad0_SwaXeXArhpipuz5eQynAK2Rawe64.qrtUlri84024pQ0V8lE; + _cfuvid=NOl2bW7B9MHsJt0XLs1fWk8BS4vWKLsCcHDInciUQBY-1745770070996-0.0.1.1-604800000 + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.66.3 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.66.3 + x-stainless-read-timeout: + - '600' + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.9 + method: POST + uri: https://api.openai.com/v1/embeddings + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": + \"embedding\",\n \"index\": 0,\n \"embedding\": \"MriavBo/HbyzL4C8J0aGvA0LObyTrcU8NvT2vLBR6ryxjEi8dTacvMRTrTwGPdo6DkYXOwUCfLonc4G7WAsGPG+VODyUyQu8k9rAPHzJnLxQECy8+yQ8PDnSjLwOoI07kp9ivEp9q7zbg2k8lMkLvdYMr7v6vGK7iMKiPAels7w3qOO8UC/EvCGzBbxzOe66GTE6O3CEA70Fie08b3YgvEPOZDzc60K7+TVxPKEciLxcKMq8Mz+MuRgjVzz0Csq8mcc3vDYh8jxRS4o7q1M+un+MarzdJiG8jtyUu9BMMzyxq+C71oU9vACq2TsOc5I7Xb0evYjCortCR3O8kzQ3PNEAoDuOKKi8KijsO8lRWTxPqFI7GCPXu6WTwjwdTn48pZPCuxq4KzzJnWw8jaG2ux4C67zQ8ry75wNhPGqXDLvV8Gg84Y94vPli7LuTNLc7V9CnPLITujsRKP07vErwPBQGk7vLQKQ7Q1VWPONAkzwr3Ng8egfNO2N9GrwtBgI7XUSQPMM35zyIo4q8T07cvH+M6rsI4JG8oIezu9DyPLwb8wm9ZNeQO4yT0ztnjPu7LdmGOwWJ7TseAmu81ZZyPHaQkry/zo+8yOn/O7cfSTx2F4Q8iByZvCuvXbxzDHO79iYQPGOqlTwee/k8j5ABvTKqNzwZBD88e5whvG6H1bwxQl47lLsovBMXSD048aQ8bi1fPLmVBTxPe1e8E0RDPMmd7Lz72Kg8OGqzvMjpf7yxuUM8i1j1PAhng7pYC4a83tqNvNdmJTy4ALE8JVe7Oj0cTDztPGu8mi8RPKeviLxnjPs8B9Iuu+85Gbv5NXE8jXS7O+mmGLs5pZG8DEnpu3wjkzyzAoU8EpBWPOpaBb2UFR88IeAAPYcONjxvoxs8mtUavHtCq7wlSVg6RAnDPBsgBb2kd/w7WAuGu+7DXDw31V66bdNoPJJy5zsAXka8sPfzvJ/TRjzKjDc9cznuPNh0iLyw93M7wxhPPPk1cbxJQs27DVdMvEWeFz2CXJ064Y94PBKCc7wBPy6/sYxIvPZTi7pjqhW8reiSPAE/Lj1YCwY9jMBOO0Mo27xP9OW6sea+uzZ76DyZxze84+acvFCJOjvFraO8gBNcPK1CibxiFUG88GYUPVAvRDzFYZA8SvY5u2qXDDzt4nS71gwvPO08a7vJney8dO3aPO7RP7q5tJ28GuWmuw2Ex7se1e+7V/0iPfAagTtnbWO84UNlvEkV0jzttXk8LEQyvD07ZLogHrE8GYuwOw1XTDqYX167Ks51PNQ8fLsfEE66lMkLPIijCjxRxBi8emHDu2m2JLyOVSO8z11ovH/Y/TwNCzm8e7s5PLnCgDwOGRy8BYltuQVccjyHWkm80S2bvEk0ajxqapG8pZPCPA6/pbvwGoG8Vu+/PHYXBLyHaKy80PI8vIDm4Dzq0xM97Yj+O5RCmrz0ZEC73jQEuypV57qNzjE6Z4z7vHtvprt7b6Y8ULa1u1dXmbxbR+K7ytjKvKXAvTt2FwQ9jq+ZPLnCgLzXZqU4YrtKPJn0srtWlUm83Z8vPD9lDT2TrUW8Ks71O28cqjsIhpu8n0xVu0S9r7samRM8SY7gO8+3Xjwsca08jjaLOmIVQbzpeZ28h1pJu0LtfLwUUqa8Y8ktvXz2FzyScuc77zmZO/wyH7zRLRs9oA6lO1uT9TxbR2I8dEfRO24tXzwGEF877Q/wvFYcO7yaAha81itHuj3C1TsqzvU8ghAKvWqXDDy5DpS8EoLzPMMYz7vq05M82HSIvGOqlbwlV7u7yyEMu4jvHTwILKU7je3JvDFv2bxibzc81Gn3uojQBTxENr68pHf8u4ZM5ryScmc7/F+avCoobLzwwAo6IYYKvbhaJ7uaTqm859bluj3vUDz0KWK8NwLaO3AqjbwrvcC5JmWevIK2EzyIdo+6JmUevdBrS7qw9/O7J3OBvOg+vzwGl1A8UFy/u7zDfrxpLzM7mW1BPJUjgrzFYRC8iEmUPB57+bs5pZE8hh/r
OrHY2zx6rda7vu0nOqtyVrz8Mp88bxwqvNV3WjxkMYe8qr5pujOZArsIZ4M8j5CBu8nKZzv6Q9Q8hgDTOwx25Dz2zJk7c5NkO2rxgrvjXys8V6MsvXqt1jtaZnq84iTNO3BXiDwxnNQ7293fvEvXIb2BezU8DuwgPHZjlzyL/v66JdDJO7D3c7xC7fw7pigXO595ULvoMNy64GL9u6evCLoT+C887ZZhPLimOj10wN88lMmLOXtCK7xzZmk8Tm30O+85GbvFrSM9ZAQMvCaENjw+/bO8SY7gPAWJbTzXkyA7AvMaPDeo4zzjQJO80dMkO+Gd2zuUnJA877KnPEwSgLzeB4k83fklvILjjjxb7Wu8amqRPPzmCz2khV+87sNcvFHxEzwrNs88nh/aPIHVqzyCiZg8XnGLu+8MHroMo188yX5UvBoSorlLuIk8jAxivCr7cLxYCwa8f19vuytjSjyYBWi6MVDBPFyvOzxY3oo82HQIPW92oDxXV5m6m1yMvOGP+Lwlo048m7aCuu/+ujqxX027w2TivHM5bjwBi0E8V4SUPHR0zLsdTn67Qhp4PF2Qo7yymqs71+2WPN2fLzx1gq+7sJ19PB62V7xRPac80GtLvENV1rxw0Ja8oA6lPGrxgrzvOZm87bV5vOotijx62lE7ps4gPSfsj7pQAkm8Z+ZxPA04NDp/X288YyOkvIjCortaZvo8aYkpPFYcO7wUJau87h3TvLnhGDzdU5y6Jr8UPXAqjTy+DEA8Ks51vMRTLbzXZqW8DhmcvB6odDwIOgi5vma2O4e0v7zXOao8rIC5O2/CMzwTREM8H+NSPAhZILy/VYG77bX5u/oWWTpc3La7+hZZPHyqhDw5S5s8lLsovJQVHzz5rn887wyePPt+Mrob84m8jGbYPDb0djyyQLU86cWwPNxyND3UaXc8RcuSPGQxBzzJflS8sm2wPKZ0qrusjhy8Mriau3z2F7y8SvA7PiovPFEejzxENj48nh/avIJcHTzLxxU7cFcIvLHmPjq3TMQ8LVKVPLgtrLyTNLe7HgLru7MvAL3XGpK8Q87kvNLhhztLqia8rLsXvPaABr0mvxS96aaYvKDCkbzqWgU6gagwOyBLLLybtgK9XnELvGTXkDwhWY+7V1eZOr7ArLsg/5i7GarIPCGGCrwZMbq8AH1eOjhqs7kaEiK80MXBPNwYvjwSr+67jGbYO+Bi/bvkbQ4712alPCVJWDvDkd28UALJPA0LObxEkLQ6lJwQPJkTS7yzL4A83Bi+uB8QTrygDqU774WsvC1SFTx89hc7Hqj0O2ghUDxpL7O8SiM1vAbEyzyYjFm8q3JWO+O5IbxzDHM8mH72O6A7ILyIdg89V9AnvJ8AQrxq8YI6/OYLvZOOrTs2Tu06e0IrPAiGmzyyIR28M5mCvFWH5ruy9CG8rK00vJLM3TvE+ba87Q9wvNbfs7yG09e8FNkXvB57eTxjyS087TxrvMttn7xL16E7VpVJvMoFRrzt4nS81XfavNh0CLzuw9w6yZ3svN3MKjyzL4A7Vnaxu4GoML0VjYS8yuatuvtRN73DkV28pP7tO10XFTz1Rag8nh/aPC0Ggrv8QAI8bdNoOk4T/rs+hCU8nwDCu+g+P7yU6KO8qybDOksEHTzpeZ08fKoEPU97V7g2Tm284GJ9PLDK+Drh6W67nsVju9XwaLwYb2q64vfRO+fWZbxwKg08cISDvI0axbsCTRE9+rziu4ECJzyfpku5gdWrPKUM0bzwGgE8yl+8vMNk4rsYb+o6AKpZPKWTwryybbC8fFCOPHXcJTviUcg82wpbvNDyPDvj5pw57tG/PA5zkryUbxU7Jf1Eu+l5nTuhHAi7COCRvDgeIDtXsY85EtxpPHbqiDvgYn28B0s9u3xQDrwrkEW5CLMWO1ZJtrsf8TU9Ya1nPMW7Bj0gLBQ9Griru2e5drw+dkK6OBA9u3x9ibzF2p48qybDPLMChbzccrS8v0eePJ8tvTysrTQ8gdUrvGnjn7sYb+o8dr2NPFE9p7zEcsU6etpRvfxfGjuCEAq8mgIWvAG4vLx62tG7JmWevKVmxzynrwi9Hi/mPEmOYDw+/bO8ZNeQO/kIdrzUPHy80bQMPOeparx0wN88y8cVu9AfOLyIdg88Ak0RvPt+srwCeow61+2WN3qA2zzud0m9aRCbvEJ07jsVYIk89N1OO2OqlTsOoI28AnqMvMhw8bnQxcE7mZo8PA04NDqmRy88qr5pvFU7U7xutFC8P96bvNuw5Ls/vwO7UZcdvEk0aryl7Tg7H5c/PFejrDtdkCM8iyv6vOmmmDy5aAo9OB6gvFyvuzve2g08uACxO0JHc7wHeDg8VmjOu1HEmLygh7M86tMTvbc+YbwC8xq9vu0nvBic5TzvWLG7VnaxuxKv7rsZMbo7ThP+Oo6CHjxq8YI2joKeO/atgbwHSz26cP2RO3sVMLthNFm77h3TOuep6jvFBxo7WDgBvdQ8fLw2e+g7LCWauquf0bsgHjE7Er3RvO+yp7z0Vl285wNhPNwYvrlWHLu8rK00vFUOWLxeywG9H/E1PO8rtrz03U483HK0vMx7grl7nKG8PZVavGN9mjyxMlI89b62O2kvM7x1Npy8tz7hu4LjDr290eG6gmqAO/Qp4jvdrZI8DTg0vGN9GruAx8g8Z4x7uxpsmDygtC68Q6/MvLeY17s9wlU8Hi9mO3WvqrsFXPK8CCwlPO/+ujvkmok7jAxiPOHpbjx/jGo6jXQ7vPYmELwbIIU8uHm/uxl9Tby5woC8k1NPvAAxS7wRKH08zz7QvOrTEzm90eG8IKUiOzb0drxRSwo7n1o4vSVXO7zJney7b6Mbvb7ArDzgYv27BQL8OfVFqDxWaE48+dv6u7nCgLvRAKA8CLOWvD0cTLwgHrG67Q/wvO8MnrxnbWO6pnSqPPsFpLy3xdK7bxyqvB7Vb7zK2Eo8UZedOxNxvjw4xCm81R3kvBoSIrrn1uU7s9WJPGlcrrsOv6U8DNBavJScED3vK7Y87eJ0u1FLirsamZO4vbJJPOmmmLziq748+kNUPvRWXTzpTCK8aQI4PR7V77v8jBW8cFcIPGk9Frit6JK77qTEPDHJzzwT+K88dHRMO44oqDogpaK7RAlDPAf/Kb2IHJm8jUdAvMNFyrx6rVY87/66vLFfzbvQTDO78O0FPcW7BrwzEhG8s9WJvBKC8zx8yRy56Gu6vLPVibw9aN87gG1SPGReAr04ajM43EW5O/SDWDwhswU9iKOKuis2Tzz5CPa8LHGtO2m2pLxPe1c8SRXSPO2W4Ts+0Li84RbquwfxxjwlKkC8aVwuu8NFSjyTrcW5T3vXO4YtTjt0wN883HI0vKeCDTvqWoW8+TXxu/vYqDy88Pm8zHsCPR9qxLw2Tm07IVmPvKoY4LvIcPE7v3QZvHx9iTy5lQW8lLsoOpjY7Dt1r6q8ZASMvBVgCT0T+C88b5W4PGpqkTzQTDO8ZxNtOwLUAjyMhfC8XILAvLD387xXsY+73OvCO88RVbx/BXm6LVIVvdA
fuLw5LIO8RBemvHvotLvhcGA89UWovF1EkDyYMmM8xCYyPKtTvrwBP647wzdnPNcaEjuCiZi7uIciu2dtYzun3IO7RXGcu9BrS7yzAoU89q0BvfwynztVh2a8Qu18PD8Llzxp4x+04zKwvDhqMzw2x/u7DkaXPIyya7qMwM676Gu6O59MVTmzAgW89iaQvLgtLLvUPHw8/F8avUwSALxzOW65ps4gPT6jPTzcRTm79INYvOqHADsgeCc7rRWOvFzcNji4eb88/DIfvCr7cLxRPSc8yfdiPDOZAruzAgU9XRcVOtEtm7xLi4669RitvCBLrLwMKlG8duoIPL1YUz17byY7w0XKvLN7E73Q8jw8XNy2vGeM+7wSr268DbFCPRIJZbylwD28K2PKu25oPb6rn9E8vaTmPHucoTtd6hk8xTSVO/Q3xTzkmom8mfQyPEVSBDxvwjM8EVX4u+otiryqGGA8sCTvOsshDDx7u7k7COCRvEMo2zxhrec8yhOpPD79M7ysB6s7yZ3su1dXmTsVjQS63HK0vD1o3zwa5Sa7aKhBvC2si7sMo188v84PPCQcXTz7fjI8AFDjutGmqTsYb2q8BS93OxlQ0jsr3Fg7XeoZPVyCwDppAji7sH5lPErJPjwAMcs80S0bPHyqBD3ifsO8ejTIPD5XqrxaOX+8sYxIvFuTdTwtUpU72KGDvNEAILx/MvQ7fH2JOhgjV7ysYaG8YuhFO0uLDjx/MnS8ANdUvHwjk7yCiZg8JpKZvFFLijxXhJS8SbvbvO08azzeNAS8dTacPGEHXrwC8xq9aKhBPFtHYryGLc47h4fEu+7wVz10occ7XChKPPk1cTwO7CC6ZDGHvJoCFjt1Nhy8aS8zvAhnAz2kK2m8YkI8vOoAj7wM/VU7UqUAO2e5drxnE+07sPdzvJ7FY7y938S7ThN+vO0PcLxQ1c07v84PPe9YsTzuHVM8OaURPSBLLD2U6CM8FWAJvVejrLsH/6k7vjk7PF0JMjykWOQ83cwqvLBR6rxk15C8AtSCO8hwcTxpAri7sPdzuQUCfDz2zBm7sm2wu0uLjr0tBoK81XfaPHaQkj3pphi84vfRPMshDDv7fjI9yVHZO5u2gjw+V6q7htNXvI2htrymoaW8avECu+gRxDvKXzy8pKT3u/sFpLxJFdI8cP2RvNzrQrxwKo08dM7CvB1OfrxuaL07JSrAvPmu/zz1vjY8Mqq3vBNEQzkUBpO8bmi9PICazbx8IxO8iNAFO91THL2MZti84RbqPA/6g7ykpHc8piiXPLLHprt7Qqu8bmi9O9dHjbw3tsa51itHPCaxMbwmZZ68GdfDOkJH8zqbXAy80B+4ukk0ajw5/4e7BQL8PC1SlTx/BXm8AH3evFHxk7wg/xg74xOYvGfm8TwHpbO7H5c/u17LgbwlV7u7fCOTPIDHSDuIHJk51ivHPAz9VbxRaiK7E/ivvFt0XTvWK0e9fH0JvRQzjjxpXC683a2SvNG0jLxKfau8ULY1OsO+2Dy9WFO4ddylu11jKLuMhXA8CDqIvCcZizoxnNQ8hkxmPKYatLy/KAa9aT2WPACq2TvRpik8Z4z7u2e5djy+GqO81Dz8vAJ6jL1E3Mc8RUQhO+hd17sfakQ70MXBPIdayTtVDli6GyAFvIH0QzxMEoC83HI0O+otCr3qAA+8YdpivA3ePbygwhE92KEDPW4ORzyGTOY7xa2jPHu7ubxpArg7BYntO1vta7wf8bU81ivHu61CCT08Dmm8ARKzvJp7pLlw/RG9K+o7vNLhhzz0Cko7ycpnvCB4p7vQHzg8CA0NPHZjF7vW/ku8RZ4XvZ95UDtEF6a8FDMOvNvdXzyCtpO8buHLu/nbejwSY1u7DCrROyX9xDtq8YK8kp9ivORtjjqngo28ps6gPHa9jbweidw7MZxUvHUJoTwORpc7Vkm2PBmqyDzYdAi8CA2NPIhJFDtOQHm8418rPB6o9LzVd9q8rIA5vDjEKTwldtM8YdriPIKJGDwGatW8avGCPCoobLvWWMI8H2rEPLHY2zwHHkK9RfiNPPWfnjy4ALE8ucKAuzH2yjrXRw26RGO5OEu4Cb2CL6I7S+WEO+SaCbugh7O8ejRIPC0Ggjt0dEw8lOijPLjTtTz0g1g8abaku43OsTsrY8q8vdHhuwFsKbzIQ/a8lG8VveLYubpJFdI8s04YPNQ8fLsOcxK8LBe3PIK2k7weqPQ7CA0NvBlQ0rstBgK9da+qPPpwTzxFUoQ8Yo7PPAIgFryfAMI8ZAQMO5gy47v7q627y8cVPI42Czz1RSi8gi8iO5L5WLnu0T+8+9govIHVK7vpH6e5Xb0ePCXQSbz1n549RXGcPMjp/7tpXK470VoWPD/eGzya1Ro86Zi1PAceQrynVZK8v3SZPDnSjLutQgm8c2ZpvIyy67wHSz08b3YgvKEciDz8Mp+7ROqqPBmLsDt6gFs7ExfIPN2tkjw5eJY6sMp4Oh57+Tu8HfU6v1WBu0OvzLzVHWQ7Wjl/POOMprvc68K8w+vTPMl+VLwYI9e6ucIAveSaCTxjnDK4iNCFPIFOOjzFrSO9yyGMvEu4ibtWlUm7Ks71vL+hFDxnjPu7\"\n + \ }\n ],\n \"model\": \"text-embedding-ada-002-v2\",\n \"usage\": {\n \"prompt_tokens\": + 2,\n \"total_tokens\": 2\n }\n}\n" + headers: + CF-RAY: + - 936f933fe9eb7e0a-GRU + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Sun, 27 Apr 2025 16:07:51 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-allow-origin: + - '*' + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-model: + - text-embedding-ada-002-v2 + openai-organization: + - crewai-iuxna1 + openai-processing-ms: + - '179' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + via: + - envoy-router-7bbfccd4b9-p6rt4 + x-envoy-upstream-service-time: + - '105' + x-ratelimit-limit-requests: + - '10000' + 
x-ratelimit-limit-tokens: + - '10000000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '9999998' + x-ratelimit-reset-requests: + - 6ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_b1ab10d1ad4421252a7eb1b01ad92f5b + http_version: HTTP/1.1 + status_code: 200 +version: 1 diff --git a/tests/tools/test_search_tools.py b/tests/tools/test_search_tools.py new file mode 100644 index 000000000..eaa0c591c --- /dev/null +++ b/tests/tools/test_search_tools.py @@ -0,0 +1,309 @@ +import os +import tempfile +from pathlib import Path +from unittest.mock import ANY, MagicMock + +import pytest +from embedchain.models.data_type import DataType + +from crewai_tools.tools import ( + CodeDocsSearchTool, + CSVSearchTool, + DirectorySearchTool, + DOCXSearchTool, + GithubSearchTool, + JSONSearchTool, + MDXSearchTool, + PDFSearchTool, + TXTSearchTool, + WebsiteSearchTool, + XMLSearchTool, + YoutubeChannelSearchTool, + YoutubeVideoSearchTool, +) +from crewai_tools.tools.rag.rag_tool import Adapter + +pytestmark = [pytest.mark.vcr(filter_headers=["authorization"])] + + +@pytest.fixture +def mock_adapter(): + mock_adapter = MagicMock(spec=Adapter) + return mock_adapter + + +def test_directory_search_tool(): + with tempfile.TemporaryDirectory() as temp_dir: + test_file = Path(temp_dir) / "test.txt" + test_file.write_text("This is a test file for directory search") + + tool = DirectorySearchTool(directory=temp_dir) + result = tool._run(search_query="test file") + assert "test file" in result.lower() + + +def test_pdf_search_tool(mock_adapter): + mock_adapter.query.return_value = "this is a test" + + tool = PDFSearchTool(pdf="test.pdf", adapter=mock_adapter) + result = tool._run(query="test content") + assert "this is a test" in result.lower() + mock_adapter.add.assert_called_once_with("test.pdf", data_type=DataType.PDF_FILE) + mock_adapter.query.assert_called_once_with("test content") + + mock_adapter.query.reset_mock() + mock_adapter.add.reset_mock() + + tool = PDFSearchTool(adapter=mock_adapter) + result = tool._run(pdf="test.pdf", query="test content") + assert "this is a test" in result.lower() + mock_adapter.add.assert_called_once_with("test.pdf", data_type=DataType.PDF_FILE) + mock_adapter.query.assert_called_once_with("test content") + + +def test_txt_search_tool(): + with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as temp_file: + temp_file.write(b"This is a test file for txt search") + temp_file_path = temp_file.name + + try: + tool = TXTSearchTool() + tool.add(temp_file_path) + result = tool._run(search_query="test file") + assert "test file" in result.lower() + finally: + os.unlink(temp_file_path) + + +def test_docx_search_tool(mock_adapter): + mock_adapter.query.return_value = "this is a test" + + tool = DOCXSearchTool(docx="test.docx", adapter=mock_adapter) + result = tool._run(search_query="test content") + assert "this is a test" in result.lower() + mock_adapter.add.assert_called_once_with("test.docx", data_type=DataType.DOCX) + mock_adapter.query.assert_called_once_with("test content") + + mock_adapter.query.reset_mock() + mock_adapter.add.reset_mock() + + tool = DOCXSearchTool(adapter=mock_adapter) + result = tool._run(docx="test.docx", search_query="test content") + assert "this is a test" in result.lower() + mock_adapter.add.assert_called_once_with("test.docx", data_type=DataType.DOCX) + mock_adapter.query.assert_called_once_with("test content") + + +def test_json_search_tool(): + with tempfile.NamedTemporaryFile(suffix=".json", 
delete=False) as temp_file: + temp_file.write(b'{"test": "This is a test JSON file"}') + temp_file_path = temp_file.name + + try: + tool = JSONSearchTool() + result = tool._run(search_query="test JSON", json_path=temp_file_path) + assert "test json" in result.lower() + finally: + os.unlink(temp_file_path) + + +def test_xml_search_tool(mock_adapter): + mock_adapter.query.return_value = "this is a test" + + tool = XMLSearchTool(adapter=mock_adapter) + result = tool._run(search_query="test XML", xml="test.xml") + assert "this is a test" in result.lower() + mock_adapter.add.assert_called_once_with("test.xml") + mock_adapter.query.assert_called_once_with("test XML") + + +def test_csv_search_tool(): + with tempfile.NamedTemporaryFile(suffix=".csv", delete=False) as temp_file: + temp_file.write(b"name,description\ntest,This is a test CSV file") + temp_file_path = temp_file.name + + try: + tool = CSVSearchTool() + tool.add(temp_file_path) + result = tool._run(search_query="test CSV") + assert "test csv" in result.lower() + finally: + os.unlink(temp_file_path) + + +def test_mdx_search_tool(): + with tempfile.NamedTemporaryFile(suffix=".mdx", delete=False) as temp_file: + temp_file.write(b"# Test MDX\nThis is a test MDX file") + temp_file_path = temp_file.name + + try: + tool = MDXSearchTool() + tool.add(temp_file_path) + result = tool._run(search_query="test MDX") + assert "test mdx" in result.lower() + finally: + os.unlink(temp_file_path) + + +def test_website_search_tool(mock_adapter): + mock_adapter.query.return_value = "this is a test" + + website = "https://crewai.com" + search_query = "what is crewai?" + tool = WebsiteSearchTool(website=website, adapter=mock_adapter) + result = tool._run(search_query=search_query) + + mock_adapter.query.assert_called_once_with("what is crewai?") + mock_adapter.add.assert_called_once_with(website, data_type=DataType.WEB_PAGE) + + assert "this is a test" in result.lower() + + mock_adapter.query.reset_mock() + mock_adapter.add.reset_mock() + + tool = WebsiteSearchTool(adapter=mock_adapter) + result = tool._run(website=website, search_query=search_query) + + mock_adapter.query.assert_called_once_with("what is crewai?") + mock_adapter.add.assert_called_once_with(website, data_type=DataType.WEB_PAGE) + + assert "this is a test" in result.lower() + + +def test_youtube_video_search_tool(mock_adapter): + mock_adapter.query.return_value = "some video description" + + youtube_video_url = "https://www.youtube.com/watch?v=sample-video-id" + search_query = "what is the video about?" + tool = YoutubeVideoSearchTool( + youtube_video_url=youtube_video_url, + adapter=mock_adapter, + ) + result = tool._run(search_query=search_query) + assert "some video description" in result + + mock_adapter.add.assert_called_once_with( + youtube_video_url, data_type=DataType.YOUTUBE_VIDEO + ) + mock_adapter.query.assert_called_once_with(search_query) + + mock_adapter.query.reset_mock() + mock_adapter.add.reset_mock() + + tool = YoutubeVideoSearchTool(adapter=mock_adapter) + result = tool._run(youtube_video_url=youtube_video_url, search_query=search_query) + assert "some video description" in result + + mock_adapter.add.assert_called_once_with( + youtube_video_url, data_type=DataType.YOUTUBE_VIDEO + ) + mock_adapter.query.assert_called_once_with(search_query) + + +def test_youtube_channel_search_tool(mock_adapter): + mock_adapter.query.return_value = "channel description" + + youtube_channel_handle = "@crewai" + search_query = "what is the channel about?" 
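+    # ensure a handle supplied at construction time is added with the YOUTUBE_CHANNEL data type before querying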
+ tool = YoutubeChannelSearchTool( + youtube_channel_handle=youtube_channel_handle, adapter=mock_adapter + ) + result = tool._run(search_query=search_query) + assert "channel description" in result + mock_adapter.add.assert_called_once_with( + youtube_channel_handle, data_type=DataType.YOUTUBE_CHANNEL + ) + mock_adapter.query.assert_called_once_with(search_query) + + mock_adapter.query.reset_mock() + mock_adapter.add.reset_mock() + + tool = YoutubeChannelSearchTool(adapter=mock_adapter) + result = tool._run( + youtube_channel_handle=youtube_channel_handle, search_query=search_query + ) + assert "channel description" in result + + mock_adapter.add.assert_called_once_with( + youtube_channel_handle, data_type=DataType.YOUTUBE_CHANNEL + ) + mock_adapter.query.assert_called_once_with(search_query) + + +def test_code_docs_search_tool(mock_adapter): + mock_adapter.query.return_value = "test documentation" + + docs_url = "https://crewai.com/any-docs-url" + search_query = "test documentation" + tool = CodeDocsSearchTool(docs_url=docs_url, adapter=mock_adapter) + result = tool._run(search_query=search_query) + assert "test documentation" in result + mock_adapter.add.assert_called_once_with(docs_url, data_type=DataType.DOCS_SITE) + mock_adapter.query.assert_called_once_with(search_query) + + mock_adapter.query.reset_mock() + mock_adapter.add.reset_mock() + + tool = CodeDocsSearchTool(adapter=mock_adapter) + result = tool._run(docs_url=docs_url, search_query=search_query) + assert "test documentation" in result + mock_adapter.add.assert_called_once_with(docs_url, data_type=DataType.DOCS_SITE) + mock_adapter.query.assert_called_once_with(search_query) + + +def test_github_search_tool(mock_adapter): + mock_adapter.query.return_value = "repo description" + + # ensure the provided repo and content types are used after initialization + tool = GithubSearchTool( + gh_token="test_token", + github_repo="crewai/crewai", + content_types=["code"], + adapter=mock_adapter, + ) + result = tool._run(search_query="tell me about crewai repo") + assert "repo description" in result + mock_adapter.add.assert_called_once_with( + "repo:crewai/crewai type:code", data_type="github", loader=ANY + ) + mock_adapter.query.assert_called_once_with("tell me about crewai repo") + + # ensure content types provided by run call is used + mock_adapter.query.reset_mock() + mock_adapter.add.reset_mock() + + tool = GithubSearchTool(gh_token="test_token", adapter=mock_adapter) + result = tool._run( + github_repo="crewai/crewai", + content_types=["code", "issue"], + search_query="tell me about crewai repo", + ) + assert "repo description" in result + mock_adapter.add.assert_called_once_with( + "repo:crewai/crewai type:code,issue", data_type="github", loader=ANY + ) + mock_adapter.query.assert_called_once_with("tell me about crewai repo") + + # ensure default content types are used if not provided + mock_adapter.query.reset_mock() + mock_adapter.add.reset_mock() + + tool = GithubSearchTool(gh_token="test_token", adapter=mock_adapter) + result = tool._run( + github_repo="crewai/crewai", + search_query="tell me about crewai repo", + ) + assert "repo description" in result + mock_adapter.add.assert_called_once_with( + "repo:crewai/crewai type:code,repo,pr,issue", data_type="github", loader=ANY + ) + mock_adapter.query.assert_called_once_with("tell me about crewai repo") + + # ensure nothing is added if no repo is provided + mock_adapter.query.reset_mock() + mock_adapter.add.reset_mock() + + tool = GithubSearchTool(gh_token="test_token", 
adapter=mock_adapter) + result = tool._run(search_query="tell me about crewai repo") + mock_adapter.add.assert_not_called() + mock_adapter.query.assert_called_once_with("tell me about crewai repo") From edd4e5bef927139e1b0b20b98954b5d7198c342f Mon Sep 17 00:00:00 2001 From: nicoferdi96 Date: Wed, 7 May 2025 18:34:15 +0200 Subject: [PATCH 329/391] Fix FirecrawlScrapeWebsiteTool (#298) * fix FirecrawlScrapeWebsiteTool: add missing config parameter and correct Dict type annotation - Add required config parameter when creating the tool - Change type hint from `dict` to `Dict` to resolve Pydantic validation issues * Update firecrawl_scrape_website_tool.py - removing optional config - removing timeout from Pydantic model * Removing config from __init__ - removing config from __init__ * Update firecrawl_scrape_website_tool.py - removing timeout --- .../firecrawl_scrape_website_tool.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py index 954136341..fcb5c6c8d 100644 --- a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py @@ -1,4 +1,4 @@ -from typing import Any, Optional, Type +from typing import Any, Optional, Type, Dict from crewai.tools import BaseTool from pydantic import BaseModel, ConfigDict, Field, PrivateAttr @@ -8,14 +8,8 @@ try: except ImportError: FirecrawlApp = Any - class FirecrawlScrapeWebsiteToolSchema(BaseModel): url: str = Field(description="Website URL") - timeout: Optional[int] = Field( - default=30000, - description="Timeout in milliseconds for the scraping operation. The default value is 30000.", - ) - class FirecrawlScrapeWebsiteTool(BaseTool): """ @@ -31,6 +25,8 @@ class FirecrawlScrapeWebsiteTool(BaseTool): include_tags (list[str]): Tags to include. Default: [] exclude_tags (list[str]): Tags to exclude. Default: [] headers (dict): Headers to include. Default: {} + wait_for (int): Time to wait for page to load in ms. Default: 0 + json_options (dict): Options for JSON extraction. 
Default: None """ model_config = ConfigDict( @@ -40,7 +36,7 @@ class FirecrawlScrapeWebsiteTool(BaseTool): description: str = "Scrape webpages using Firecrawl and return the contents" args_schema: Type[BaseModel] = FirecrawlScrapeWebsiteToolSchema api_key: Optional[str] = None - config: Optional[dict[str, Any]] = Field( + config: Dict[str, Any] = Field( default_factory=lambda: { "formats": ["markdown"], "only_main_content": True, From 64f6f998d8e714925ff180f6c0a4e619d50baa2e Mon Sep 17 00:00:00 2001 From: Harikrishnan K <128063333+HarikrishnanK9@users.noreply.github.com> Date: Thu, 8 May 2025 22:47:17 +0530 Subject: [PATCH 330/391] FileCompressorTool with support for files and subdirectories (#282) * FileCompressorTool with support for files and subdirectories * README.md * Updated files_compressor_tool.py * Enhanced FileCompressorTool different compression formats * Update README.md * Updated with lookup tables * Updated files_compressor_tool.py * Added Test Cases * Removing Test_Cases.md inorder to update with correct test case as per the review * Added Test Cases * Test Cases with patch,MagicMock * Empty lines Removed * Updated Test Case,Ensured Maximum Scenarios * Deleting old one * Updated __init__.py to include FileCompressorTool * Update __init__.py to add FileCompressorTool --- src/crewai_tools/__init__.py | 1 + src/crewai_tools/tools/__init__.py | 1 + .../tools/files_compressor_tool/README.md | 119 ++++++++++++++++++ .../files_compressor_tool.py | 117 +++++++++++++++++ .../files_compressor_tool_test2.py | 93 ++++++++++++++ 5 files changed, 331 insertions(+) create mode 100644 src/crewai_tools/tools/files_compressor_tool/README.md create mode 100644 src/crewai_tools/tools/files_compressor_tool/files_compressor_tool.py create mode 100644 src/crewai_tools/tools/files_compressor_tool/files_compressor_tool_test2.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index f42750593..9a4af6d9f 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -16,6 +16,7 @@ from .tools import ( EXASearchTool, FileReadTool, FileWriterTool, + FileCompressorTool, FirecrawlCrawlWebsiteTool, FirecrawlScrapeWebsiteTool, FirecrawlSearchTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index d95d08c78..7aba8d4ea 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -18,6 +18,7 @@ from .file_writer_tool.file_writer_tool import FileWriterTool from .firecrawl_crawl_website_tool.firecrawl_crawl_website_tool import ( FirecrawlCrawlWebsiteTool, ) +from .files_compressor_tool.files_compressor_tool import FileCompressorTool from .firecrawl_scrape_website_tool.firecrawl_scrape_website_tool import ( FirecrawlScrapeWebsiteTool, ) diff --git a/src/crewai_tools/tools/files_compressor_tool/README.md b/src/crewai_tools/tools/files_compressor_tool/README.md new file mode 100644 index 000000000..01fdeee7d --- /dev/null +++ b/src/crewai_tools/tools/files_compressor_tool/README.md @@ -0,0 +1,119 @@ +# 📦 FileCompressorTool + +The **FileCompressorTool** is a utility for compressing individual files or entire directories (including nested subdirectories) into different archive formats, such as `.zip` or `.tar` (including `.tar.gz`, `.tar.bz2`, and `.tar.xz`). This tool is useful for archiving logs, documents, datasets, or backups in a compact format, and ensures flexibility in how the archives are created. + +--- + +## Description + +This tool: +- Accepts a **file or directory** as input. 
+- Supports **recursive compression** of subdirectories. +- Lets you define a **custom output archive path** or defaults to the current directory. +- Handles **overwrite protection** to avoid unintentional data loss. +- Supports multiple compression formats: `.zip`, `.tar`, `.tar.gz`, `.tar.bz2`, and `.tar.xz`. + +--- + +## Arguments + +| Argument | Type | Required | Description | +|---------------|-----------|----------|-----------------------------------------------------------------------------| +| `input_path` | `str` | ✅ | Path to the file or directory you want to compress. | +| `output_path` | `str` | ❌ | Optional path for the resulting archive file. Defaults to `./<input_name>.<format>` in the current working directory. | +| `overwrite` | `bool` | ❌ | Whether to overwrite an existing archive file. Defaults to `False`. | +| `format` | `str` | ❌ | Compression format to use. Can be one of `zip`, `tar`, `tar.gz`, `tar.bz2`, `tar.xz`. Defaults to `zip`. | + +--- + + +## Usage Example + +```python +from crewai_tools import FileCompressorTool + +# Initialize the tool +tool = FileCompressorTool() + +# Compress a directory with subdirectories and files into a zip archive +result = tool._run( + input_path="./data/project_docs", # Folder containing subfolders & files + output_path="./output/project_docs.zip", # Optional output path (defaults to zip format) + overwrite=True # Allow overwriting if file exists +) +print(result) +# Example output: Successfully compressed './data/project_docs' into './output/project_docs.zip' + +``` + +--- + +## Example Scenarios + +### Compress a single file into a zip archive: +```python +# Compress a single file into a zip archive +result = tool._run(input_path="report.pdf") +# Example output: Successfully compressed 'report.pdf' into './report.zip' +``` + +### Compress a directory with nested folders into a zip archive: +```python +# Compress a directory containing nested subdirectories and files +result = tool._run(input_path="./my_data", overwrite=True) +# Example output: Successfully compressed 'my_data' into './my_data.zip' +``` + +### Use a custom output path with a zip archive: +```python +# Compress a directory and specify a custom zip output location +result = tool._run(input_path="./my_data", output_path="./backups/my_data_backup.zip", overwrite=True) +# Example output: Successfully compressed 'my_data' into './backups/my_data_backup.zip' +``` + +### Prevent overwriting an existing zip file: +```python +# Try to compress a directory without overwriting an existing zip file +result = tool._run(input_path="./my_data", output_path="./backups/my_data_backup.zip", overwrite=False) +# Example output: Output './backups/my_data_backup.zip' already exists and overwrite is set to False.
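+
+# A quick sketch of the two validation failures (return messages follow the
+# implementation in files_compressor_tool.py below):
+result = tool._run(input_path="./my_data", output_path="./my_data.zip", format="tar.gz")
+# Example output: Error: If 'tar.gz' format is chosen, output file must have a '.tar.gz' extension.
+result = tool._run(input_path="./my_data", format="rar")
+# Example output: Compression format 'rar' is not supported. Allowed formats: zip, tar, tar.gz, tar.bz2, tar.xz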
``` + +### Compress into a tar archive: +```python +# Compress a directory into a tar archive +result = tool._run(input_path="./my_data", format="tar", overwrite=True) +# Example output: Successfully compressed 'my_data' into './my_data.tar' +``` + +### Compress into a tar.gz archive: +```python +# Compress a directory into a tar.gz archive +result = tool._run(input_path="./my_data", format="tar.gz", overwrite=True) +# Example output: Successfully compressed 'my_data' into './my_data.tar.gz' +``` + +### Compress into a tar.bz2 archive: +```python +# Compress a directory into a tar.bz2 archive +result = tool._run(input_path="./my_data", format="tar.bz2", overwrite=True) +# Example output: Successfully compressed 'my_data' into './my_data.tar.bz2' +``` + +### Compress into a tar.xz archive: +```python +# Compress a directory into a tar.xz archive +result = tool._run(input_path="./my_data", format="tar.xz", overwrite=True) +# Example output: Successfully compressed 'my_data' into './my_data.tar.xz' +``` + +--- + +## Error Handling and Validations + +- **File Extension Validation**: The tool ensures that the output file extension matches the selected format (e.g., `.zip` for `zip` format, `.tar` for `tar` format, etc.). +- **File/Directory Existence**: If the input path does not exist, an error message will be returned. +- **Overwrite Protection**: If a file already exists at the output path, the tool checks the `overwrite` flag before proceeding. If `overwrite=False`, it prevents overwriting the existing file. + +--- + +This tool provides a flexible and robust way to handle file and directory compression across multiple formats for efficient storage and backups. diff --git a/src/crewai_tools/tools/files_compressor_tool/files_compressor_tool.py b/src/crewai_tools/tools/files_compressor_tool/files_compressor_tool.py new file mode 100644 index 000000000..c86fd64e0 --- /dev/null +++ b/src/crewai_tools/tools/files_compressor_tool/files_compressor_tool.py @@ -0,0 +1,117 @@ +import os +import zipfile +import tarfile +from typing import Type, Optional +from pydantic import BaseModel, Field +from crewai.tools import BaseTool + + +class FileCompressorToolInput(BaseModel): + """Input schema for FileCompressorTool.""" + input_path: str = Field(..., description="Path to the file or directory to compress.") + output_path: Optional[str] = Field(default=None, description="Optional output archive filename.") + overwrite: bool = Field(default=False, description="Whether to overwrite the archive if it already exists.") + format: str = Field(default="zip", description="Compression format ('zip', 'tar', 'tar.gz', 'tar.bz2', 'tar.xz').") + + +class FileCompressorTool(BaseTool): + name: str = "File Compressor Tool" + description: str = ( + "Compresses a file or directory into an archive (zip, tar, tar.gz, tar.bz2, or tar.xz). " + "Useful for archiving logs, documents, or backups." + ) + args_schema: Type[BaseModel] = FileCompressorToolInput + + + def _run(self, input_path: str, output_path: Optional[str] = None, overwrite: bool = False, format: str = "zip") -> str: + + if not os.path.exists(input_path): + return f"Input path '{input_path}' does not exist." + + if not output_path: + output_path = self._generate_output_path(input_path, format) + + FORMAT_EXTENSION = { + "zip": ".zip", + "tar": ".tar", + "tar.gz": ".tar.gz", + "tar.bz2": ".tar.bz2", + "tar.xz": ".tar.xz" + } + + if format not in FORMAT_EXTENSION: + return f"Compression format '{format}' is not supported. 
Allowed formats: {', '.join(FORMAT_EXTENSION.keys())}" + elif not output_path.endswith(FORMAT_EXTENSION[format]): + return f"Error: If '{format}' format is chosen, output file must have a '{FORMAT_EXTENSION[format]}' extension." + if not self._prepare_output(output_path, overwrite): + return f"Output '{output_path}' already exists and overwrite is set to False." + + try: + format_compression = { + "zip": self._compress_zip, + "tar": self._compress_tar, + "tar.gz": self._compress_tar, + "tar.bz2": self._compress_tar, + "tar.xz": self._compress_tar + } + if format == "zip": + format_compression[format](input_path, output_path) + else: + format_compression[format](input_path, output_path, format) + + return f"Successfully compressed '{input_path}' into '{output_path}'" + except FileNotFoundError: + return f"Error: File not found at path: {input_path}" + except PermissionError: + return f"Error: Permission denied when accessing '{input_path}' or writing '{output_path}'" + except Exception as e: + return f"An unexpected error occurred during compression: {str(e)}" + + + def _generate_output_path(self, input_path: str, format: str) -> str: + """Generates output path based on input path and format.""" + if os.path.isfile(input_path): + base_name = os.path.splitext(os.path.basename(input_path))[0] # Remove extension + else: + base_name = os.path.basename(os.path.normpath(input_path)) # Directory name + return os.path.join(os.getcwd(), f"{base_name}.{format}") + + def _prepare_output(self, output_path: str, overwrite: bool) -> bool: + """Ensures output path is ready for writing.""" + output_dir = os.path.dirname(output_path) + if output_dir and not os.path.exists(output_dir): + os.makedirs(output_dir) + if os.path.exists(output_path) and not overwrite: + return False + return True + + def _compress_zip(self, input_path: str, output_path: str): + """Compresses input into a zip archive.""" + with zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED) as zipf: + if os.path.isfile(input_path): + zipf.write(input_path, os.path.basename(input_path)) + else: + for root, _, files in os.walk(input_path): + for file in files: + full_path = os.path.join(root, file) + arcname = os.path.relpath(full_path, start=input_path) + zipf.write(full_path, arcname) + + + def _compress_tar(self, input_path: str, output_path: str, format: str): + """Compresses input into a tar archive with the given format.""" + format_mode = { + "tar": "w", + "tar.gz": "w:gz", + "tar.bz2": "w:bz2", + "tar.xz": "w:xz" + } + + if format not in format_mode: + raise ValueError(f"Unsupported tar format: {format}") + + mode = format_mode[format] + + with tarfile.open(output_path, mode) as tarf: + arcname = os.path.basename(input_path) + tarf.add(input_path, arcname=arcname) diff --git a/src/crewai_tools/tools/files_compressor_tool/files_compressor_tool_test2.py b/src/crewai_tools/tools/files_compressor_tool/files_compressor_tool_test2.py new file mode 100644 index 000000000..b30199842 --- /dev/null +++ b/src/crewai_tools/tools/files_compressor_tool/files_compressor_tool_test2.py @@ -0,0 +1,93 @@ + +import os +import pytest +from crewai_tools.tools.files_compressor_tool import FileCompressorTool +from unittest.mock import patch, MagicMock + +@pytest.fixture +def tool(): + return FileCompressorTool() + +@patch("os.path.exists", return_value=False) +def test_input_path_does_not_exist(mock_exists, tool): + result = tool._run("nonexistent_path") + assert "does not exist" in result + +@patch("os.path.exists", return_value=True) +@patch("os.getcwd", 
return_value="/mocked/cwd") +@patch.object(FileCompressorTool, "_compress_zip") # Mock actual compression +@patch.object(FileCompressorTool, "_prepare_output", return_value=True) +def test_generate_output_path_default(mock_prepare, mock_compress, mock_cwd, mock_exists, tool): + result = tool._run(input_path="mydir", format="zip") + assert "Successfully compressed" in result + mock_compress.assert_called_once() + +@patch("os.path.exists", return_value=True) +@patch.object(FileCompressorTool, "_compress_zip") +@patch.object(FileCompressorTool, "_prepare_output", return_value=True) +def test_zip_compression(mock_prepare, mock_compress, mock_exists, tool): + result = tool._run(input_path="some/path", output_path="archive.zip", format="zip", overwrite=True) + assert "Successfully compressed" in result + mock_compress.assert_called_once() + +@patch("os.path.exists", return_value=True) +@patch.object(FileCompressorTool, "_compress_tar") +@patch.object(FileCompressorTool, "_prepare_output", return_value=True) +def test_tar_gz_compression(mock_prepare, mock_compress, mock_exists, tool): + result = tool._run(input_path="some/path", output_path="archive.tar.gz", format="tar.gz", overwrite=True) + assert "Successfully compressed" in result + mock_compress.assert_called_once() + +@pytest.mark.parametrize("format", ["tar", "tar.bz2", "tar.xz"]) +@patch("os.path.exists", return_value=True) +@patch.object(FileCompressorTool, "_compress_tar") +@patch.object(FileCompressorTool, "_prepare_output", return_value=True) +def test_other_tar_formats(mock_prepare, mock_compress, mock_exists, format, tool): + result = tool._run(input_path="path/to/input", output_path=f"archive.{format}", format=format, overwrite=True) + assert "Successfully compressed" in result + mock_compress.assert_called_once() + +@pytest.mark.parametrize("format", ["rar", "7z"]) +@patch("os.path.exists", return_value=True) #Ensure input_path exists +def test_unsupported_format(_, tool, format): + result = tool._run(input_path="some/path", output_path=f"archive.{format}", format=format) + assert "not supported" in result + +@patch("os.path.exists", return_value=True) +def test_extension_mismatch(_ , tool): + result = tool._run(input_path="some/path", output_path="archive.zip", format="tar.gz") + assert "must have a '.tar.gz' extension" in result + +@patch("os.path.exists", return_value=True) +@patch("os.path.isfile", return_value=True) +@patch("os.path.exists", return_value=True) +def test_existing_output_no_overwrite(_, __, ___, tool): + result = tool._run(input_path="some/path", output_path="archive.zip", format="zip", overwrite=False) + assert "overwrite is set to False" in result + +@patch("os.path.exists", return_value=True) +@patch("zipfile.ZipFile", side_effect=PermissionError) +def test_permission_error(mock_zip, _, tool): + result = tool._run(input_path="file.txt", output_path="file.zip", format="zip", overwrite=True) + assert "Permission denied" in result + +@patch("os.path.exists", return_value=True) +@patch("zipfile.ZipFile", side_effect=FileNotFoundError) +def test_file_not_found_during_zip(mock_zip, _, tool): + result = tool._run(input_path="file.txt", output_path="file.zip", format="zip", overwrite=True) + assert "File not found" in result + +@patch("os.path.exists", return_value=True) +@patch("zipfile.ZipFile", side_effect=Exception("Unexpected")) +def test_general_exception_during_zip(mock_zip, _, tool): + result = tool._run(input_path="file.txt", output_path="file.zip", format="zip", overwrite=True) + assert "unexpected error" 
in result + +# Test: Output directory is created when missing +@patch("os.makedirs") +@patch("os.path.exists", return_value=False) +def test_prepare_output_makes_dir(mock_exists, mock_makedirs): + tool = FileCompressorTool() + result = tool._prepare_output("some/missing/path/file.zip", overwrite=True) + assert result is True + mock_makedirs.assert_called_once() From 8ecc958e4c376bae80a2dcfe67fe2203fbd13c52 Mon Sep 17 00:00:00 2001 From: Filip Michalsky <31483888+filip-michalsky@users.noreply.github.com> Date: Sat, 10 May 2025 09:53:20 -0400 Subject: [PATCH 331/391] stagehand tool (#277) * stagehand tool * update import paths * updates * improve example * add tests * revert init * imports * add context manager * update tests * update example to run again * update context manager docs * add to pyproject.toml and run uv sync * run uv sync * update lazy import * update test mock * fixing tests * attempt to fix tests --- README.md | 2 +- src/crewai_tools/__init__.py | 27 +- src/crewai_tools/tools/__init__.py | 1 + .../tools/stagehand_tool/.env.example | 5 + .../tools/stagehand_tool/README.md | 273 +++++++ .../tools/stagehand_tool/__init__.py | 2 - .../tools/stagehand_tool/example.py | 116 +++ .../stagehand_tool/stagehand_extract_tool.py | 207 ----- .../tools/stagehand_tool/stagehand_tool.py | 756 ++++++++++++------ tests/tools/stagehand_tool_test.py | 262 ++++++ 10 files changed, 1195 insertions(+), 456 deletions(-) create mode 100644 src/crewai_tools/tools/stagehand_tool/.env.example create mode 100644 src/crewai_tools/tools/stagehand_tool/README.md create mode 100644 src/crewai_tools/tools/stagehand_tool/example.py delete mode 100644 src/crewai_tools/tools/stagehand_tool/stagehand_extract_tool.py create mode 100644 tests/tools/stagehand_tool_test.py diff --git a/README.md b/README.md index dd2e304e5..4ce6d3807 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ CrewAI provides an extensive collection of powerful tools ready to enhance your - **Web Scraping**: `ScrapeWebsiteTool`, `SeleniumScrapingTool` - **Database Integrations**: `PGSearchTool`, `MySQLSearchTool` - **API Integrations**: `SerperApiTool`, `EXASearchTool` -- **AI-powered Tools**: `DallETool`, `VisionTool` +- **AI-powered Tools**: `DallETool`, `VisionTool`, `StagehandTool` And many more robust tools to simplify your agent integrations. 
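Before the package wiring below, a minimal sketch of the context-manager lifecycle this commit introduces (assembled from the StagehandTool README and example added in this same patch; the key values are placeholders, not working credentials):

```python
from crewai_tools import StagehandTool

# Construct the tool as a context manager so the underlying browser session
# is cleaned up automatically, even if an exception occurs. Parameter names
# follow the StagehandTool README in this patch; values are placeholders.
with StagehandTool(
    api_key="your-browserbase-api-key",
    project_id="your-browserbase-project-id",
    model_api_key="your-llm-api-key",
) as stagehand_tool:
    # command_type picks the Stagehand primitive: "act" (default), "extract", or "observe".
    result = stagehand_tool.run(
        instruction="Extract the main article title and content",
        url="https://example.com/blog/article",
        command_type="extract",
    )
# Resources are released when the with-block exits.
```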
diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 9a4af6d9f..a85a164c0 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -1,3 +1,11 @@ +from .adapters.enterprise_adapter import EnterpriseActionTool +from .adapters.mcp_adapter import MCPServerAdapter +from .aws import ( + BedrockInvokeAgentTool, + BedrockKBRetrieverTool, + S3ReaderTool, + S3WriterTool, +) from .tools import ( AIMindTool, ApifyActorsTool, @@ -53,6 +61,7 @@ from .tools import ( SnowflakeConfig, SnowflakeSearchTool, SpiderTool, + StagehandTool, TXTSearchTool, VisionTool, WeaviateVectorSearchTool, @@ -60,20 +69,4 @@ from .tools import ( XMLSearchTool, YoutubeChannelSearchTool, YoutubeVideoSearchTool, -) - -from .aws import ( - S3ReaderTool, - S3WriterTool, - BedrockKBRetrieverTool, - BedrockInvokeAgentTool, -) - -from .adapters.mcp_adapter import ( - MCPServerAdapter, -) - - -from .adapters.enterprise_adapter import ( - EnterpriseActionTool -) +) \ No newline at end of file diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 7aba8d4ea..0c397902b 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -67,6 +67,7 @@ from .snowflake_search_tool import ( SnowflakeSearchToolInput, ) from .spider_tool.spider_tool import SpiderTool +from .stagehand_tool.stagehand_tool import StagehandTool from .txt_search_tool.txt_search_tool import TXTSearchTool from .vision_tool.vision_tool import VisionTool from .weaviate_tool.vector_search import WeaviateVectorSearchTool diff --git a/src/crewai_tools/tools/stagehand_tool/.env.example b/src/crewai_tools/tools/stagehand_tool/.env.example new file mode 100644 index 000000000..7a4d2890a --- /dev/null +++ b/src/crewai_tools/tools/stagehand_tool/.env.example @@ -0,0 +1,5 @@ +ANTHROPIC_API_KEY="your_anthropic_api_key" +OPENAI_API_KEY="your_openai_api_key" +MODEL_API_KEY="your_model_api_key" +BROWSERBASE_API_KEY="your_browserbase_api_key" +BROWSERBASE_PROJECT_ID="your_browserbase_project_id" \ No newline at end of file diff --git a/src/crewai_tools/tools/stagehand_tool/README.md b/src/crewai_tools/tools/stagehand_tool/README.md new file mode 100644 index 000000000..707b99343 --- /dev/null +++ b/src/crewai_tools/tools/stagehand_tool/README.md @@ -0,0 +1,273 @@ +# Stagehand Web Automation Tool + +This tool integrates the [Stagehand](https://docs.stagehand.dev/) framework with CrewAI, allowing agents to interact with websites and automate browser tasks using natural language instructions. + +## Description + +Stagehand is a powerful browser automation framework built by Browserbase that allows AI agents to: + +- Navigate to websites +- Click buttons, links, and other elements +- Fill in forms +- Extract data from web pages +- Observe and identify elements +- Perform complex workflows + +The StagehandTool wraps the Stagehand Python SDK to provide CrewAI agents with the ability to control a real web browser and interact with websites using three core primitives: + +1. **Act**: Perform actions like clicking, typing, or navigating +2. **Extract**: Extract structured data from web pages +3. **Observe**: Identify and analyze elements on the page + +## Requirements + +Before using this tool, you will need: + +1. A [Browserbase](https://www.browserbase.com/) account with API key and project ID +2. An API key for an LLM (OpenAI or Anthropic Claude) +3. 
The Stagehand Python SDK installed + +Install the dependencies: + +```bash +pip install stagehand-py +``` + +## Usage + +### Basic Usage + +The StagehandTool can be used in two ways: + +1. **Using a context manager (recommended)**: +```python +from crewai import Agent, Task, Crew +from crewai_tools import StagehandTool +from stagehand.schemas import AvailableModel + +# Initialize the tool with your API keys using a context manager +with StagehandTool( + api_key="your-browserbase-api-key", + project_id="your-browserbase-project-id", + model_api_key="your-llm-api-key", # OpenAI or Anthropic API key + model_name=AvailableModel.CLAUDE_3_7_SONNET_LATEST, # Optional: specify which model to use +) as stagehand_tool: + # Create an agent with the tool + researcher = Agent( + role="Web Researcher", + goal="Find and summarize information from websites", + backstory="I'm an expert at finding information online.", + verbose=True, + tools=[stagehand_tool], + ) + + # Create a task that uses the tool + research_task = Task( + description="Go to https://www.example.com and tell me what you see on the homepage.", + agent=researcher, + ) + + # Run the crew + crew = Crew( + agents=[researcher], + tasks=[research_task], + verbose=True, + ) + + result = crew.kickoff() + print(result) + # Resources are automatically cleaned up when exiting the context +``` + +2. **Manual resource management**: +```python +from crewai import Agent, Task, Crew +from crewai_tools import StagehandTool +from stagehand.schemas import AvailableModel + +# Initialize the tool with your API keys +stagehand_tool = StagehandTool( + api_key="your-browserbase-api-key", + project_id="your-browserbase-project-id", + model_api_key="your-llm-api-key", + model_name=AvailableModel.CLAUDE_3_7_SONNET_LATEST, +) + +try: + # Create an agent with the tool + researcher = Agent( + role="Web Researcher", + goal="Find and summarize information from websites", + backstory="I'm an expert at finding information online.", + verbose=True, + tools=[stagehand_tool], + ) + + # Create a task that uses the tool + research_task = Task( + description="Go to https://www.example.com and tell me what you see on the homepage.", + agent=researcher, + ) + + # Run the crew + crew = Crew( + agents=[researcher], + tasks=[research_task], + verbose=True, + ) + + result = crew.kickoff() + print(result) +finally: + # Explicitly clean up resources + stagehand_tool.close() +``` + +The context manager approach (option 1) is recommended as it ensures proper cleanup of resources even if exceptions occur. However, both approaches are valid and will properly manage the browser session. + +## Command Types + +The StagehandTool supports three different command types, each designed for specific web automation tasks: + +### 1. Act - Perform Actions on a Page + +The `act` command type (default) allows the agent to perform actions on a webpage, such as clicking buttons, filling forms, navigating, and more. + +**When to use**: Use `act` when you need to interact with a webpage by performing actions like clicking, typing, scrolling, or navigating. 
+ +**Example usage**: +```python +# Perform an action (default behavior) +result = stagehand_tool.run( + instruction="Click the login button", + url="https://example.com", + command_type="act" # Default, so can be omitted +) + +# Fill out a form +result = stagehand_tool.run( + instruction="Fill the contact form with name 'John Doe', email 'john@example.com', and message 'Hello world'", + url="https://example.com/contact" +) + +# Multiple actions in sequence +result = stagehand_tool.run( + instruction="Search for 'AI tools' in the search box and press Enter", + url="https://example.com" +) +``` + +### 2. Extract - Get Data from a Page + +The `extract` command type allows the agent to extract structured data from a webpage, such as product information, article text, or table data. + +**When to use**: Use `extract` when you need to retrieve specific information from a webpage in a structured format. + +**Example usage**: +```python +# Extract all product information +result = stagehand_tool.run( + instruction="Extract all product names, prices, and descriptions", + url="https://example.com/products", + command_type="extract" +) + +# Extract specific information with a selector +result = stagehand_tool.run( + instruction="Extract the main article title and content", + url="https://example.com/blog/article", + command_type="extract", + selector=".article-container" # Optional CSS selector to limit extraction scope +) + +# Extract tabular data +result = stagehand_tool.run( + instruction="Extract the data from the pricing table as a structured list of plans with their features and costs", + url="https://example.com/pricing", + command_type="extract", + selector=".pricing-table" +) +``` + +### 3. Observe - Identify Elements on a Page + +The `observe` command type allows the agent to identify and analyze specific elements on a webpage, returning information about their attributes, location, and suggested actions. + +**When to use**: Use `observe` when you need to identify UI elements, understand page structure, or determine what actions are possible. + +**Example usage**: +```python +# Find interactive elements +result = stagehand_tool.run( + instruction="Find all interactive elements in the navigation menu", + url="https://example.com", + command_type="observe" +) + +# Identify form fields +result = stagehand_tool.run( + instruction="Identify all the input fields in the registration form", + url="https://example.com/register", + command_type="observe", + selector="#registration-form" +) + +# Analyze page structure +result = stagehand_tool.run( + instruction="Find the main content sections of this page", + url="https://example.com/about", + command_type="observe" +) +``` + +## Advanced Configuration + +You can customize the behavior of the StagehandTool by specifying different parameters: + +```python +stagehand_tool = StagehandTool( + api_key="your-browserbase-api-key", + project_id="your-browserbase-project-id", + model_api_key="your-llm-api-key", + model_name=AvailableModel.CLAUDE_3_7_SONNET_LATEST, + dom_settle_timeout_ms=5000, # Wait longer for DOM to settle + headless=True, # Run browser in headless mode (no visible window) + self_heal=True, # Attempt to recover from errors + wait_for_captcha_solves=True, # Wait for CAPTCHA solving + verbose=1, # Control logging verbosity (0-3) +) +``` + +## Tips for Effective Use + +1. **Be specific in instructions**: The more specific your instructions, the better the results. 
For example, instead of "click the button," use "click the 'Submit' button at the bottom of the contact form." + +2. **Use the right command type**: Choose the appropriate command type based on your task: + - Use `act` for interactions and navigation + - Use `extract` for gathering information + - Use `observe` for understanding page structure + +3. **Leverage selectors**: When extracting data or observing elements, use CSS selectors to narrow the scope and improve accuracy. + +4. **Handle multi-step processes**: For complex workflows, break them down into multiple tool calls, each handling a specific step. + +5. **Error handling**: Implement appropriate error handling in your agent's logic to deal with potential issues like elements not found or pages not loading. + +## Troubleshooting + +- **Session not starting**: Ensure you have valid API keys for both Browserbase and your LLM provider. +- **Elements not found**: Try increasing the `dom_settle_timeout_ms` parameter to give the page more time to load. +- **Actions not working**: Make sure your instructions are clear and specific. You may need to use `observe` first to identify the correct elements. +- **Extract returning incomplete data**: Try refining your instruction or providing a more specific selector. + +## Resources + +- [Stagehand Documentation](https://docs.stagehand.dev/reference/introduction) - Complete reference for the Stagehand framework +- [Browserbase](https://www.browserbase.com) - Browser automation platform +- [Join Slack Community](https://stagehand.dev/slack) - Get help and connect with other users of Stagehand + +## Contact + +For more information about Stagehand, visit [the Stagehand documentation](https://docs.stagehand.dev/). + +For questions about the CrewAI integration, join our [Slack](https://stagehand.dev/slack) or open an issue in this repository. \ No newline at end of file diff --git a/src/crewai_tools/tools/stagehand_tool/__init__.py b/src/crewai_tools/tools/stagehand_tool/__init__.py index cbd90dd15..2b3e24856 100644 --- a/src/crewai_tools/tools/stagehand_tool/__init__.py +++ b/src/crewai_tools/tools/stagehand_tool/__init__.py @@ -1,5 +1,3 @@ -"""Stagehand tool for web automation in CrewAI.""" - from .stagehand_tool import StagehandTool __all__ = ["StagehandTool"] diff --git a/src/crewai_tools/tools/stagehand_tool/example.py b/src/crewai_tools/tools/stagehand_tool/example.py new file mode 100644 index 000000000..0d9735cad --- /dev/null +++ b/src/crewai_tools/tools/stagehand_tool/example.py @@ -0,0 +1,116 @@ +""" +StagehandTool Example + +This example demonstrates how to use the StagehandTool in a CrewAI workflow. +It shows how to use the three main primitives: act, extract, and observe. + +Prerequisites: +1. A Browserbase account with API key and project ID +2. An LLM API key (OpenAI or Anthropic) +3. 
Installed dependencies: crewai, crewai-tools, stagehand-py + +Usage: +- Set your API keys in environment variables (recommended) +- Or modify the script to include your API keys directly +- Run the script: python stagehand_example.py +""" + +import os + +from crewai import Agent, Crew, Process, Task +from dotenv import load_dotenv +from stagehand.schemas import AvailableModel + +from crewai_tools import StagehandTool + +# Load environment variables from .env file +load_dotenv() + +# Get API keys from environment variables +# You can set these in your shell or in a .env file +browserbase_api_key = os.environ.get("BROWSERBASE_API_KEY") +browserbase_project_id = os.environ.get("BROWSERBASE_PROJECT_ID") +model_api_key = os.environ.get("OPENAI_API_KEY") # or OPENAI_API_KEY + +# Initialize the StagehandTool with your credentials and use context manager +with StagehandTool( + api_key=browserbase_api_key, # New parameter naming + project_id=browserbase_project_id, # New parameter naming + model_api_key=model_api_key, + model_name=AvailableModel.GPT_4O, # Using the enum from schemas +) as stagehand_tool: + # Create a web researcher agent with the StagehandTool + researcher = Agent( + role="Web Researcher", + goal="Find and extract information from websites using different Stagehand primitives", + backstory=( + "You are an expert web automation agent equipped with the StagehandTool. " + "Your primary function is to interact with websites based on natural language instructions. " + "You must carefully choose the correct command (`command_type`) for each task:\n" + "- Use 'act' (the default) for general interactions like clicking buttons ('Click the login button'), " + "filling forms ('Fill the form with username user and password pass'), scrolling, or navigating within the site.\n" + "- Use 'navigate' specifically when you need to go to a new web page; you MUST provide the target URL " + "in the `url` parameter along with the instruction (e.g., instruction='Go to Google', url='https://google.com').\n" + "- Use 'extract' when the goal is to pull structured data from the page. Provide a clear `instruction` " + "describing what data to extract (e.g., 'Extract all product names and prices').\n" + "- Use 'observe' to identify and analyze elements on the current page based on an `instruction` " + "(e.g., 'Find all images in the main content area').\n\n" + "Remember to break down complex tasks into simple, sequential steps in your `instruction`. For example, " + "instead of 'Search for OpenAI on Google and click the first result', use multiple steps with the tool:\n" + "1. Use 'navigate' with url='https://google.com'.\n" + "2. Use 'act' with instruction='Type OpenAI in the search bar'.\n" + "3. Use 'act' with instruction='Click the search button'.\n" + "4. Use 'act' with instruction='Click the first search result link for OpenAI'.\n\n" + "Always be precise in your instructions and choose the most appropriate command and parameters (`instruction`, `url`, `command_type`, `selector`) for the task at hand." + ), + llm="gpt-4o", + verbose=True, + allow_delegation=False, + tools=[stagehand_tool], + ) + + # Define a research task that demonstrates all three primitives + research_task = Task( + description=( + "Demonstrate Stagehand capabilities by performing the following steps:\n" + "1. Go to https://www.stagehand.dev\n" + "2. Extract all the text content from the page\n" + "3. Find the Docs link and click on it\n" + "4. 
Go to https://httpbin.org/forms/post and observe what elements are available on the page\n" + "5. Provide a summary of what you learned about using these different commands" + ), + expected_output=( + "A demonstration of all three Stagehand primitives (act, extract, observe) " + "with examples of how each was used and what information was gathered." + ), + agent=researcher, + ) + + # Alternative task: Real research using the primitives + web_research_task = Task( + description=( + "Go to google.com and search for 'Stagehand'.\n" + "Then extract the first search result." + ), + expected_output=( + "A summary report about Stagehand's capabilities and pricing, demonstrating how " + "the different primitives can be used together for effective web research." + ), + agent=researcher, + ) + + # Set up the crew + crew = Crew( + agents=[researcher], + tasks=[research_task], # You can switch this to web_research_task if you prefer + verbose=True, + process=Process.sequential, + ) + + # Run the crew and get the result + result = crew.kickoff() + + print("\n==== RESULTS ====\n") + print(result) + +# Resources are automatically cleaned up when exiting the context manager diff --git a/src/crewai_tools/tools/stagehand_tool/stagehand_extract_tool.py b/src/crewai_tools/tools/stagehand_tool/stagehand_extract_tool.py deleted file mode 100644 index 03c14fd43..000000000 --- a/src/crewai_tools/tools/stagehand_tool/stagehand_extract_tool.py +++ /dev/null @@ -1,207 +0,0 @@ -"""Tool for using Stagehand's AI-powered extraction capabilities in CrewAI.""" - -import logging -import os -from typing import Any, Dict, Optional, Type -import subprocess -import json - -from pydantic import BaseModel, Field -from crewai.tools.base_tool import BaseTool - -# Set up logging -logger = logging.getLogger(__name__) - -class StagehandExtractSchema(BaseModel): - """Schema for data extraction using Stagehand. - - Examples: - ```python - # Extract a product price - tool.run( - url="https://example.com/product", - instruction="Extract the price of the item", - schema={ - "price": {"type": "number"} - } - ) - - # Extract article content - tool.run( - url="https://example.com/article", - instruction="Extract the article title and content", - schema={ - "title": {"type": "string"}, - "content": {"type": "string"}, - "date": {"type": "string", "optional": True} - } - ) - ``` - """ - url: str = Field( - ..., - description="The URL of the website to extract data from" - ) - instruction: str = Field( - ..., - description="Instructions for what data to extract", - min_length=1, - max_length=500 - ) - schema: Dict[str, Dict[str, Any]] = Field( - ..., - description="Zod-like schema defining the structure of data to extract" - ) - - -class StagehandExtractTool(BaseTool): - name: str = "StagehandExtractTool" - description: str = ( - "A tool that uses Stagehand's AI-powered extraction to get structured data from websites. " - "Requires a schema defining the structure of data to extract." - ) - args_schema: Type[BaseModel] = StagehandExtractSchema - config: Optional[Dict[str, Any]] = None - - def __init__(self, **kwargs: Any) -> None: - """Initialize the StagehandExtractTool. - - Args: - **kwargs: Additional keyword arguments passed to the base class. 
- """ - super().__init__(**kwargs) - - # Use provided API key or try environment variable - if not os.getenv("OPENAI_API_KEY"): - raise ValueError( - "Set OPENAI_API_KEY environment variable, mandatory for Stagehand" - ) - - def _convert_to_zod_schema(self, schema: Dict[str, Dict[str, Any]]) -> str: - """Convert Python schema definition to Zod schema string.""" - zod_parts = [] - for field_name, field_def in schema.items(): - field_type = field_def["type"] - is_optional = field_def.get("optional", False) - - if field_type == "string": - zod_type = "z.string()" - elif field_type == "number": - zod_type = "z.number()" - elif field_type == "boolean": - zod_type = "z.boolean()" - elif field_type == "array": - item_type = field_def.get("items", {"type": "string"}) - zod_type = f"z.array({self._convert_to_zod_schema({'item': item_type})})" - else: - zod_type = "z.string()" # Default to string for unknown types - - if is_optional: - zod_type += ".optional()" - - zod_parts.append(f"{field_name}: {zod_type}") - - return f"z.object({{ {', '.join(zod_parts)} }})" - - def _run(self, url: str, instruction: str, schema: Dict[str, Dict[str, Any]]) -> Any: - """Execute a Stagehand extract command. - - Args: - url: The URL to extract data from - instruction: What data to extract - schema: Schema defining the structure of data to extract - - Returns: - The extracted data matching the provided schema - """ - logger.debug( - "Starting extraction - URL: %s, Instruction: %s, Schema: %s", - url, - instruction, - schema - ) - - # Convert Python schema to Zod schema - zod_schema = self._convert_to_zod_schema(schema) - - # Prepare the Node.js command - command = [ - "node", - "-e", - f""" - const {{ Stagehand }} = require('@browserbasehq/stagehand'); - const z = require('zod'); - - async function run() {{ - console.log('Initializing Stagehand...'); - const stagehand = new Stagehand({{ - apiKey: '{os.getenv("OPENAI_API_KEY")}', - env: 'LOCAL' - }}); - - try {{ - console.log('Initializing browser...'); - await stagehand.init(); - - console.log('Navigating to:', '{url}'); - await stagehand.page.goto('{url}'); - - console.log('Extracting data...'); - const result = await stagehand.page.extract({{ - instruction: '{instruction}', - schema: {zod_schema} - }}); - - process.stdout.write('RESULT_START'); - process.stdout.write(JSON.stringify({{ data: result, success: true }})); - process.stdout.write('RESULT_END'); - - await stagehand.close(); - }} catch (error) {{ - console.error('Extraction failed:', error); - process.stdout.write('RESULT_START'); - process.stdout.write(JSON.stringify({{ - error: error.message, - name: error.name, - success: false - }})); - process.stdout.write('RESULT_END'); - process.exit(1); - }} - }} - - run(); - """ - ] - - try: - # Execute Node.js script - result = subprocess.run( - command, - check=True, - capture_output=True, - text=True - ) - - # Extract the JSON result using markers - if 'RESULT_START' in result.stdout and 'RESULT_END' in result.stdout: - json_str = result.stdout.split('RESULT_START')[1].split('RESULT_END')[0] - try: - parsed_result = json.loads(json_str) - logger.info("Successfully parsed result: %s", parsed_result) - if parsed_result.get('success', False): - return parsed_result.get('data') - else: - raise Exception(f"Extraction failed: {parsed_result.get('error', 'Unknown error')}") - except json.JSONDecodeError as e: - logger.error("Failed to parse JSON output: %s", json_str) - raise Exception(f"Invalid JSON response: {e}") - else: - logger.error("No valid result markers 
found in output") - raise ValueError("No valid output from Stagehand command") - - except subprocess.CalledProcessError as e: - logger.error("Node.js script failed with exit code %d", e.returncode) - if e.stderr: - logger.error("Error output: %s", e.stderr) - raise Exception(f"Stagehand command failed: {e}") \ No newline at end of file diff --git a/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py b/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py index 7a3f1e06b..5a4d5f485 100644 --- a/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py +++ b/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py @@ -1,33 +1,48 @@ -""" -A tool for using Stagehand's AI-powered web automation capabilities in CrewAI. - -This tool provides access to Stagehand's three core APIs: -- act: Perform web interactions -- extract: Extract information from web pages -- observe: Monitor web page changes - -Each function takes atomic instructions to increase reliability. -""" - +import asyncio +import json import logging -import os -from functools import lru_cache -from typing import Any, Dict, List, Optional, Type, Union +from typing import Dict, List, Optional, Type, Union, Any -from crewai.tools.base_tool import BaseTool from pydantic import BaseModel, Field -# Set up logging -logger = logging.getLogger(__name__) +# Define a flag to track whether stagehand is available +_HAS_STAGEHAND = False -# Define STAGEHAND_AVAILABLE at module level -STAGEHAND_AVAILABLE = False try: - import stagehand - - STAGEHAND_AVAILABLE = True + from stagehand import Stagehand, StagehandConfig, StagehandPage + from stagehand.schemas import ( + ActOptions, + AvailableModel, + ExtractOptions, + ObserveOptions, + ) + from stagehand.utils import configure_logging + _HAS_STAGEHAND = True except ImportError: - pass # Keep STAGEHAND_AVAILABLE as False + # Define type stubs for when stagehand is not installed + Stagehand = Any + StagehandPage = Any + StagehandConfig = Any + ActOptions = Any + ExtractOptions = Any + ObserveOptions = Any + + # Mock configure_logging function + def configure_logging(level=None, remove_logger_name=None, quiet_dependencies=None): + pass + + # Define only what's needed for class defaults + class AvailableModel: + CLAUDE_3_7_SONNET_LATEST = "anthropic.claude-3-7-sonnet-20240607" + +from crewai.tools import BaseTool + + +class StagehandCommandType(str): + ACT = "act" + EXTRACT = "extract" + OBSERVE = "observe" + NAVIGATE = "navigate" class StagehandResult(BaseModel): @@ -50,253 +65,536 @@ class StagehandResult(BaseModel): ) -class StagehandToolConfig(BaseModel): - """Configuration for the StagehandTool. - - Attributes: - api_key: OpenAI API key for Stagehand authentication - timeout: Maximum time in seconds to wait for operations (default: 30) - retry_attempts: Number of times to retry failed operations (default: 3) - """ - - api_key: str = Field(..., description="OpenAI API key for Stagehand authentication") - timeout: int = Field( - 30, description="Maximum time in seconds to wait for operations" - ) - retry_attempts: int = Field( - 3, description="Number of times to retry failed operations" - ) - - class StagehandToolSchema(BaseModel): - """Schema for the StagehandTool input parameters. 
+ """Input for StagehandTool.""" - Examples: - ```python - # Using the 'act' API to click a button - tool.run( - api_method="act", - instruction="Click the 'Sign In' button" - ) - - # Using the 'extract' API to get text - tool.run( - api_method="extract", - instruction="Get the text content of the main article" - ) - - # Using the 'observe' API to monitor changes - tool.run( - api_method="observe", - instruction="Watch for changes in the shopping cart count" - ) - ``` - """ - - api_method: str = Field( - ..., - description="The Stagehand API to use: 'act' for interactions, 'extract' for getting content, or 'observe' for monitoring changes", - pattern="^(act|extract|observe)$", - ) instruction: str = Field( ..., - description="An atomic instruction for Stagehand to execute. Instructions should be simple and specific to increase reliability.", - min_length=1, - max_length=500, + description="Natural language instruction describing what you want to do on the website. Be specific about the action you want to perform, data to extract, or elements to observe. If your task is complex, break it down into simple, sequential steps. For example: 'Step 1: Navigate to https://example.com; Step 2: Click the login button; Step 3: Enter your credentials; Step 4: Submit the form.' Complex tasks like 'Search for OpenAI' should be broken down as: 'Step 1: Navigate to https://google.com; Step 2: Type OpenAI in the search box; Step 3: Press Enter or click the search button'.", + ) + url: Optional[str] = Field( + None, + description="The URL to navigate to before executing the instruction. MUST be used with 'navigate' command. ", + ) + command_type: Optional[str] = Field( + "act", + description="""The type of command to execute (choose one): + - 'act': Perform an action like clicking buttons, filling forms, etc. (default) + - 'navigate': Specifically navigate to a URL + - 'extract': Extract structured data from the page + - 'observe': Identify and analyze elements on the page + """, ) class StagehandTool(BaseTool): - """A tool for using Stagehand's AI-powered web automation capabilities. + """ + A tool that uses Stagehand to automate web browser interactions using natural language. - This tool provides access to Stagehand's three core APIs: - - act: Perform web interactions (e.g., clicking buttons, filling forms) - - extract: Extract information from web pages (e.g., getting text content) - - observe: Monitor web page changes (e.g., watching for updates) + Stagehand allows AI agents to interact with websites through a browser, + performing actions like clicking buttons, filling forms, and extracting data. - Each function takes atomic instructions to increase reliability. + The tool supports four main command types: + 1. act - Perform actions like clicking, typing, scrolling, or navigating + 2. navigate - Specifically navigate to a URL (shorthand for act with navigation) + 3. extract - Extract structured data from web pages + 4. observe - Identify and analyze elements on a page - Required Environment Variables: - OPENAI_API_KEY: API key for OpenAI (required by Stagehand) + Usage patterns: + 1. Using as a context manager (recommended): + ```python + with StagehandTool() as tool: + agent = Agent(tools=[tool]) + # ... use the agent + ``` - Examples: - ```python - tool = StagehandTool() + 2. Manual resource management: + ```python + tool = StagehandTool() + try: + agent = Agent(tools=[tool]) + # ... 
use the agent + finally: + tool.close() + ``` - # Perform a web interaction - result = tool.run( - api_method="act", - instruction="Click the 'Sign In' button" - ) + Usage examples: + - Navigate to a website: instruction="Go to the homepage", url="https://example.com" + - Click a button: instruction="Click the login button" + - Fill a form: instruction="Fill the login form with username 'user' and password 'pass'" + - Extract data: instruction="Extract all product prices and names", command_type="extract" + - Observe elements: instruction="Find all navigation menu items", command_type="observe" + - Complex tasks: instruction="Step 1: Navigate to https://example.com; Step 2: Scroll down to the 'Features' section; Step 3: Click 'Learn More'", command_type="act" - # Extract content from a page - content = tool.run( - api_method="extract", - instruction="Get the text content of the main article" - ) - - # Monitor for changes - changes = tool.run( - api_method="observe", - instruction="Watch for changes in the shopping cart count" - ) - ``` + Example of breaking down "Search for OpenAI" into multiple steps: + 1. First navigation: instruction="Go to Google", url="https://google.com", command_type="navigate" + 2. Enter search term: instruction="Type 'OpenAI' in the search box", command_type="act" + 3. Submit search: instruction="Press the Enter key or click the search button", command_type="act" + 4. Click on result: instruction="Click on the OpenAI website link in the search results", command_type="act" """ - name: str = "StagehandTool" - description: str = ( - "A tool that uses Stagehand's AI-powered web automation to interact with websites. " - "It can perform actions (click, type, etc.), extract content, and observe changes. " - "Each instruction should be atomic (simple and specific) to increase reliability." - ) + name: str = "Web Automation Tool" + description: str = """Use this tool to control a web browser and interact with websites using natural language. + + Capabilities: + - Navigate to websites and follow links + - Click buttons, links, and other elements + - Fill in forms and input fields + - Search within websites + - Extract information from web pages + - Identify and analyze elements on a page + + To use this tool, provide a natural language instruction describing what you want to do. + For different types of tasks, specify the command_type: + - 'act': For performing actions (default) + - 'navigate': For navigating to a URL (shorthand for act with navigation) + - 'extract': For getting data from the page + - 'observe': For finding and analyzing elements + """ args_schema: Type[BaseModel] = StagehandToolSchema + # Stagehand configuration + api_key: Optional[str] = None + project_id: Optional[str] = None + model_api_key: Optional[str] = None + model_name: Optional[AvailableModel] = AvailableModel.CLAUDE_3_7_SONNET_LATEST + server_url: Optional[str] = "http://api.stagehand.browserbase.com/v1" + headless: bool = False + dom_settle_timeout_ms: int = 3000 + self_heal: bool = True + wait_for_captcha_solves: bool = True + verbose: int = 1 + + # Instance variables + _stagehand: Optional[Stagehand] = None + _page: Optional[StagehandPage] = None + _session_id: Optional[str] = None + _logger: Optional[logging.Logger] = None + _testing: bool = False + def __init__( - self, config: StagehandToolConfig | None = None, **kwargs: Any - ) -> None: - """Initialize the StagehandTool. - - Args: - config: Optional configuration for the tool. 
If not provided, - will attempt to use OPENAI_API_KEY from environment. - **kwargs: Additional keyword arguments passed to the base class. - - Raises: - ImportError: If the stagehand package is not installed - ValueError: If no API key is provided via config or environment - """ + self, + api_key: Optional[str] = None, + project_id: Optional[str] = None, + model_api_key: Optional[str] = None, + model_name: Optional[str] = None, + server_url: Optional[str] = None, + session_id: Optional[str] = None, + headless: Optional[bool] = None, + dom_settle_timeout_ms: Optional[int] = None, + self_heal: Optional[bool] = None, + wait_for_captcha_solves: Optional[bool] = None, + verbose: Optional[int] = None, + _testing: bool = False, # Flag to bypass dependency check in tests + **kwargs, + ): + # Set testing flag early so that other init logic can rely on it + self._testing = _testing super().__init__(**kwargs) - if not STAGEHAND_AVAILABLE: - import click + # Set up logger + self._logger = logging.getLogger(__name__) - if click.confirm( - "You are missing the 'stagehand-sdk' package. Would you like to install it?" - ): - import subprocess + # For backward compatibility + browserbase_api_key = kwargs.get("browserbase_api_key") + browserbase_project_id = kwargs.get("browserbase_project_id") - subprocess.run(["uv", "add", "stagehand-sdk"], check=True) + if api_key: + self.api_key = api_key + elif browserbase_api_key: + self.api_key = browserbase_api_key - # Use config if provided, otherwise try environment variable - if config is not None: - self.config = config - else: - api_key = os.getenv("OPENAI_API_KEY") - if not api_key: - raise ValueError( - "Either provide config with api_key or set OPENAI_API_KEY environment variable" + if project_id: + self.project_id = project_id + elif browserbase_project_id: + self.project_id = browserbase_project_id + + if model_api_key: + self.model_api_key = model_api_key + if model_name: + self.model_name = model_name + if server_url: + self.server_url = server_url + if headless is not None: + self.headless = headless + if dom_settle_timeout_ms is not None: + self.dom_settle_timeout_ms = dom_settle_timeout_ms + if self_heal is not None: + self.self_heal = self_heal + if wait_for_captcha_solves is not None: + self.wait_for_captcha_solves = wait_for_captcha_solves + if verbose is not None: + self.verbose = verbose + + self._session_id = session_id + + # Configure logging based on verbosity level + log_level = logging.ERROR + if self.verbose == 1: + log_level = logging.INFO + elif self.verbose == 2: + log_level = logging.WARNING + elif self.verbose >= 3: + log_level = logging.DEBUG + + configure_logging( + level=log_level, remove_logger_name=True, quiet_dependencies=True + ) + + self._check_required_credentials() + + def _check_required_credentials(self): + """Validate that required credentials are present.""" + # Check if stagehand is available, but only if we're not in testing mode + if not self._testing and not _HAS_STAGEHAND: + raise ImportError( + "`stagehand-py` package not found, please run `uv add stagehand-py`" + ) + + if not self.api_key: + raise ValueError("api_key is required (or set BROWSERBASE_API_KEY in env).") + if not self.project_id: + raise ValueError( + "project_id is required (or set BROWSERBASE_PROJECT_ID in env)." + ) + if not self.model_api_key: + raise ValueError( + "model_api_key is required (or set OPENAI_API_KEY or ANTHROPIC_API_KEY in env)." 
+ ) + + async def _setup_stagehand(self, session_id: Optional[str] = None): + """Initialize Stagehand if not already set up.""" + + # If we're in testing mode, return mock objects + if self._testing: + if not self._stagehand: + # Create a minimal mock for testing with non-async methods + class MockPage: + def act(self, options): + mock_result = type('MockResult', (), {})() + mock_result.model_dump = lambda: {"message": "Action completed successfully"} + return mock_result + + def goto(self, url): + return None + + def extract(self, options): + mock_result = type('MockResult', (), {})() + mock_result.model_dump = lambda: {"data": "Extracted content"} + return mock_result + + def observe(self, options): + mock_result1 = type('MockResult', (), {"description": "Test element", "method": "click"})() + return [mock_result1] + + class MockStagehand: + def __init__(self): + self.page = MockPage() + self.session_id = "test-session-id" + + def init(self): + return None + + def close(self): + return None + + self._stagehand = MockStagehand() + # No need to await the init call in test mode + self._stagehand.init() + self._page = self._stagehand.page + self._session_id = self._stagehand.session_id + + return self._stagehand, self._page + + # Normal initialization for non-testing mode + if not self._stagehand: + self._logger.debug("Initializing Stagehand") + # Create model client options with the API key + model_client_options = {"apiKey": self.model_api_key} + + # Build the StagehandConfig object + config = StagehandConfig( + env="BROWSERBASE", + api_key=self.api_key, + project_id=self.project_id, + headless=self.headless, + dom_settle_timeout_ms=self.dom_settle_timeout_ms, + model_name=self.model_name, + self_heal=self.self_heal, + wait_for_captcha_solves=self.wait_for_captcha_solves, + model_client_options=model_client_options, + verbose=self.verbose, + session_id=session_id or self._session_id, + ) + + # Initialize Stagehand with config and server_url + self._stagehand = Stagehand(config=config, server_url=self.server_url) + + # Initialize the Stagehand instance + await self._stagehand.init() + self._page = self._stagehand.page + self._session_id = self._stagehand.session_id + self._logger.info(f"Session ID: {self._stagehand.session_id}") + self._logger.info( + f"Browser session: https://www.browserbase.com/sessions/{self._stagehand.session_id}" + ) + + return self._stagehand, self._page + + async def _async_run( + self, + instruction: str, + url: Optional[str] = None, + command_type: str = "act", + ) -> StagehandResult: + """Asynchronous implementation of the tool.""" + try: + # Special handling for test mode to avoid coroutine issues + if self._testing: + # Return predefined mock results based on command type + if command_type.lower() == "act": + return StagehandResult( + success=True, + data={"message": "Action completed successfully"} + ) + elif command_type.lower() == "navigate": + return StagehandResult( + success=True, + data={ + "url": url or "https://example.com", + "message": f"Successfully navigated to {url or 'https://example.com'}", + }, + ) + elif command_type.lower() == "extract": + return StagehandResult( + success=True, + data={"data": "Extracted content", "metadata": {"source": "test"}} + ) + elif command_type.lower() == "observe": + return StagehandResult( + success=True, + data=[ + {"index": 1, "description": "Test element", "method": "click"} + ], + ) + else: + return StagehandResult( + success=False, + data={}, + error=f"Unknown command type: {command_type}" + ) + + # Normal 
execution for non-test mode + stagehand, page = await self._setup_stagehand(self._session_id) + + self._logger.info( + f"Executing {command_type} with instruction: {instruction}" + ) + + # Process according to command type + if command_type.lower() == "act": + # Create act options + act_options = ActOptions( + action=instruction, + model_name=self.model_name, + dom_settle_timeout_ms=self.dom_settle_timeout_ms, ) - self.config = StagehandToolConfig( - api_key=api_key, timeout=30, retry_attempts=3 - ) - @lru_cache(maxsize=100) - def _cached_run(self, api_method: str, instruction: str) -> Any: - """Execute a cached Stagehand command. + # Execute the act command + result = await page.act(act_options) + self._logger.info(f"Act operation completed: {result}") + return StagehandResult(success=True, data=result.model_dump()) - This method is cached to improve performance for repeated operations. + elif command_type.lower() == "navigate": + # For navigation, use the goto method directly + target_url = url - Args: - api_method: The Stagehand API to use ('act', 'extract', or 'observe') - instruction: An atomic instruction for Stagehand to execute + if not target_url: + error_msg = "No URL provided for navigation. Please provide a URL." + self._logger.error(error_msg) + return StagehandResult(success=False, data={}, error=error_msg) - Returns: - The raw result from the Stagehand API call + # Navigate using the goto method + result = await page.goto(target_url) + self._logger.info(f"Navigate operation completed to {target_url}") + return StagehandResult( + success=True, + data={ + "url": target_url, + "message": f"Successfully navigated to {target_url}", + }, + ) - Raises: - ValueError: If an invalid api_method is provided - Exception: If the Stagehand API call fails - """ - logger.debug( - "Cache operation - Method: %s, Instruction length: %d", - api_method, - len(instruction), - ) + elif command_type.lower() == "extract": + # Create extract options + extract_options = ExtractOptions( + instruction=instruction, + model_name=self.model_name, + dom_settle_timeout_ms=self.dom_settle_timeout_ms, + use_text_extract=True, + ) - # Initialize Stagehand with configuration - logger.info( - "Initializing Stagehand (timeout=%ds, retries=%d)", - self.config.timeout, - self.config.retry_attempts, - ) - st = stagehand.Stagehand( - api_key=self.config.api_key, - timeout=self.config.timeout, - retry_attempts=self.config.retry_attempts, - ) + # Execute the extract command + result = await page.extract(extract_options) + self._logger.info(f"Extract operation completed successfully {result}") + return StagehandResult(success=True, data=result.model_dump()) + + elif command_type.lower() == "observe": + # Create observe options + observe_options = ObserveOptions( + instruction=instruction, + model_name=self.model_name, + only_visible=True, + dom_settle_timeout_ms=self.dom_settle_timeout_ms, + ) + + # Execute the observe command + results = await page.observe(observe_options) + + # Format the observation results + formatted_results = [] + for i, result in enumerate(results): + formatted_results.append( + { + "index": i + 1, + "description": result.description, + "method": result.method, + } + ) + + self._logger.info( + f"Observe operation completed with {len(formatted_results)} elements found" + ) + return StagehandResult(success=True, data=formatted_results) - # Call the appropriate Stagehand API based on the method - logger.info( - "Executing %s operation with instruction: %s", api_method, instruction[:100] - ) - try: - 
if api_method == "act": - result = st.act(instruction) - elif api_method == "extract": - result = st.extract(instruction) - elif api_method == "observe": - result = st.observe(instruction) else: - raise ValueError(f"Unknown api_method: {api_method}") - - logger.info("Successfully executed %s operation", api_method) - return result + error_msg = f"Unknown command type: {command_type}. Please use 'act', 'navigate', 'extract', or 'observe'." + self._logger.error(error_msg) + return StagehandResult(success=False, data={}, error=error_msg) except Exception as e: - logger.warning( - "Operation failed (method=%s, error=%s), will be retried on next attempt", - api_method, - str(e), - ) - raise + error_msg = f"Error using Stagehand: {str(e)}" + self._logger.error(f"Operation failed: {error_msg}") + return StagehandResult(success=False, data={}, error=error_msg) - def _run(self, api_method: str, instruction: str, **kwargs: Any) -> StagehandResult: - """Execute a Stagehand command using the specified API method. + def _run( + self, + instruction: str, + url: Optional[str] = None, + command_type: str = "act", + ) -> str: + """ + Run the Stagehand tool with the given instruction. Args: - api_method: The Stagehand API to use ('act', 'extract', or 'observe') - instruction: An atomic instruction for Stagehand to execute - **kwargs: Additional keyword arguments passed to the Stagehand API + instruction: Natural language instruction for browser automation + url: Optional URL to navigate to before executing the instruction + command_type: Type of command to execute ('act', 'extract', or 'observe') Returns: - StagehandResult containing the operation result and status + The result of the browser automation task """ + # Create an event loop if we're not already in one try: - # Log operation context - logger.debug( - "Starting operation - Method: %s, Instruction length: %d, Args: %s", - api_method, - len(instruction), - kwargs, - ) + loop = asyncio.get_event_loop() + if loop.is_running(): + # We're in an existing event loop, use it + result = asyncio.run_coroutine_threadsafe( + self._async_run(instruction, url, command_type), loop + ).result() + else: + # We have a loop but it's not running + result = loop.run_until_complete( + self._async_run(instruction, url, command_type) + ) - # Use cached execution - result = self._cached_run(api_method, instruction) - logger.info("Operation completed successfully") - return StagehandResult(success=True, data=result) + # Format the result for output + if result.success: + if command_type.lower() == "act": + return f"Action result: {result.data.get('message', 'Completed')}" + elif command_type.lower() == "extract": + return f"Extracted data: {json.dumps(result.data, indent=2)}" + elif command_type.lower() == "observe": + formatted_results = [] + for element in result.data: + formatted_results.append( + f"Element {element['index']}: {element['description']}" + ) + if element.get("method"): + formatted_results.append( + f"Suggested action: {element['method']}" + ) - except stagehand.AuthenticationError as e: - logger.error( - "Authentication failed - Method: %s, Error: %s", api_method, str(e) - ) - return StagehandResult( - success=False, data={}, error=f"Authentication failed: {str(e)}" - ) - except stagehand.APIError as e: - logger.error("API error - Method: %s, Error: %s", api_method, str(e)) - return StagehandResult(success=False, data={}, error=f"API error: {str(e)}") - except stagehand.BrowserError as e: - logger.error("Browser error - Method: %s, Error: %s", api_method, 
str(e)) - return StagehandResult( - success=False, data={}, error=f"Browser error: {str(e)}" - ) - except Exception as e: - logger.error( - "Unexpected error - Method: %s, Error type: %s, Message: %s", - api_method, - type(e).__name__, - str(e), - ) - return StagehandResult( - success=False, data={}, error=f"Unexpected error: {str(e)}" - ) + return "\n".join(formatted_results) + else: + return json.dumps(result.data, indent=2) + else: + return f"Error: {result.error}" + + except RuntimeError: + # No event loop exists, create one + result = asyncio.run(self._async_run(instruction, url, command_type)) + + if result.success: + if isinstance(result.data, dict): + return json.dumps(result.data, indent=2) + else: + return str(result.data) + else: + return f"Error: {result.error}" + + async def _async_close(self): + """Asynchronously clean up Stagehand resources.""" + # Skip for test mode + if self._testing: + self._stagehand = None + self._page = None + return + + if self._stagehand: + await self._stagehand.close() + self._stagehand = None + if self._page: + self._page = None + + def close(self): + """Clean up Stagehand resources.""" + # Skip actual closing for testing mode + if self._testing: + self._stagehand = None + self._page = None + return + + if self._stagehand: + try: + # Handle both synchronous and asynchronous cases + if hasattr(self._stagehand, "close"): + if asyncio.iscoroutinefunction(self._stagehand.close): + try: + loop = asyncio.get_event_loop() + if loop.is_running(): + asyncio.run_coroutine_threadsafe(self._async_close(), loop).result() + else: + loop.run_until_complete(self._async_close()) + except RuntimeError: + asyncio.run(self._async_close()) + else: + # Handle non-async close method (for mocks) + self._stagehand.close() + except Exception as e: + # Log but don't raise - we're cleaning up + if self._logger: + self._logger.error(f"Error closing Stagehand: {str(e)}") + + self._stagehand = None + + if self._page: + self._page = None + + def __enter__(self): + """Enter the context manager.""" + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """Exit the context manager and clean up resources.""" + self.close() diff --git a/tests/tools/stagehand_tool_test.py b/tests/tools/stagehand_tool_test.py new file mode 100644 index 000000000..25c71c934 --- /dev/null +++ b/tests/tools/stagehand_tool_test.py @@ -0,0 +1,262 @@ +import sys +from unittest.mock import MagicMock, patch + +import pytest + +# Create mock classes that will be used by our fixture +class MockStagehandModule: + def __init__(self): + self.Stagehand = MagicMock() + self.StagehandConfig = MagicMock() + self.StagehandPage = MagicMock() + +class MockStagehandSchemas: + def __init__(self): + self.ActOptions = MagicMock() + self.ExtractOptions = MagicMock() + self.ObserveOptions = MagicMock() + self.AvailableModel = MagicMock() + +class MockStagehandUtils: + def __init__(self): + self.configure_logging = MagicMock() + +@pytest.fixture(scope="module", autouse=True) +def mock_stagehand_modules(): + """Mock stagehand modules at the start of this test module.""" + # Store original modules if they exist + original_modules = {} + for module_name in ["stagehand", "stagehand.schemas", "stagehand.utils"]: + if module_name in sys.modules: + original_modules[module_name] = sys.modules[module_name] + + # Create and inject mock modules + mock_stagehand = MockStagehandModule() + mock_stagehand_schemas = MockStagehandSchemas() + mock_stagehand_utils = MockStagehandUtils() + + sys.modules["stagehand"] = mock_stagehand + 
sys.modules["stagehand.schemas"] = mock_stagehand_schemas + sys.modules["stagehand.utils"] = mock_stagehand_utils + + # Import after mocking + from crewai_tools.tools.stagehand_tool.stagehand_tool import StagehandResult, StagehandTool + + # Make these available to tests in this module + sys.modules[__name__].StagehandResult = StagehandResult + sys.modules[__name__].StagehandTool = StagehandTool + + yield + + # Restore original modules + for module_name, module in original_modules.items(): + sys.modules[module_name] = module + + +class MockStagehandPage(MagicMock): + def act(self, options): + mock_result = MagicMock() + mock_result.model_dump.return_value = { + "message": "Action completed successfully" + } + return mock_result + + def goto(self, url): + return MagicMock() + + def extract(self, options): + mock_result = MagicMock() + mock_result.model_dump.return_value = { + "data": "Extracted content", + "metadata": {"source": "test"}, + } + return mock_result + + def observe(self, options): + result1 = MagicMock() + result1.description = "Button element" + result1.method = "click" + + result2 = MagicMock() + result2.description = "Input field" + result2.method = "type" + + return [result1, result2] + + +class MockStagehand(MagicMock): + def init(self): + self.session_id = "test-session-id" + self.page = MockStagehandPage() + + def close(self): + pass + + +@pytest.fixture +def mock_stagehand_instance(): + with patch( + "crewai_tools.tools.stagehand_tool.stagehand_tool.Stagehand", + return_value=MockStagehand(), + ) as mock: + yield mock + + +@pytest.fixture +def stagehand_tool(): + return StagehandTool( + api_key="test_api_key", + project_id="test_project_id", + model_api_key="test_model_api_key", + _testing=True, # Enable testing mode to bypass dependency check + ) + + +def test_stagehand_tool_initialization(): + """Test that the StagehandTool initializes with the correct default values.""" + tool = StagehandTool( + api_key="test_api_key", + project_id="test_project_id", + model_api_key="test_model_api_key", + _testing=True, # Enable testing mode + ) + + assert tool.api_key == "test_api_key" + assert tool.project_id == "test_project_id" + assert tool.model_api_key == "test_model_api_key" + assert tool.headless is False + assert tool.dom_settle_timeout_ms == 3000 + assert tool.self_heal is True + assert tool.wait_for_captcha_solves is True + + +@patch("crewai_tools.tools.stagehand_tool.stagehand_tool.StagehandTool._run", autospec=True) +def test_act_command(mock_run, stagehand_tool): + """Test the 'act' command functionality.""" + # Setup mock + mock_run.return_value = "Action result: Action completed successfully" + + # Run the tool + result = stagehand_tool._run( + instruction="Click the submit button", command_type="act" + ) + + # Assertions + assert "Action result" in result + assert "Action completed successfully" in result + + +@patch("crewai_tools.tools.stagehand_tool.stagehand_tool.StagehandTool._run", autospec=True) +def test_navigate_command(mock_run, stagehand_tool): + """Test the 'navigate' command functionality.""" + # Setup mock + mock_run.return_value = "Successfully navigated to https://example.com" + + # Run the tool + result = stagehand_tool._run( + instruction="Go to example.com", + url="https://example.com", + command_type="navigate", + ) + + # Assertions + assert "https://example.com" in result + + +@patch("crewai_tools.tools.stagehand_tool.stagehand_tool.StagehandTool._run", autospec=True) +def test_extract_command(mock_run, stagehand_tool): + """Test the 'extract' 
command functionality.""" + # Setup mock + mock_run.return_value = "Extracted data: {\"data\": \"Extracted content\", \"metadata\": {\"source\": \"test\"}}" + + # Run the tool + result = stagehand_tool._run( + instruction="Extract all product names and prices", command_type="extract" + ) + + # Assertions + assert "Extracted data" in result + assert "Extracted content" in result + + +@patch("crewai_tools.tools.stagehand_tool.stagehand_tool.StagehandTool._run", autospec=True) +def test_observe_command(mock_run, stagehand_tool): + """Test the 'observe' command functionality.""" + # Setup mock + mock_run.return_value = "Element 1: Button element\nSuggested action: click\nElement 2: Input field\nSuggested action: type" + + # Run the tool + result = stagehand_tool._run( + instruction="Find all interactive elements", command_type="observe" + ) + + # Assertions + assert "Element 1: Button element" in result + assert "Element 2: Input field" in result + assert "Suggested action: click" in result + assert "Suggested action: type" in result + + +@patch("crewai_tools.tools.stagehand_tool.stagehand_tool.StagehandTool._run", autospec=True) +def test_error_handling(mock_run, stagehand_tool): + """Test error handling in the tool.""" + # Setup mock + mock_run.return_value = "Error: Browser automation error" + + # Run the tool + result = stagehand_tool._run( + instruction="Click a non-existent button", command_type="act" + ) + + # Assertions + assert "Error:" in result + assert "Browser automation error" in result + + +def test_initialization_parameters(): + """Test that the StagehandTool initializes with the correct parameters.""" + # Create tool with custom parameters + tool = StagehandTool( + api_key="custom_api_key", + project_id="custom_project_id", + model_api_key="custom_model_api_key", + headless=True, + dom_settle_timeout_ms=5000, + self_heal=False, + wait_for_captcha_solves=False, + verbose=3, + _testing=True, # Enable testing mode + ) + + # Verify the tool was initialized with the correct parameters + assert tool.api_key == "custom_api_key" + assert tool.project_id == "custom_project_id" + assert tool.model_api_key == "custom_model_api_key" + assert tool.headless is True + assert tool.dom_settle_timeout_ms == 5000 + assert tool.self_heal is False + assert tool.wait_for_captcha_solves is False + assert tool.verbose == 3 + + +def test_close_method(): + """Test that the close method cleans up resources correctly.""" + # Create the tool with testing mode + tool = StagehandTool( + api_key="test_api_key", + project_id="test_project_id", + model_api_key="test_model_api_key", + _testing=True, + ) + + # Setup mock stagehand instance + tool._stagehand = MagicMock() + tool._stagehand.close = MagicMock() # Non-async mock + tool._page = MagicMock() + + # Call the close method + tool.close() + + # Verify resources were cleaned up + assert tool._stagehand is None + assert tool._page is None From 8b887b4eb32399045fe703aa3220e4c582f15b21 Mon Sep 17 00:00:00 2001 From: Mark McDonald Date: Wed, 14 May 2025 05:13:07 +0800 Subject: [PATCH 332/391] Fix `Callable` reference in QdrantVectorSearchTool (#304) The built-in `callable` type is not subscriptable, and thus not usable in a type annotation. When this tool is used, this warning is generated: ``` .../_generate_schema.py:623: UserWarning: is not a Python type (it may be an instance of an object), Pydantic will allow any object with no validation since we cannot even enforce that the input is an instance of the given type. 
To get rid of this error wrap the type with `pydantic.SkipValidation`. ``` This change fixes the warning. --- .../tools/qdrant_vector_search_tool/qdrant_search_tool.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py index 3ef467264..29f172cdf 100644 --- a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py +++ b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py @@ -1,6 +1,6 @@ import json import os -from typing import Any, Optional, Type +from typing import Any, Callable, Optional, Type try: @@ -70,7 +70,7 @@ class QdrantVectorSearchTool(BaseTool): default=None, description="The API key for the Qdrant server", ) - custom_embedding_fn: Optional[callable] = Field( + custom_embedding_fn: Optional[Callable] = Field( default=None, description="A custom embedding function to use for vectorization. If not provided, the default model will be used.", ) From ba6a85d342c5795978ec173c696f135eed0f10ce Mon Sep 17 00:00:00 2001 From: Ranuga Disansa <79456372+Programmer-RD-AI@users.noreply.github.com> Date: Wed, 14 May 2025 22:15:12 +0530 Subject: [PATCH 333/391] Add TavilyExtractorTool and TavilySearchTool with documentation (#279) * feat(tavily): add TavilyExtractorTool and TavilySearchTool with documentation * feat(tavily): enhance TavilyExtractorTool and TavilySearchTool with additional parameters and improved error handling * fix(tavily): update installation instructions for 'tavily-python' package in TavilyExtractorTool and TavilySearchTool --------- Co-authored-by: lorenzejay --- .../tools/tavily_extractor_tool/README.md | 99 +++++++ .../tavily_extractor_tool.py | 166 ++++++++++++ .../tools/tavily_search_tool/README.md | 115 ++++++++ .../tavily_search_tool/tavily_search_tool.py | 245 ++++++++++++++++++ 4 files changed, 625 insertions(+) create mode 100644 src/crewai_tools/tools/tavily_extractor_tool/README.md create mode 100644 src/crewai_tools/tools/tavily_extractor_tool/tavily_extractor_tool.py create mode 100644 src/crewai_tools/tools/tavily_search_tool/README.md create mode 100644 src/crewai_tools/tools/tavily_search_tool/tavily_search_tool.py diff --git a/src/crewai_tools/tools/tavily_extractor_tool/README.md b/src/crewai_tools/tools/tavily_extractor_tool/README.md new file mode 100644 index 000000000..8e2794dd1 --- /dev/null +++ b/src/crewai_tools/tools/tavily_extractor_tool/README.md @@ -0,0 +1,99 @@ +# TavilyExtractorTool + +## Description + +The `TavilyExtractorTool` allows CrewAI agents to extract structured content from web pages using the Tavily API. It can process single URLs or lists of URLs and provides options for controlling the extraction depth and including images. 
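+
+For orientation, the core interface is small: construct the tool (optionally choosing an extraction depth), pass one or more URLs, and parse the JSON string it returns. A minimal sketch, assuming `TAVILY_API_KEY` is set and using placeholder URLs; the exact response fields come from the Tavily extract API and may vary:
+
+```python
+import json
+
+from crewai_tools import TavilyExtractorTool
+
+# Depth and image options are fixed per tool instance
+tool = TavilyExtractorTool(extract_depth="basic")
+
+# Accepts a single URL string or a list of URLs
+raw = tool.run(urls=["https://example.com"])
+
+# The tool returns JSON text; parse it to inspect each extracted page
+data = json.loads(raw)
+for page in data.get("results", []):
+    print(page.get("url"))
+```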
+
+## Installation
+
+To use the `TavilyExtractorTool`, you need to install the `tavily-python` library:
+
+```shell
+pip install 'crewai[tools]' tavily-python
+```
+
+You also need to set your Tavily API key as an environment variable:
+
+```bash
+export TAVILY_API_KEY='your-tavily-api-key'
+```
+
+## Example
+
+Here's how to initialize and use the `TavilyExtractorTool` within a CrewAI agent:
+
+```python
+import os
+from crewai import Agent, Task, Crew
+from crewai_tools import TavilyExtractorTool
+
+# Ensure TAVILY_API_KEY is set in your environment
+# os.environ["TAVILY_API_KEY"] = "YOUR_API_KEY"
+
+# Initialize the tool (extraction options are configured at initialization)
+tavily_tool = TavilyExtractorTool(extract_depth='basic')
+
+# Create an agent that uses the tool
+extractor_agent = Agent(
+    role='Web Content Extractor',
+    goal='Extract key information from specified web pages',
+    backstory='You are an expert at extracting relevant content from websites using the Tavily API.',
+    tools=[tavily_tool],
+    verbose=True
+)
+
+# Define a task for the agent
+extract_task = Task(
+    description='Extract the main content from the URL https://example.com.',
+    expected_output='A JSON string containing the extracted content from the URL.',
+    agent=extractor_agent
+)
+
+# Create and run the crew
+crew = Crew(
+    agents=[extractor_agent],
+    tasks=[extract_task],
+    verbose=2
+)
+
+result = crew.kickoff()
+print(result)
+
+# Example with multiple URLs and advanced extraction, calling the tool directly
+advanced_tool = TavilyExtractorTool(extract_depth='advanced', include_images=True)
+result_multiple = advanced_tool.run(
+    urls=['https://example.com', 'https://anotherexample.org']
+)
+print(result_multiple)
+```
+
+## Arguments
+
+The `TavilyExtractorTool` accepts the following arguments during initialization:
+
+- `api_key` (Optional[str]): Your Tavily API key. If not provided during initialization, it defaults to the `TAVILY_API_KEY` environment variable.
+- `proxies` (Optional[dict[str, str]]): Proxies to use for the API requests. Defaults to `None`.
+- `include_images` (bool): Whether to include images in the extraction results. Defaults to `False`.
+- `extract_depth` (Literal["basic", "advanced"]): The depth of extraction. Use `"basic"` for faster, surface-level extraction or `"advanced"` for more comprehensive extraction. Defaults to `"basic"`.
+- `timeout` (int): The maximum time in seconds to wait for the extraction request to complete. Defaults to `60`.
+
+When the tool runs (`_run` or `_arun` methods, or via agent execution), it uses the `TavilyExtractorToolSchema`, which takes a single input:
+
+- `urls` (Union[List[str], str]): **Required**. A single URL string or a list of URL strings to extract data from.
+
+## Response Format
+
+The tool returns a JSON string representing the structured data extracted from the provided URL(s). The exact structure depends on the content of the pages and the `extract_depth` used. 
Refer to the [Tavily API documentation](https://docs.tavily.com/docs/tavily-api/python-sdk#extract) for details on the response structure. diff --git a/src/crewai_tools/tools/tavily_extractor_tool/tavily_extractor_tool.py b/src/crewai_tools/tools/tavily_extractor_tool/tavily_extractor_tool.py new file mode 100644 index 000000000..0320ab104 --- /dev/null +++ b/src/crewai_tools/tools/tavily_extractor_tool/tavily_extractor_tool.py @@ -0,0 +1,166 @@ +from crewai.tools import BaseTool +from pydantic import BaseModel, Field +from typing import Optional, Type, Any, Union, List, Literal +from dotenv import load_dotenv +import os +import json + +load_dotenv() +try: + from tavily import TavilyClient, AsyncTavilyClient + + TAVILY_AVAILABLE = True +except ImportError: + TAVILY_AVAILABLE = False + TavilyClient = Any + AsyncTavilyClient = Any + + +class TavilyExtractorToolSchema(BaseModel): + """Input schema for TavilyExtractorTool.""" + + urls: Union[List[str], str] = Field( + ..., + description="The URL(s) to extract data from. Can be a single URL or a list of URLs.", + ) + + +class TavilyExtractorTool(BaseTool): + """ + Tool that uses the Tavily API to extract content from web pages. + + Attributes: + client: Synchronous Tavily client. + async_client: Asynchronous Tavily client. + name: The name of the tool. + description: The description of the tool. + args_schema: The schema for the tool's arguments. + api_key: The Tavily API key. + proxies: Optional proxies for the API requests. + include_images: Whether to include images in the extraction. + extract_depth: The depth of extraction. + timeout: The timeout for the extraction request in seconds. + """ + + model_config = {"arbitrary_types_allowed": True} + client: Optional[TavilyClient] = None + async_client: Optional[AsyncTavilyClient] = None + name: str = "TavilyExtractorTool" + description: str = "Extracts content from one or more web pages using the Tavily API. Returns structured data." + args_schema: Type[BaseModel] = TavilyExtractorToolSchema + api_key: Optional[str] = Field( + default_factory=lambda: os.getenv("TAVILY_API_KEY"), + description="The Tavily API key. If not provided, it will be loaded from the environment variable TAVILY_API_KEY.", + ) + proxies: Optional[dict[str, str]] = Field( + default=None, + description="Optional proxies to use for the Tavily API requests.", + ) + include_images: bool = Field( + default=False, + description="Whether to include images in the extraction.", + ) + extract_depth: Literal["basic", "advanced"] = Field( + default="basic", + description="The depth of extraction. 'basic' for basic extraction, 'advanced' for advanced extraction.", + ) + timeout: int = Field( + default=60, + description="The timeout for the extraction request in seconds.", + ) + + def __init__(self, **kwargs: Any): + """ + Initializes the TavilyExtractorTool. + + Args: + **kwargs: Additional keyword arguments. + """ + super().__init__(**kwargs) + if TAVILY_AVAILABLE: + self.client = TavilyClient(api_key=self.api_key, proxies=self.proxies) + self.async_client = AsyncTavilyClient( + api_key=self.api_key, proxies=self.proxies + ) + else: + try: + import click + import subprocess + except ImportError: + raise ImportError( + "The 'tavily-python' package is required. 'click' and 'subprocess' are also needed to assist with installation if the package is missing. " + "Please install 'tavily-python' manually (e.g., 'uv add tavily-python') and ensure 'click' and 'subprocess' are available." 
+ ) + + if click.confirm( + "You are missing the 'tavily-python' package, which is required for TavilyExtractorTool. Would you like to install it?" + ): + try: + subprocess.run(["pip", "install", "tavily-python"], check=True) + raise ImportError( + "'tavily-python' has been installed. Please restart your Python application to use the TavilyExtractorTool." + ) + except subprocess.CalledProcessError as e: + raise ImportError( + f"Attempted to install 'tavily-python' but failed: {e}. " + f"Please install it manually to use the TavilyExtractorTool." + ) + else: + raise ImportError( + "The 'tavily-python' package is required to use the TavilyExtractorTool. " + "Please install it with: uv add tavily-python" + ) + + def _run( + self, + urls: Union[List[str], str], + ) -> str: + """ + Synchronously extracts content from the given URL(s). + + Args: + urls: The URL(s) to extract data from. + + Returns: + A JSON string containing the extracted data. + """ + if not self.client: + raise ValueError( + "Tavily client is not initialized. Ensure 'tavily-python' is installed and API key is set." + ) + + return json.dumps( + self.client.extract( + urls=urls, + extract_depth=self.extract_depth, + include_images=self.include_images, + timeout=self.timeout, + ), + indent=2, + ) + + async def _arun( + self, + urls: Union[List[str], str], + ) -> str: + """ + Asynchronously extracts content from the given URL(s). + + Args: + urls: The URL(s) to extract data from. + + Returns: + A JSON string containing the extracted data. + """ + if not self.async_client: + raise ValueError( + "Tavily async client is not initialized. Ensure 'tavily-python' is installed and API key is set." + ) + + results = await self.async_client.extract( + urls=urls, + extract_depth=self.extract_depth, + include_images=self.include_images, + timeout=self.timeout, + ) + return json.dumps(results, indent=2) diff --git a/src/crewai_tools/tools/tavily_search_tool/README.md b/src/crewai_tools/tools/tavily_search_tool/README.md new file mode 100644 index 000000000..185b19887 --- /dev/null +++ b/src/crewai_tools/tools/tavily_search_tool/README.md @@ -0,0 +1,115 @@ +# Tavily Search Tool + +## Description + +The `TavilySearchTool` provides an interface to the Tavily Search API, enabling CrewAI agents to perform comprehensive web searches. It allows for specifying search depth, topics, time ranges, included/excluded domains, and whether to include direct answers, raw content, or images in the results. The tool returns the search results as a JSON string. 
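+
+Because the tool returns a JSON string rather than a Python object, callers typically parse it before use. A minimal sketch, assuming `TAVILY_API_KEY` is set; the field names (`results`, `title`, `url`) follow the Tavily search response and may vary:
+
+```python
+import json
+
+from crewai_tools import TavilySearchTool
+
+search_tool = TavilySearchTool(max_results=3)
+raw = search_tool.run(query="CrewAI")
+
+data = json.loads(raw)
+for item in data.get("results", []):
+    # Each result typically includes a title, URL, and truncated content snippet
+    print(item.get("title"), "->", item.get("url"))
+```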
+
+## Installation
+
+To use the `TavilySearchTool`, you need to install the `tavily-python` library:
+
+```shell
+pip install 'crewai[tools]' tavily-python
+```
+
+## Environment Variables
+
+Ensure your Tavily API key is set as an environment variable:
+
+```bash
+export TAVILY_API_KEY='your_tavily_api_key'
+```
+
+## Example
+
+Here's how to initialize and use the `TavilySearchTool` within a CrewAI agent:
+
+```python
+import os
+from crewai import Agent, Task, Crew
+from crewai_tools import TavilySearchTool
+
+# Ensure the TAVILY_API_KEY environment variable is set
+# os.environ["TAVILY_API_KEY"] = "YOUR_TAVILY_API_KEY"
+
+# Initialize the tool
+tavily_tool = TavilySearchTool()
+
+# Create an agent that uses the tool
+researcher = Agent(
+    role='Market Researcher',
+    goal='Find information about the latest AI trends',
+    backstory='An expert market researcher specializing in technology.',
+    tools=[tavily_tool],
+    verbose=True
+)
+
+# Create a task for the agent
+research_task = Task(
+    description='Search for the top 3 AI trends in 2024.',
+    expected_output='A JSON report summarizing the top 3 AI trends found.',
+    agent=researcher
+)
+
+# Form the crew and kick it off
+crew = Crew(
+    agents=[researcher],
+    tasks=[research_task],
+    verbose=2
+)
+
+result = crew.kickoff()
+print(result)
+
+# Example of using specific parameters: configure them on the tool instance,
+# then pass only the query at run time (the run method accepts `query` alone)
+detailed_tool = TavilySearchTool(
+    search_depth="advanced",
+    topic="general",
+    max_results=5,
+    include_answer=True
+)
+detailed_search_result = detailed_tool.run(
+    query="What are the recent advancements in large language models?"
+)
+print(detailed_search_result)
+```
+
+## Arguments
+
+Only the search query is passed when the tool runs; everything else is configured on the tool at initialization. A combined example follows the list below.
+
+- `query` (str): **Required**. The search query string. This is the only run-time argument.
+- `search_depth` (Literal["basic", "advanced"], optional): The depth of the search. Defaults to `"basic"`.
+- `topic` (Literal["general", "news", "finance"], optional): The topic to focus the search on. Defaults to `"general"`.
+- `time_range` (Literal["day", "week", "month", "year"], optional): The time range for the search. Defaults to `None`.
+- `days` (int, optional): The number of days to search back. Relevant if `time_range` is not set. Defaults to `7`.
+- `max_results` (int, optional): The maximum number of search results to return. Defaults to `5`.
+- `include_domains` (Sequence[str], optional): A list of domains to prioritize in the search. Defaults to `None`.
+- `exclude_domains` (Sequence[str], optional): A list of domains to exclude from the search. Defaults to `None`.
+- `include_answer` (Union[bool, Literal["basic", "advanced"]], optional): Whether to include a direct answer synthesized from the search results. Defaults to `False`.
+- `include_raw_content` (bool, optional): Whether to include the raw HTML content of the searched pages. Defaults to `False`.
+- `include_images` (bool, optional): Whether to include image results. Defaults to `False`.
+- `timeout` (int, optional): The request timeout in seconds. Defaults to `60`.
+- `max_content_length_per_result` (int, optional): Maximum length, in characters, of each result's `content` field; longer content is truncated to avoid context window issues. Defaults to `1000`.
+- `api_key` (str, optional): Your Tavily API key. If not provided, it's read from the `TAVILY_API_KEY` environment variable.
+- `proxies` (dict[str, str], optional): A dictionary of proxies to use for the API request. Defaults to `None`. 
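+
+For instance, the topic, recency, and domain filters above can be combined on one tool instance. A sketch (the domains shown are placeholders, not recommendations):
+
+```python
+from crewai_tools import TavilySearchTool
+
+# A news-focused search over the past week, limited to two outlets
+news_tool = TavilySearchTool(
+    topic="news",
+    time_range="week",
+    include_domains=["reuters.com", "apnews.com"],
+    max_results=5,
+)
+
+print(news_tool.run(query="open source AI agent frameworks"))
+```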
+
+## Custom Configuration
+
+You can configure the tool during initialization:
+
+```python
+# Example: Initialize with a higher max_results and a specific API key
+custom_tavily_tool = TavilySearchTool(
+    api_key="YOUR_SPECIFIC_TAVILY_KEY",
+    max_results=10,
+    search_depth='advanced'
+)
+
+# The agent will use these settings for every search it performs
+agent_with_custom_tool = Agent(
+    # ... agent configuration ...
+    tools=[custom_tavily_tool]
+)
+```
+
+Note: Configuration options are passed directly as keyword arguments when constructing the tool and set the defaults used for every search. The only value supplied at execution time is the `query` from the agent's action input.
diff --git a/src/crewai_tools/tools/tavily_search_tool/tavily_search_tool.py b/src/crewai_tools/tools/tavily_search_tool/tavily_search_tool.py
new file mode 100644
index 000000000..1179be90d
--- /dev/null
+++ b/src/crewai_tools/tools/tavily_search_tool/tavily_search_tool.py
@@ -0,0 +1,245 @@
+from crewai.tools import BaseTool
+from pydantic import BaseModel, Field
+from typing import Optional, Type, Any, Union, Literal, Sequence
+from dotenv import load_dotenv
+import os
+import json
+
+load_dotenv()
+try:
+    from tavily import TavilyClient, AsyncTavilyClient
+
+    TAVILY_AVAILABLE = True
+except ImportError:
+    TAVILY_AVAILABLE = False
+    TavilyClient = Any
+    AsyncTavilyClient = Any
+
+
+class TavilySearchToolSchema(BaseModel):
+    """Input schema for TavilySearchTool."""
+
+    query: str = Field(..., description="The search query string.")
+
+
+class TavilySearchTool(BaseTool):
+    """
+    Tool that uses the Tavily Search API to perform web searches.
+
+    Attributes:
+        client: An instance of TavilyClient.
+        async_client: An instance of AsyncTavilyClient.
+        name: The name of the tool.
+        description: A description of the tool's purpose.
+        args_schema: The schema for the tool's arguments.
+        api_key: The Tavily API key.
+        proxies: Optional proxies for the API requests.
+        search_depth: The depth of the search.
+        topic: The topic to focus the search on.
+        time_range: The time range for the search.
+        days: The number of days to search back.
+        max_results: The maximum number of results to return.
+        include_domains: A list of domains to include in the search.
+        exclude_domains: A list of domains to exclude from the search.
+        include_answer: Whether to include a direct answer to the query.
+        include_raw_content: Whether to include the raw content of the search results.
+        include_images: Whether to include images in the search results.
+        timeout: The timeout for the search request in seconds.
+        max_content_length_per_result: Maximum length for the 'content' of each search result.
+    """
+
+    model_config = {"arbitrary_types_allowed": True}
+    client: Optional[TavilyClient] = None
+    async_client: Optional[AsyncTavilyClient] = None
+    name: str = "Tavily Search"
+    description: str = (
+        "A tool that performs web searches using the Tavily Search API. "
+        "It returns a JSON object containing the search results."
+    )
+    args_schema: Type[BaseModel] = TavilySearchToolSchema
+    api_key: Optional[str] = Field(
+        default_factory=lambda: os.getenv("TAVILY_API_KEY"),
+        description="The Tavily API key. 
If not provided, it will be loaded from the environment variable TAVILY_API_KEY.", + ) + proxies: Optional[dict[str, str]] = Field( + default=None, + description="Optional proxies to use for the Tavily API requests.", + ) + search_depth: Literal["basic", "advanced"] = Field( + default="basic", description="The depth of the search." + ) + topic: Literal["general", "news", "finance"] = Field( + default="general", description="The topic to focus the search on." + ) + time_range: Optional[Literal["day", "week", "month", "year"]] = Field( + default=None, description="The time range for the search." + ) + days: int = Field(default=7, description="The number of days to search back.") + max_results: int = Field( + default=5, description="The maximum number of results to return." + ) + include_domains: Optional[Sequence[str]] = Field( + default=None, description="A list of domains to include in the search." + ) + exclude_domains: Optional[Sequence[str]] = Field( + default=None, description="A list of domains to exclude from the search." + ) + include_answer: Union[bool, Literal["basic", "advanced"]] = Field( + default=False, description="Whether to include a direct answer to the query." + ) + include_raw_content: bool = Field( + default=False, + description="Whether to include the raw content of the search results.", + ) + include_images: bool = Field( + default=False, description="Whether to include images in the search results." + ) + timeout: int = Field( + default=60, description="The timeout for the search request in seconds." + ) + max_content_length_per_result: int = Field( + default=1000, + description="Maximum length for the 'content' of each search result to avoid context window issues.", + ) + + def __init__(self, **kwargs: Any): + super().__init__(**kwargs) + if TAVILY_AVAILABLE: + self.client = TavilyClient(api_key=self.api_key, proxies=self.proxies) + self.async_client = AsyncTavilyClient( + api_key=self.api_key, proxies=self.proxies + ) + else: + try: + import click + import subprocess + except ImportError: + raise ImportError( + "The 'tavily-python' package is required. 'click' and 'subprocess' are also needed to assist with installation if the package is missing. " + "Please install 'tavily-python' manually (e.g., 'pip install tavily-python') and ensure 'click' and 'subprocess' are available." + ) + + if click.confirm( + "You are missing the 'tavily-python' package, which is required for TavilySearchTool. Would you like to install it?" + ): + try: + subprocess.run(["uv", "add", "tavily-python"], check=True) + raise ImportError( + "'tavily-python' has been installed. Please restart your Python application to use the TavilySearchTool." + ) + except subprocess.CalledProcessError as e: + raise ImportError( + f"Attempted to install 'tavily-python' but failed: {e}. " + f"Please install it manually to use the TavilySearchTool." + ) + else: + raise ImportError( + "The 'tavily-python' package is required to use the TavilySearchTool. " + "Please install it with: uv add tavily-python" + ) + + def _run( + self, + query: str, + ) -> str: + """ + Synchronously performs a search using the Tavily API. + Content of each result is truncated to `max_content_length_per_result`. + + Args: + query: The search query string. + + Returns: + A JSON string containing the search results with truncated content. + """ + if not self.client: + raise ValueError( + "Tavily client is not initialized. Ensure 'tavily-python' is installed and API key is set." 
+ ) + + raw_results = self.client.search( + query=query, + search_depth=self.search_depth, + topic=self.topic, + time_range=self.time_range, + days=self.days, + max_results=self.max_results, + include_domains=self.include_domains, + exclude_domains=self.exclude_domains, + include_answer=self.include_answer, + include_raw_content=self.include_raw_content, + include_images=self.include_images, + timeout=self.timeout, + ) + + if ( + isinstance(raw_results, dict) + and "results" in raw_results + and isinstance(raw_results["results"], list) + ): + for item in raw_results["results"]: + if ( + isinstance(item, dict) + and "content" in item + and isinstance(item["content"], str) + ): + if len(item["content"]) > self.max_content_length_per_result: + item["content"] = ( + item["content"][: self.max_content_length_per_result] + + "..." + ) + + return json.dumps(raw_results, indent=2) + + async def _arun( + self, + query: str, + ) -> str: + """ + Asynchronously performs a search using the Tavily API. + Content of each result is truncated to `max_content_length_per_result`. + + Args: + query: The search query string. + + Returns: + A JSON string containing the search results with truncated content. + """ + if not self.async_client: + raise ValueError( + "Tavily async client is not initialized. Ensure 'tavily-python' is installed and API key is set." + ) + + raw_results = await self.async_client.search( + query=query, + search_depth=self.search_depth, + topic=self.topic, + time_range=self.time_range, + days=self.days, + max_results=self.max_results, + include_domains=self.include_domains, + exclude_domains=self.exclude_domains, + include_answer=self.include_answer, + include_raw_content=self.include_raw_content, + include_images=self.include_images, + timeout=self.timeout, + ) + + if ( + isinstance(raw_results, dict) + and "results" in raw_results + and isinstance(raw_results["results"], list) + ): + for item in raw_results["results"]: + if ( + isinstance(item, dict) + and "content" in item + and isinstance(item["content"], str) + ): + if len(item["content"]) > self.max_content_length_per_result: + item["content"] = ( + item["content"][: self.max_content_length_per_result] + + "..." + ) + + return json.dumps(raw_results, indent=2) From 0dbcbde11946f56c7141babe7b4ebe984435a0e3 Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Wed, 14 May 2025 15:38:11 -0300 Subject: [PATCH 334/391] fix: remove logging.basicConfig definition from Tool (#305) --- src/crewai_tools/adapters/mcp_adapter.py | 7 +++---- src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py | 5 +---- .../tools/snowflake_search_tool/snowflake_search_tool.py | 1 - 3 files changed, 4 insertions(+), 9 deletions(-) diff --git a/src/crewai_tools/adapters/mcp_adapter.py b/src/crewai_tools/adapters/mcp_adapter.py index 2f5cc71f8..bcb38818d 100644 --- a/src/crewai_tools/adapters/mcp_adapter.py +++ b/src/crewai_tools/adapters/mcp_adapter.py @@ -1,6 +1,8 @@ from __future__ import annotations + import logging -from typing import Any, TYPE_CHECKING +from typing import TYPE_CHECKING, Any + from crewai.tools import BaseTool """ @@ -8,9 +10,6 @@ MCPServer for CrewAI. 
""" -logging.basicConfig( - level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s" -) logger = logging.getLogger(__name__) if TYPE_CHECKING: diff --git a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py index 629016189..f734aa885 100644 --- a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py +++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py @@ -2,15 +2,12 @@ import datetime import json import logging import os -from typing import Any, Type, Optional +from typing import Any, Optional, Type import requests from crewai.tools import BaseTool from pydantic import BaseModel, Field -logging.basicConfig( - level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s" -) logger = logging.getLogger(__name__) diff --git a/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py b/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py index 3db816899..bacec2917 100644 --- a/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py +++ b/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py @@ -21,7 +21,6 @@ except ImportError: SNOWFLAKE_AVAILABLE = False # Configure logging -logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) # Cache for query results From 5d5377cfb92460c5352b1ae380d1063e156ac825 Mon Sep 17 00:00:00 2001 From: Lorenze Jay <63378463+lorenzejay@users.noreply.github.com> Date: Tue, 27 May 2025 09:54:35 -0700 Subject: [PATCH 335/391] feat: add ZapierActionTool and ZapierActionsAdapter for integrating with Zapier actions (#311) * feat: add ZapierActionTool and ZapierActionsAdapter for integrating with Zapier actions - Introduced ZapierActionTool to execute Zapier actions with dynamic parameter handling. - Added ZapierActionsAdapter to fetch available Zapier actions and convert them into BaseTool instances. - Updated __init__.py files to include new tools and ensure proper imports. - Created README.md for ZapierActionTools with installation instructions and usage examples. * fix: restore ZapierActionTool import and enhance logging in Zapier adapter - Reintroduced the import of ZapierActionTool in __init__.py for proper accessibility. - Added logging for error handling in ZapierActionsAdapter to improve debugging. - Updated ZapierActionTools factory function to include logging for missing API key. 
--- src/crewai_tools/__init__.py | 4 +- src/crewai_tools/adapters/zapier_adapter.py | 122 ++++++++++++++++++ src/crewai_tools/tools/__init__.py | 1 + .../tools/zapier_action_tool/README.md | 91 +++++++++++++ .../zapier_action_tool/zapier_action_tool.py | 33 +++++ 5 files changed, 250 insertions(+), 1 deletion(-) create mode 100644 src/crewai_tools/adapters/zapier_adapter.py create mode 100644 src/crewai_tools/tools/zapier_action_tool/README.md create mode 100644 src/crewai_tools/tools/zapier_action_tool/zapier_action_tool.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index a85a164c0..f49e4149b 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -69,4 +69,6 @@ from .tools import ( XMLSearchTool, YoutubeChannelSearchTool, YoutubeVideoSearchTool, -) \ No newline at end of file + ZapierActionTools, +) +from .adapters.zapier_adapter import ZapierActionTool diff --git a/src/crewai_tools/adapters/zapier_adapter.py b/src/crewai_tools/adapters/zapier_adapter.py new file mode 100644 index 000000000..78c996964 --- /dev/null +++ b/src/crewai_tools/adapters/zapier_adapter.py @@ -0,0 +1,122 @@ +import os +import logging +from typing import List + +import requests +from crewai.tools import BaseTool +from pydantic import Field, create_model + +ACTIONS_URL = "https://actions.zapier.com/api/v2/ai-actions" + +logger = logging.getLogger(__name__) + + +class ZapierActionTool(BaseTool): + """ + A tool that wraps a Zapier action + """ + + name: str = Field(description="Tool name") + description: str = Field(description="Tool description") + action_id: str = Field(description="Zapier action ID") + api_key: str = Field(description="Zapier API key") + + def _run(self, **kwargs) -> str: + """Execute the Zapier action""" + headers = {"x-api-key": self.api_key, "Content-Type": "application/json"} + + instructions = kwargs.pop( + "instructions", "Execute this action with the provided parameters" + ) + + if not kwargs: + action_params = {"instructions": instructions, "params": {}} + else: + formatted_params = {} + for key, value in kwargs.items(): + formatted_params[key] = { + "value": value, + "mode": "guess", + } + action_params = {"instructions": instructions, "params": formatted_params} + + execute_url = f"{ACTIONS_URL}/{self.action_id}/execute/" + response = requests.request( + "POST", execute_url, headers=headers, json=action_params + ) + + response.raise_for_status() + + return response.json() + + +class ZapierActionsAdapter: + """ + Adapter for Zapier Actions + """ + + api_key: str + + def __init__(self, api_key: str = None): + self.api_key = api_key or os.getenv("ZAPIER_API_KEY") + if not self.api_key: + logger.error("Zapier Actions API key is required") + raise ValueError("Zapier Actions API key is required") + + def get_zapier_actions(self): + headers = { + "x-api-key": self.api_key, + } + response = requests.request("GET", ACTIONS_URL, headers=headers) + response.raise_for_status() + + response_json = response.json() + return response_json + + def tools(self) -> List[BaseTool]: + """Convert Zapier actions to BaseTool instances""" + actions_response = self.get_zapier_actions() + tools = [] + + for action in actions_response.get("results", []): + tool_name = ( + action["meta"]["action_label"] + .replace(" ", "_") + .replace(":", "") + .lower() + ) + + params = action.get("params", {}) + args_fields = {} + + args_fields["instructions"] = ( + str, + Field(description="Instructions for how to execute this action"), + ) + + for param_name, param_info in 
params.items(): + field_type = ( + str # Default to string, could be enhanced based on param_info + ) + field_description = ( + param_info.get("description", "") + if isinstance(param_info, dict) + else "" + ) + args_fields[param_name] = ( + field_type, + Field(description=field_description), + ) + + args_schema = create_model(f"{tool_name.title()}Schema", **args_fields) + + tool = ZapierActionTool( + name=tool_name, + description=action["description"], + action_id=action["id"], + api_key=self.api_key, + args_schema=args_schema, + ) + tools.append(tool) + + return tools diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 0c397902b..c10f152ef 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -77,3 +77,4 @@ from .youtube_channel_search_tool.youtube_channel_search_tool import ( YoutubeChannelSearchTool, ) from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool +from .zapier_action_tool.zapier_action_tool import ZapierActionTools diff --git a/src/crewai_tools/tools/zapier_action_tool/README.md b/src/crewai_tools/tools/zapier_action_tool/README.md new file mode 100644 index 000000000..5a6dad43b --- /dev/null +++ b/src/crewai_tools/tools/zapier_action_tool/README.md @@ -0,0 +1,91 @@ +# Zapier Action Tools + +## Description + +This tool enables CrewAI agents to interact with Zapier actions, allowing them to automate workflows and integrate with hundreds of applications through Zapier's platform. The tool dynamically creates BaseTool instances for each available Zapier action, making it easy to incorporate automation into your AI workflows. + +## Installation + +Install the crewai_tools package by executing the following command in your terminal: + +```shell +uv pip install 'crewai[tools]' +``` + +## Example + +To utilize the ZapierActionTools for different use cases, follow these examples: + +```python +from crewai_tools import ZapierActionTools +from crewai import Agent + +# Get all available Zapier actions you are connected to. +tools = ZapierActionTools( + zapier_api_key="your-zapier-api-key" +) + +# Or specify only certain actions you want to use +tools = ZapierActionTools( + zapier_api_key="your-zapier-api-key", + action_list=["gmail_find_email", "slack_send_message", "google_sheets_create_row"] +) + +# Adding the tools to an agent +zapier_agent = Agent( + name="zapier_agent", + role="You are a helpful assistant that can automate tasks using Zapier integrations.", + llm="gpt-4o-mini", + tools=tools, + goal="Automate workflows and integrate with various applications", + backstory="You are a Zapier automation expert that helps users connect and automate their favorite apps.", + verbose=True, +) + +# Example usage +result = zapier_agent.kickoff( + "Find emails from john@example.com in Gmail" +) +``` + +## Arguments + +- `zapier_api_key` : Your Zapier API key for authentication. Can also be set via `ZAPIER_API_KEY` environment variable. (Required) +- `action_list` : A list of specific Zapier action names to include. If not provided, all available actions will be returned. 
(Optional) + +## Environment Variables + +You can set your Zapier API key as an environment variable instead of passing it directly: + +```bash +export ZAPIER_API_KEY="your-zapier-api-key" +``` + +Then use the tool without explicitly passing the API key: + +```python +from crewai_tools import ZapierActionTools + +# API key will be automatically loaded from environment +tools = ZapierActionTools( + action_list=["gmail_find_email", "slack_send_message"] +) +``` + +## Getting Your Zapier API Key + +1. Log in to your Zapier account +2. Go to https://zapier.com/app/developer/ +3. Create a new app or use an existing one +4. Navigate to the "Authentication" section +5. Copy your API key + +## Available Actions + +The tool will dynamically discover all available Zapier actions associated with your API key. Common actions include: + +- Gmail operations (find emails, send emails) +- Slack messaging +- Google Sheets operations +- Calendar events +- And hundreds more depending on your Zapier integrations diff --git a/src/crewai_tools/tools/zapier_action_tool/zapier_action_tool.py b/src/crewai_tools/tools/zapier_action_tool/zapier_action_tool.py new file mode 100644 index 000000000..190ef3fc3 --- /dev/null +++ b/src/crewai_tools/tools/zapier_action_tool/zapier_action_tool.py @@ -0,0 +1,33 @@ +import os +import logging +from typing import List, Optional +from crewai.tools import BaseTool +from crewai_tools.adapters.zapier_adapter import ZapierActionsAdapter + +logger = logging.getLogger(__name__) + + +def ZapierActionTools( + zapier_api_key: Optional[str] = None, action_list: Optional[List[str]] = None +) -> List[BaseTool]: + """Factory function that returns Zapier action tools. + + Args: + zapier_api_key: The API key for Zapier. + action_list: Optional list of specific tool names to include. + + Returns: + A list of Zapier action tools. 
+ """ + if zapier_api_key is None: + zapier_api_key = os.getenv("ZAPIER_API_KEY") + if zapier_api_key is None: + logger.error("ZAPIER_API_KEY is not set") + raise ValueError("ZAPIER_API_KEY is not set") + adapter = ZapierActionsAdapter(zapier_api_key) + all_tools = adapter.tools() + + if action_list is None: + return all_tools + + return [tool for tool in all_tools if tool.name in action_list] From 72b3a8c70a930c8f5fcd70a41388841af984b169 Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Fri, 30 May 2025 09:13:11 -0300 Subject: [PATCH 336/391] feat: allow to provide the driver and options to be used by Selenium (#316) --- .../selenium_scraping_tool.py | 13 +++++++--- tests/tools/selenium_scraping_tool_test.py | 24 +++++++++++++++---- 2 files changed, 29 insertions(+), 8 deletions(-) diff --git a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py index 57211e64e..3976facef 100644 --- a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py +++ b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py @@ -91,9 +91,16 @@ class SeleniumScrapingTool(BaseTool): "`selenium` and `webdriver-manager` package not found, please run `uv add selenium webdriver-manager`" ) - options: Options = Options() - options.add_argument("--headless") - self.driver = webdriver.Chrome(options=options) + if 'driver' not in kwargs: + if 'options' not in kwargs: + options: Options = Options() + options.add_argument("--headless") + else: + options = kwargs['options'] + self.driver = webdriver.Chrome(options=options) + else: + self.driver = kwargs['driver'] + self._by = By if cookie is not None: self.cookie = cookie diff --git a/tests/tools/selenium_scraping_tool_test.py b/tests/tools/selenium_scraping_tool_test.py index b360df3a1..0e285d136 100644 --- a/tests/tools/selenium_scraping_tool_test.py +++ b/tests/tools/selenium_scraping_tool_test.py @@ -2,9 +2,8 @@ import os import tempfile from unittest.mock import MagicMock, patch -import pytest from bs4 import BeautifulSoup - +from selenium.webdriver.chrome.options import Options from crewai_tools.tools.selenium_scraping_tool.selenium_scraping_tool import ( SeleniumScrapingTool, ) @@ -24,9 +23,7 @@ def mock_driver_with_html(html_content): def initialize_tool_with(mock_driver): - tool = SeleniumScrapingTool() - tool.driver = mock_driver - + tool = SeleniumScrapingTool(driver=mock_driver) return tool @@ -48,6 +45,17 @@ def test_tool_initialization(mocked_chrome): except: pass +@patch("selenium.webdriver.Chrome") +def test_tool_initialization_with_options(mocked_chrome): + mocked_chrome.return_value = MagicMock() + + options = Options() + options.add_argument("--disable-gpu") + + SeleniumScrapingTool(options=options) + + mocked_chrome.assert_called_once_with(options=options) + @patch("selenium.webdriver.Chrome") def test_scrape_without_css_selector(_mocked_chrome_driver): @@ -113,3 +121,9 @@ def test_scrape_with_driver_error(_mocked_chrome_driver): result = tool._run(website_url="https://example.com") assert result == "Error scraping website: WebDriver error occurred" mock_driver.close.assert_called_once() + +@patch("selenium.webdriver.Chrome") +def test_initialization_with_driver(_mocked_chrome_driver): + mock_driver = MagicMock() + tool = initialize_tool_with(mock_driver) + assert tool.driver == mock_driver From 748f4382328d24d23695a05fc90f6fef35825845 Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Tue, 3 Jun 2025 11:11:17 -0300 Subject: [PATCH 
337/391] Support to generate a tool spec file for each published released (#313) * feat: generate tool specs file based on their schema definition * generate tool spec after publishing a new release * feat: support add available env-vars to tool.specs.json * refactor: use better identifier names on tool specs * feat: change tool specs generation to run daily * feat: add auth token to notify api about tool changes * refactor: use humanized_name instead of verbose_name * refactor: generate tool spec after pushing to main This commit also fix the remote upstream & updated the notify api --- .../tools/serper_dev_tool/serper_dev_tool.py | 7 +- tests/test_generate_tool_specs.py | 190 ++++++++++++++++++ 2 files changed, 195 insertions(+), 2 deletions(-) create mode 100644 tests/test_generate_tool_specs.py diff --git a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py index f734aa885..23f15dd92 100644 --- a/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py +++ b/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py @@ -2,10 +2,10 @@ import datetime import json import logging import os -from typing import Any, Optional, Type +from typing import Any, List, Optional, Type import requests -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, Field logger = logging.getLogger(__name__) @@ -45,6 +45,9 @@ class SerperDevTool(BaseTool): country: Optional[str] = "" location: Optional[str] = "" locale: Optional[str] = "" + env_vars: List[EnvVar] = [ + EnvVar(name="SERPER_API_KEY", description="API key for Serper", required=True), + ] def _get_search_url(self, search_type: str) -> str: """Get the appropriate endpoint URL based on search type.""" diff --git a/tests/test_generate_tool_specs.py b/tests/test_generate_tool_specs.py new file mode 100644 index 000000000..cb3b18cd5 --- /dev/null +++ b/tests/test_generate_tool_specs.py @@ -0,0 +1,190 @@ +import json +from typing import List, Optional + +import pytest +from pydantic import BaseModel, Field +from unittest import mock + +from generate_tool_specs import ToolSpecExtractor +from crewai.tools.base_tool import EnvVar + +class MockToolSchema(BaseModel): + query: str = Field(..., description="The query parameter") + count: int = Field(5, description="Number of results to return") + filters: Optional[List[str]] = Field(None, description="Optional filters to apply") + + +class MockTool: + name = "Mock Search Tool" + description = "A tool that mocks search functionality" + args_schema = MockToolSchema + +@pytest.fixture +def extractor(): + ext = ToolSpecExtractor() + MockTool.__pydantic_core_schema__ = create_mock_schema(MockTool) + MockTool.args_schema.__pydantic_core_schema__ = create_mock_schema_args(MockTool.args_schema) + return ext + + +def create_mock_schema(cls): + return { + "type": "model", + "cls": cls, + "schema": { + "type": "model-fields", + "fields": { + "name": {"type": "model-field", "schema": {"type": "default", "schema": {"type": "str"}, "default": cls.name}, "metadata": {}}, + "description": {"type": "model-field", "schema": {"type": "default", "schema": {"type": "str"}, "default": cls.description}, "metadata": {}}, + "args_schema": {"type": "model-field", "schema": {"type": "default", "schema": {"type": "is-subclass", "cls": BaseModel}, "default": cls.args_schema}, "metadata": {}}, + "env_vars": { + "type": "model-field", "schema": {"type": "default", "schema": {"type": "list", "items_schema": {"type": 
"model", "cls": "INSPECT CLASS", "schema": {"type": "model-fields", "fields": {"name": {"type": "model-field", "schema": {"type": "str"}, "metadata": {}}, "description": {"type": "model-field", "schema": {"type": "str"}, "metadata": {}}, "required": {"type": "model-field", "schema": {"type": "default", "schema": {"type": "bool"}, "default": True}, "metadata": {}}, "default": {"type": "model-field", "schema": {"type": "default", "schema": {"type": "nullable", "schema": {"type": "str"}}, "default": None}, "metadata": {}},}, "model_name": "EnvVar", "computed_fields": []}, "custom_init": False, "root_model": False, "config": {"title": "EnvVar"}, "ref": "crewai.tools.base_tool.EnvVar:4593650640", "metadata": {"pydantic_js_functions": ["INSPECT __get_pydantic_json_schema__"]}}}, "default": [EnvVar(name='SERPER_API_KEY', description='API key for Serper', required=True, default=None), EnvVar(name='API_RATE_LIMIT', description='API rate limit', required=False, default="100")]}, "metadata": {} + } + }, + "model_name": cls.__name__ + } + } + + +def create_mock_schema_args(cls): + return { + "type": "model", + "cls": cls, + "schema": { + "type": "model-fields", + "fields": { + "query": {"type": "model-field", "schema": {"type": "default", "schema": {"type": "str"}, "default": "The query parameter"}}, + "count": {"type": "model-field", "schema": {"type": "default", "schema": {"type": "int"}, "default": 5}, "metadata": {"pydantic_js_updates": {"description": "Number of results to return"}}}, + "filters": {"type": "model-field", "schema": {"type": "nullable", "schema": {"type": "list", "items_schema": {"type": "str"}}}} + }, + "model_name": cls.__name__ + } + } + + +def test_unwrap_schema(extractor): + nested_schema = { + "type": "function-after", + "schema": {"type": "default", "schema": {"type": "str", "value": "test"}} + } + result = extractor._unwrap_schema(nested_schema) + assert result["type"] == "str" + assert result["value"] == "test" + + +@pytest.mark.parametrize( + "field, fallback, expected", + [ + ({"schema": {"default": "test_value"}}, None, "test_value"), + ({}, "fallback_value", "fallback_value"), + ({"schema": {"default": 123}}, "fallback_value", "fallback_value") + ] +) +def test_extract_field_default(extractor, field, fallback, expected): + result = extractor._extract_field_default(field, fallback=fallback) + assert result == expected + + +@pytest.mark.parametrize( + "schema, expected", + [ + ({"type": "str"}, "str"), + ({"type": "list", "items_schema": {"type": "str"}}, "list[str]"), + ({"type": "dict", "keys_schema": {"type": "str"}, "values_schema": {"type": "int"}}, "dict[str, int]"), + ({"type": "union", "choices": [{"type": "str"}, {"type": "int"}]}, "union[str, int]"), + ({"type": "custom_type"}, "custom_type"), + ({}, "unknown"), + ] +) +def test_schema_type_to_str(extractor, schema, expected): + assert extractor._schema_type_to_str(schema) == expected + + +@pytest.mark.parametrize( + "info, expected_type", + [ + ({"schema": {"type": "str"}}, "str"), + ({"schema": {"type": "nullable", "schema": {"type": "int"}}}, "int"), + ({"schema": {"type": "default", "schema": {"type": "list", "items_schema": {"type": "str"}}}}, "list[str]"), + ] +) +def test_extract_param_type(extractor, info, expected_type): + assert extractor._extract_param_type(info) == expected_type + + +def test_extract_tool_info(extractor): + with mock.patch("generate_tool_specs.dir", return_value=["MockTool"]), \ + mock.patch("generate_tool_specs.getattr", return_value=MockTool): + extractor.extract_all_tools() + + 
assert len(extractor.tools_spec) == 1 + tool_info = extractor.tools_spec[0] + + assert tool_info["name"] == "MockTool" + assert tool_info["humanized_name"] == "Mock Search Tool" + assert tool_info["description"] == "A tool that mocks search functionality" + + assert len(tool_info["env_vars"]) == 2 + api_key_var, rate_limit_var = tool_info["env_vars"] + + assert api_key_var["name"] == "SERPER_API_KEY" + assert api_key_var["description"] == "API key for Serper" + assert api_key_var["required"] == True + assert api_key_var["default"] == None + + assert rate_limit_var["name"] == "API_RATE_LIMIT" + assert rate_limit_var["description"] == "API rate limit" + assert rate_limit_var["required"] == False + assert rate_limit_var["default"] == "100" + + assert len(tool_info["run_params"]) == 3 + + params = {p["name"]: p for p in tool_info["run_params"]} + assert params["query"]["description"] == "The query parameter" + assert params["query"]["type"] == "str" + + assert params["count"]["description"] == "Number of results to return" + assert params["count"]["type"] == "int" + + assert params["filters"]["description"] == "" + assert params["filters"]["type"] == "list[str]" + + +def test_save_to_json(extractor, tmp_path): + extractor.tools_spec = [{ + "name": "TestTool", + "humanized_name": "Test Tool", + "description": "A test tool", + "run_params": [ + {"name": "param1", "description": "Test parameter", "type": "str"} + ] + }] + + file_path = tmp_path / "output.json" + extractor.save_to_json(str(file_path)) + + assert file_path.exists() + + with open(file_path, "r") as f: + data = json.load(f) + + assert "tools" in data + assert len(data["tools"]) == 1 + assert data["tools"][0]["humanized_name"] == "Test Tool" + assert data["tools"][0]["run_params"][0]["name"] == "param1" + + +@pytest.mark.integration +def test_full_extraction_process(): + extractor = ToolSpecExtractor() + specs = extractor.extract_all_tools() + + assert len(specs) > 0 + + for tool in specs: + assert "name" in tool + assert "humanized_name" in tool and tool["humanized_name"] + assert "description" in tool + assert isinstance(tool["run_params"], list) + for param in tool["run_params"]: + assert "name" in param and param["name"] \ No newline at end of file From dc2d4af8eabb8b258c7c00f5dacb23946a28c581 Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Wed, 11 Jun 2025 10:02:56 -0300 Subject: [PATCH 338/391] fix: ensure the entire file will be read when the start_line is None (#325) --- .../tools/file_read_tool/file_read_tool.py | 10 ++++++---- tests/file_read_tool_test.py | 5 +++++ 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/crewai_tools/tools/file_read_tool/file_read_tool.py b/src/crewai_tools/tools/file_read_tool/file_read_tool.py index 3447cb0d6..4e04e3a7d 100644 --- a/src/crewai_tools/tools/file_read_tool/file_read_tool.py +++ b/src/crewai_tools/tools/file_read_tool/file_read_tool.py @@ -59,11 +59,13 @@ class FileReadTool(BaseTool): def _run( self, - **kwargs: Any, + file_path: Optional[str] = None, + start_line: Optional[int] = 1, + line_count: Optional[int] = None, ) -> str: - file_path = kwargs.get("file_path", self.file_path) - start_line = kwargs.get("start_line", 1) - line_count = kwargs.get("line_count", None) + file_path = file_path or self.file_path + start_line = start_line or 1 + line_count = line_count or None if file_path is None: return ( diff --git a/tests/file_read_tool_test.py b/tests/file_read_tool_test.py index a0f2c695e..174b32229 100644 --- a/tests/file_read_tool_test.py +++ 
b/tests/file_read_tool_test.py @@ -139,6 +139,11 @@ def test_file_read_tool_zero_or_negative_start_line(): with patch("builtins.open", mock_open(read_data=file_content)): tool = FileReadTool() + # Test with start_line = None + result = tool._run(file_path=test_file, start_line=None) + expected = "".join(lines) # Should read the entire file + assert result == expected + # Test with start_line = 0 result = tool._run(file_path=test_file, start_line=0) expected = "".join(lines) # Should read the entire file From 5a99f0776514a74078adf9a1f8a27c199c998fd3 Mon Sep 17 00:00:00 2001 From: Lorenze Jay <63378463+lorenzejay@users.noreply.github.com> Date: Sat, 14 Jun 2025 12:21:18 -0700 Subject: [PATCH 339/391] =?UTF-8?q?refactor:=20remove=20token=20validation?= =?UTF-8?q?=20from=20EnterpriseActionKitToolAdapter=E2=80=A6=20(#331)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * refactor: remove token validation from EnterpriseActionKitToolAdapter and CrewaiEnterpriseTools This commit simplifies the initialization of the EnterpriseActionKitToolAdapter and CrewaiEnterpriseTools by removing the explicit validation for the enterprise action token. The token can now be set to None without raising an error, allowing for more flexible usage. * added loggers for monitoring * fixed typo --- src/crewai_tools/adapters/enterprise_adapter.py | 2 -- .../crewai_enterprise_tools.py | 12 ++++++------ 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/src/crewai_tools/adapters/enterprise_adapter.py b/src/crewai_tools/adapters/enterprise_adapter.py index e6e64647a..34238602e 100644 --- a/src/crewai_tools/adapters/enterprise_adapter.py +++ b/src/crewai_tools/adapters/enterprise_adapter.py @@ -125,8 +125,6 @@ class EnterpriseActionKitToolAdapter: enterprise_action_kit_project_id: str = ENTERPRISE_ACTION_KIT_PROJECT_ID, ): """Initialize the adapter with an enterprise action token.""" - if not enterprise_action_token: - raise ValueError("enterprise_action_token is required") self.enterprise_action_token = enterprise_action_token self._actions_schema = {} diff --git a/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py b/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py index a1dc2970b..7fc97d179 100644 --- a/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py +++ b/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py @@ -4,9 +4,12 @@ Crewai Enterprise Tools import os import typing as t +import logging from crewai.tools import BaseTool from crewai_tools.adapters.enterprise_adapter import EnterpriseActionKitToolAdapter +logger = logging.getLogger(__name__) + def CrewaiEnterpriseTools( enterprise_token: t.Optional[str] = None, @@ -18,7 +21,7 @@ def CrewaiEnterpriseTools( Args: enterprise_token: The token for accessing enterprise actions. - If not provided, will try to use CREWAI_ENTEPRISE_TOOLS_TOKEN env var. + If not provided, will try to use CREWAI_ENTERPRISE_TOOLS_TOKEN env var. actions_list: Optional list of specific tool names to include. If provided, only tools with these names will be returned. @@ -26,11 +29,8 @@ def CrewaiEnterpriseTools( A list of BaseTool instances for enterprise actions """ if enterprise_token is None: - enterprise_token = os.environ.get("CREWAI_ENTEPRISE_TOOLS_TOKEN") - if enterprise_token is None: - raise ValueError( - "No enterprise token provided. Please provide a token or set the CREWAI_ENTEPRISE_TOOLS_TOKEN environment variable." 
- ) + enterprise_token = os.environ.get("CREWAI_ENTERPRISE_TOOLS_TOKEN") + logger.warning("No enterprise token provided") adapter_kwargs = {"enterprise_action_token": enterprise_token} From fac32d9503a734a23b03939262c6c3a9a3347925 Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Mon, 16 Jun 2025 11:09:19 -0300 Subject: [PATCH 340/391] Support to collect extra package dependencies of Tools (#330) * feat: add explictly package_dependencies in the Tools * feat: collect package_dependencies from Tool to add in tool.specs.json * feat: add default value in run_params Tool' specs * fix: support get boolean values This commit also refactor test to make easier define newest attributes into a Tool --- .../aws/bedrock/agents/invoke_agent_tool.py | 29 +++--- .../bedrock/knowledge_base/retriever_tool.py | 51 ++++++----- src/crewai_tools/aws/s3/reader_tool.py | 3 +- src/crewai_tools/aws/s3/writer_tool.py | 3 +- .../tools/ai_mind_tool/ai_mind_tool.py | 13 +-- .../apify_actors_tool/apify_actors_tool.py | 1 + .../browserbase_load_tool.py | 3 +- .../tools/dalle_tool/dalle_tool.py | 4 +- .../databricks_query_tool.py | 1 + .../tools/exa_tools/exa_search_tool.py | 3 +- .../firecrawl_crawl_website_tool.py | 3 +- .../firecrawl_scrape_website_tool.py | 3 +- .../firecrawl_search_tool.py | 3 +- .../hyperbrowser_load_tool.py | 7 +- .../tools/linkup/linkup_search_tool.py | 3 +- .../tools/multion_tool/multion_tool.py | 3 +- .../patronus_local_evaluator_tool.py | 3 +- .../qdrant_search_tool.py | 3 +- .../scrapegraph_scrape_tool.py | 3 +- .../scrapfly_scrape_website_tool.py | 3 +- .../selenium_scraping_tool.py | 3 +- .../tools/serpapi_tool/serpapi_base_tool.py | 4 +- .../snowflake_search_tool.py | 1 + .../tools/spider_tool/spider_tool.py | 3 +- .../tools/stagehand_tool/stagehand_tool.py | 1 + .../tavily_extractor_tool.py | 1 + .../tavily_search_tool/tavily_search_tool.py | 3 +- .../tools/weaviate_tool/vector_search.py | 4 +- tests/test_generate_tool_specs.py | 91 +++++++------------ 29 files changed, 129 insertions(+), 127 deletions(-) diff --git a/src/crewai_tools/aws/bedrock/agents/invoke_agent_tool.py b/src/crewai_tools/aws/bedrock/agents/invoke_agent_tool.py index c064b9b2d..65280fe7b 100644 --- a/src/crewai_tools/aws/bedrock/agents/invoke_agent_tool.py +++ b/src/crewai_tools/aws/bedrock/agents/invoke_agent_tool.py @@ -1,4 +1,4 @@ -from typing import Type, Optional, Dict, Any +from typing import Type, Optional, Dict, Any, List import os import json import uuid @@ -29,6 +29,7 @@ class BedrockInvokeAgentTool(BaseTool): session_id: str = None enable_trace: bool = False end_session: bool = False + package_dependencies: List[str] = ["boto3"] def __init__( self, @@ -51,7 +52,7 @@ class BedrockInvokeAgentTool(BaseTool): description (Optional[str]): Custom description for the tool """ super().__init__(**kwargs) - + # Get values from environment variables if not provided self.agent_id = agent_id or os.getenv('BEDROCK_AGENT_ID') self.agent_alias_id = agent_alias_id or os.getenv('BEDROCK_AGENT_ALIAS_ID') @@ -62,7 +63,7 @@ class BedrockInvokeAgentTool(BaseTool): # Update the description if provided if description: self.description = description - + # Validate parameters self._validate_parameters() @@ -74,17 +75,17 @@ class BedrockInvokeAgentTool(BaseTool): raise BedrockValidationError("agent_id cannot be empty") if not isinstance(self.agent_id, str): raise BedrockValidationError("agent_id must be a string") - + # Validate agent_alias_id if not self.agent_alias_id: raise BedrockValidationError("agent_alias_id cannot be empty") 
if not isinstance(self.agent_alias_id, str): raise BedrockValidationError("agent_alias_id must be a string") - + # Validate session_id if provided if self.session_id and not isinstance(self.session_id, str): raise BedrockValidationError("session_id must be a string") - + except BedrockValidationError as e: raise BedrockValidationError(f"Parameter validation failed: {str(e)}") @@ -123,7 +124,7 @@ Below is the users query or task. Complete it and answer it consicely and to the # Process the response completion = "" - + # Check if response contains a completion field if 'completion' in response: # Process streaming response format @@ -134,7 +135,7 @@ Below is the users query or task. Complete it and answer it consicely and to the completion += chunk_bytes.decode('utf-8') else: completion += str(chunk_bytes) - + # If no completion found in streaming format, try direct format if not completion and 'chunk' in response and 'bytes' in response['chunk']: chunk_bytes = response['chunk']['bytes'] @@ -142,31 +143,31 @@ Below is the users query or task. Complete it and answer it consicely and to the completion = chunk_bytes.decode('utf-8') else: completion = str(chunk_bytes) - + # If still no completion, return debug info if not completion: debug_info = { "error": "Could not extract completion from response", "response_keys": list(response.keys()) } - + # Add more debug info if 'chunk' in response: debug_info["chunk_keys"] = list(response['chunk'].keys()) - + raise BedrockAgentError(f"Failed to extract completion: {json.dumps(debug_info, indent=2)}") - + return completion except ClientError as e: error_code = "Unknown" error_message = str(e) - + # Try to extract error code if available if hasattr(e, 'response') and 'Error' in e.response: error_code = e.response['Error'].get('Code', 'Unknown') error_message = e.response['Error'].get('Message', str(e)) - + raise BedrockAgentError(f"Error ({error_code}): {error_message}") except BedrockAgentError: # Re-raise BedrockAgentError exceptions diff --git a/src/crewai_tools/aws/bedrock/knowledge_base/retriever_tool.py b/src/crewai_tools/aws/bedrock/knowledge_base/retriever_tool.py index 15c74077c..06fd3ce38 100644 --- a/src/crewai_tools/aws/bedrock/knowledge_base/retriever_tool.py +++ b/src/crewai_tools/aws/bedrock/knowledge_base/retriever_tool.py @@ -26,6 +26,7 @@ class BedrockKBRetrieverTool(BaseTool): retrieval_configuration: Optional[Dict[str, Any]] = None guardrail_configuration: Optional[Dict[str, Any]] = None next_token: Optional[str] = None + package_dependencies: List[str] = ["boto3"] def __init__( self, @@ -46,13 +47,13 @@ class BedrockKBRetrieverTool(BaseTool): next_token (Optional[str], optional): Token for retrieving the next batch of results. Defaults to None. """ super().__init__(**kwargs) - + # Get knowledge_base_id from environment variable if not provided self.knowledge_base_id = knowledge_base_id or os.getenv('BEDROCK_KB_ID') self.number_of_results = number_of_results self.guardrail_configuration = guardrail_configuration self.next_token = next_token - + # Initialize retrieval_configuration with provided parameters or use the one provided if retrieval_configuration is None: self.retrieval_configuration = self._build_retrieval_configuration() @@ -67,16 +68,16 @@ class BedrockKBRetrieverTool(BaseTool): def _build_retrieval_configuration(self) -> Dict[str, Any]: """Build the retrieval configuration based on provided parameters. 
- + Returns: Dict[str, Any]: The constructed retrieval configuration """ vector_search_config = {} - + # Add number of results if provided if self.number_of_results is not None: vector_search_config["numberOfResults"] = self.number_of_results - + return {"vectorSearchConfiguration": vector_search_config} def _validate_parameters(self): @@ -91,7 +92,7 @@ class BedrockKBRetrieverTool(BaseTool): raise BedrockValidationError("knowledge_base_id must be 10 characters or less") if not all(c.isalnum() for c in self.knowledge_base_id): raise BedrockValidationError("knowledge_base_id must contain only alphanumeric characters") - + # Validate next_token if provided if self.next_token: if not isinstance(self.next_token, str): @@ -100,23 +101,23 @@ class BedrockKBRetrieverTool(BaseTool): raise BedrockValidationError("next_token must be between 1 and 2048 characters") if ' ' in self.next_token: raise BedrockValidationError("next_token cannot contain spaces") - + # Validate number_of_results if provided if self.number_of_results is not None: if not isinstance(self.number_of_results, int): raise BedrockValidationError("number_of_results must be an integer") if self.number_of_results < 1: raise BedrockValidationError("number_of_results must be greater than 0") - + except BedrockValidationError as e: raise BedrockValidationError(f"Parameter validation failed: {str(e)}") def _process_retrieval_result(self, result: Dict[str, Any]) -> Dict[str, Any]: """Process a single retrieval result from Bedrock Knowledge Base. - + Args: result (Dict[str, Any]): Raw result from Bedrock Knowledge Base - + Returns: Dict[str, Any]: Processed result with standardized format """ @@ -124,12 +125,12 @@ class BedrockKBRetrieverTool(BaseTool): content_obj = result.get('content', {}) content = content_obj.get('text', '') content_type = content_obj.get('type', 'text') - + # Extract location information location = result.get('location', {}) location_type = location.get('type', 'unknown') source_uri = None - + # Map for location types and their URI fields location_mapping = { 's3Location': {'field': 'uri', 'type': 'S3'}, @@ -141,7 +142,7 @@ class BedrockKBRetrieverTool(BaseTool): 'kendraDocumentLocation': {'field': 'uri', 'type': 'KendraDocument'}, 'sqlLocation': {'field': 'query', 'type': 'SQL'} } - + # Extract the URI based on location type for loc_key, config in location_mapping.items(): if loc_key in location: @@ -149,7 +150,7 @@ class BedrockKBRetrieverTool(BaseTool): if not location_type or location_type == 'unknown': location_type = config['type'] break - + # Create result object result_object = { 'content': content, @@ -157,22 +158,22 @@ class BedrockKBRetrieverTool(BaseTool): 'source_type': location_type, 'source_uri': source_uri } - + # Add optional fields if available if 'score' in result: result_object['score'] = result['score'] - + if 'metadata' in result: result_object['metadata'] = result['metadata'] - + # Handle byte content if present if 'byteContent' in content_obj: result_object['byte_content'] = content_obj['byteContent'] - + # Handle row content if present if 'row' in content_obj: result_object['row_content'] = content_obj['row'] - + return result_object def _run(self, query: str) -> str: @@ -201,10 +202,10 @@ class BedrockKBRetrieverTool(BaseTool): # Add optional parameters if provided if self.retrieval_configuration: retrieve_params['retrievalConfiguration'] = self.retrieval_configuration - + if self.guardrail_configuration: retrieve_params['guardrailConfiguration'] = self.guardrail_configuration - + if 
self.next_token: retrieve_params['nextToken'] = self.next_token @@ -223,10 +224,10 @@ class BedrockKBRetrieverTool(BaseTool): response_object["results"] = results else: response_object["message"] = "No results found for the given query." - + if "nextToken" in response: response_object["nextToken"] = response["nextToken"] - + if "guardrailAction" in response: response_object["guardrailAction"] = response["guardrailAction"] @@ -236,12 +237,12 @@ class BedrockKBRetrieverTool(BaseTool): except ClientError as e: error_code = "Unknown" error_message = str(e) - + # Try to extract error code if available if hasattr(e, 'response') and 'Error' in e.response: error_code = e.response['Error'].get('Code', 'Unknown') error_message = e.response['Error'].get('Message', str(e)) - + raise BedrockKnowledgeBaseError(f"Error ({error_code}): {error_message}") except Exception as e: raise BedrockKnowledgeBaseError(f"Unexpected error: {str(e)}") \ No newline at end of file diff --git a/src/crewai_tools/aws/s3/reader_tool.py b/src/crewai_tools/aws/s3/reader_tool.py index 4b3b9a394..c3f1fa4eb 100644 --- a/src/crewai_tools/aws/s3/reader_tool.py +++ b/src/crewai_tools/aws/s3/reader_tool.py @@ -1,4 +1,4 @@ -from typing import Any, Type +from typing import Any, Type, List import os from crewai.tools import BaseTool @@ -15,6 +15,7 @@ class S3ReaderTool(BaseTool): name: str = "S3 Reader Tool" description: str = "Reads a file from Amazon S3 given an S3 file path" args_schema: Type[BaseModel] = S3ReaderToolInput + package_dependencies: List[str] = ["boto3"] def _run(self, file_path: str) -> str: try: diff --git a/src/crewai_tools/aws/s3/writer_tool.py b/src/crewai_tools/aws/s3/writer_tool.py index f0aaddb28..2e1528d13 100644 --- a/src/crewai_tools/aws/s3/writer_tool.py +++ b/src/crewai_tools/aws/s3/writer_tool.py @@ -1,4 +1,4 @@ -from typing import Any, Type +from typing import Type, List import os from crewai.tools import BaseTool @@ -14,6 +14,7 @@ class S3WriterTool(BaseTool): name: str = "S3 Writer Tool" description: str = "Writes content to a file in Amazon S3 given an S3 file path" args_schema: Type[BaseModel] = S3WriterToolInput + package_dependencies: List[str] = ["boto3"] def _run(self, file_path: str, content: str) -> str: try: diff --git a/src/crewai_tools/tools/ai_mind_tool/ai_mind_tool.py b/src/crewai_tools/tools/ai_mind_tool/ai_mind_tool.py index b38426e09..1a96f62ff 100644 --- a/src/crewai_tools/tools/ai_mind_tool/ai_mind_tool.py +++ b/src/crewai_tools/tools/ai_mind_tool/ai_mind_tool.py @@ -1,10 +1,10 @@ import os import secrets -from typing import Any, Dict, List, Optional, Text, Type +from typing import Any, Dict, List, Optional, Type from crewai.tools import BaseTool from openai import OpenAI -from pydantic import BaseModel +from pydantic import BaseModel, Field class AIMindToolConstants: @@ -16,7 +16,7 @@ class AIMindToolConstants: class AIMindToolInputSchema(BaseModel): """Input for AIMind Tool.""" - query: str = "Question in natural language to ask the AI-Mind" + query: str = Field(description="Question in natural language to ask the AI-Mind") class AIMindTool(BaseTool): @@ -31,9 +31,10 @@ class AIMindTool(BaseTool): args_schema: Type[BaseModel] = AIMindToolInputSchema api_key: Optional[str] = None datasources: Optional[List[Dict[str, Any]]] = None - mind_name: Optional[Text] = None + mind_name: Optional[str] = None + package_dependencies: List[str] = ["minds-sdk"] - def __init__(self, api_key: Optional[Text] = None, **kwargs): + def __init__(self, api_key: Optional[str] = None, **kwargs): 
super().__init__(**kwargs) self.api_key = api_key or os.getenv("MINDS_API_KEY") if not self.api_key: @@ -72,7 +73,7 @@ class AIMindTool(BaseTool): def _run( self, - query: Text + query: str ): # Run the query on the AI-Mind. # The Minds API is OpenAI compatible and therefore, the OpenAI client can be used. diff --git a/src/crewai_tools/tools/apify_actors_tool/apify_actors_tool.py b/src/crewai_tools/tools/apify_actors_tool/apify_actors_tool.py index 37ae7312b..44c4839e8 100644 --- a/src/crewai_tools/tools/apify_actors_tool/apify_actors_tool.py +++ b/src/crewai_tools/tools/apify_actors_tool/apify_actors_tool.py @@ -38,6 +38,7 @@ class ApifyActorsTool(BaseTool): print(f"Content: {result.get('markdown', 'N/A')[:100]}...") """ actor_tool: '_ApifyActorsTool' = Field(description="Apify Actor Tool") + package_dependencies: List[str] = ["langchain-apify"] def __init__( self, diff --git a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py index 3a2462f5e..f946baf73 100644 --- a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py +++ b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py @@ -1,5 +1,5 @@ import os -from typing import Any, Optional, Type +from typing import Any, Optional, Type, List from crewai.tools import BaseTool from pydantic import BaseModel, Field @@ -19,6 +19,7 @@ class BrowserbaseLoadTool(BaseTool): session_id: Optional[str] = None proxy: Optional[bool] = None browserbase: Optional[Any] = None + package_dependencies: List[str] = ["browserbase"] def __init__( self, diff --git a/src/crewai_tools/tools/dalle_tool/dalle_tool.py b/src/crewai_tools/tools/dalle_tool/dalle_tool.py index 7040de11a..8957d9636 100644 --- a/src/crewai_tools/tools/dalle_tool/dalle_tool.py +++ b/src/crewai_tools/tools/dalle_tool/dalle_tool.py @@ -3,13 +3,13 @@ from typing import Type from crewai.tools import BaseTool from openai import OpenAI -from pydantic import BaseModel +from pydantic import BaseModel, Field class ImagePromptSchema(BaseModel): """Input for Dall-E Tool.""" - image_description: str = "Description of the image to be generated by Dall-E." 
+ image_description: str = Field(description="Description of the image to be generated by Dall-E.") class DallETool(BaseTool): diff --git a/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py b/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py index 428cea5d3..fe73179cb 100644 --- a/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py +++ b/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py @@ -69,6 +69,7 @@ class DatabricksQueryTool(BaseTool): default_warehouse_id: Optional[str] = None _workspace_client: Optional["WorkspaceClient"] = None + package_dependencies: List[str] = ["databricks-sdk"] def __init__( self, diff --git a/src/crewai_tools/tools/exa_tools/exa_search_tool.py b/src/crewai_tools/tools/exa_tools/exa_search_tool.py index f094b0495..d626c03ed 100644 --- a/src/crewai_tools/tools/exa_tools/exa_search_tool.py +++ b/src/crewai_tools/tools/exa_tools/exa_search_tool.py @@ -1,4 +1,4 @@ -from typing import Any, Optional, Type +from typing import Any, Optional, Type, List from pydantic import BaseModel, Field from crewai.tools import BaseTool @@ -35,6 +35,7 @@ class EXASearchTool(BaseTool): content: Optional[bool] = False summary: Optional[bool] = False type: Optional[str] = "auto" + package_dependencies: List[str] = ["exa_py"] def __init__( self, diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py index ee7e5e3d9..6642fbd54 100644 --- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py @@ -1,4 +1,4 @@ -from typing import Any, Optional, Type +from typing import Any, Optional, Type, List from crewai.tools import BaseTool from pydantic import BaseModel, ConfigDict, Field, PrivateAttr @@ -55,6 +55,7 @@ class FirecrawlCrawlWebsiteTool(BaseTool): } ) _firecrawl: Optional["FirecrawlApp"] = PrivateAttr(None) + package_dependencies: List[str] = ["firecrawl-py"] def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) diff --git a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py index fcb5c6c8d..acb1c0af5 100644 --- a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py @@ -1,4 +1,4 @@ -from typing import Any, Optional, Type, Dict +from typing import Any, Optional, Type, Dict, List from crewai.tools import BaseTool from pydantic import BaseModel, ConfigDict, Field, PrivateAttr @@ -48,6 +48,7 @@ class FirecrawlScrapeWebsiteTool(BaseTool): ) _firecrawl: Optional["FirecrawlApp"] = PrivateAttr(None) + package_dependencies: List[str] = ["firecrawl-py"] def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) diff --git a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py index 8b563778c..0fb091b68 100644 --- a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py +++ b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING, Any, Dict, Optional, Type +from typing import TYPE_CHECKING, Any, Dict, 
Optional, Type, List from crewai.tools import BaseTool from pydantic import BaseModel, ConfigDict, Field, PrivateAttr @@ -57,6 +57,7 @@ class FirecrawlSearchTool(BaseTool): } ) _firecrawl: Optional["FirecrawlApp"] = PrivateAttr(None) + package_dependencies: List[str] = ["firecrawl-py"] def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) diff --git a/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py b/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py index b802d1859..5359427b0 100644 --- a/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py +++ b/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py @@ -1,5 +1,5 @@ import os -from typing import Any, Optional, Type, Dict, Literal, Union +from typing import Any, Optional, Type, Dict, Literal, Union, List from crewai.tools import BaseTool from pydantic import BaseModel, Field @@ -25,6 +25,7 @@ class HyperbrowserLoadTool(BaseTool): args_schema: Type[BaseModel] = HyperbrowserLoadToolSchema api_key: Optional[str] = None hyperbrowser: Optional[Any] = None + package_dependencies: List[str] = ["hyperbrowser"] def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) @@ -65,7 +66,7 @@ class HyperbrowserLoadTool(BaseTool): if "scrape_options" in params: params["scrape_options"] = ScrapeOptions(**params["scrape_options"]) return params - + def _extract_content(self, data: Union[Any, None]): """Extract content from response data.""" content = "" @@ -81,7 +82,7 @@ class HyperbrowserLoadTool(BaseTool): raise ImportError( "`hyperbrowser` package not found, please run `pip install hyperbrowser`" ) - + params = self._prepare_params(params) if operation == 'scrape': diff --git a/src/crewai_tools/tools/linkup/linkup_search_tool.py b/src/crewai_tools/tools/linkup/linkup_search_tool.py index 4eb2d82b3..c35c7fac3 100644 --- a/src/crewai_tools/tools/linkup/linkup_search_tool.py +++ b/src/crewai_tools/tools/linkup/linkup_search_tool.py @@ -1,4 +1,4 @@ -from typing import Any +from typing import Any, List from crewai.tools import BaseTool @@ -23,6 +23,7 @@ class LinkupSearchTool(BaseTool): "Performs an API call to Linkup to retrieve contextual information." 
) _client: LinkupClient = PrivateAttr() # type: ignore + package_dependencies: List[str] = ["linkup-sdk"] def __init__(self, api_key: str): """ diff --git a/src/crewai_tools/tools/multion_tool/multion_tool.py b/src/crewai_tools/tools/multion_tool/multion_tool.py index d49321dc0..3c8d17819 100644 --- a/src/crewai_tools/tools/multion_tool/multion_tool.py +++ b/src/crewai_tools/tools/multion_tool/multion_tool.py @@ -1,6 +1,6 @@ """Multion tool spec.""" -from typing import Any, Optional +from typing import Any, Optional, List from crewai.tools import BaseTool @@ -16,6 +16,7 @@ class MultiOnTool(BaseTool): session_id: Optional[str] = None local: bool = False max_steps: int = 3 + package_dependencies: List[str] = ["multion"] def __init__( self, diff --git a/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py b/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py index 602b45864..30b78a3c4 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py +++ b/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING, Any, Type +from typing import TYPE_CHECKING, Any, Type, List from crewai.tools import BaseTool from pydantic import BaseModel, ConfigDict, Field @@ -41,6 +41,7 @@ class PatronusLocalEvaluatorTool(BaseTool): evaluated_model_gold_answer: str model_config = ConfigDict(arbitrary_types_allowed=True) + package_dependencies: List[str] = ["patronus"] def __init__( self, diff --git a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py index 29f172cdf..73e373ae8 100644 --- a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py +++ b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py @@ -1,6 +1,6 @@ import json import os -from typing import Any, Callable, Optional, Type +from typing import Any, Callable, Optional, Type, List try: @@ -74,6 +74,7 @@ class QdrantVectorSearchTool(BaseTool): default=None, description="A custom embedding function to use for vectorization. 
If not provided, the default model will be used.", ) + package_dependencies: List[str] = ["qdrant-client"] def __init__(self, **kwargs): super().__init__(**kwargs) diff --git a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py index 70764c294..bc3bd667b 100644 --- a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py +++ b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py @@ -1,5 +1,5 @@ import os -from typing import TYPE_CHECKING, Any, Optional, Type +from typing import TYPE_CHECKING, Any, Optional, Type, List from urllib.parse import urlparse from crewai.tools import BaseTool @@ -67,6 +67,7 @@ class ScrapegraphScrapeTool(BaseTool): api_key: Optional[str] = None enable_logging: bool = False _client: Optional["Client"] = None + package_dependencies: List[str] = ["scrapegraph-py"] def __init__( self, diff --git a/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py b/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py index 4d6b72b61..60fc75e16 100644 --- a/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py +++ b/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py @@ -1,5 +1,5 @@ import logging -from typing import Any, Dict, Literal, Optional, Type +from typing import Any, Dict, Literal, Optional, Type, List from crewai.tools import BaseTool from pydantic import BaseModel, Field @@ -28,6 +28,7 @@ class ScrapflyScrapeWebsiteTool(BaseTool): args_schema: Type[BaseModel] = ScrapflyScrapeWebsiteToolSchema api_key: str = None scrapfly: Optional[Any] = None + package_dependencies: List[str] = ["scrapfly-sdk"] def __init__(self, api_key: str): super().__init__() diff --git a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py index 3976facef..5f7365c8a 100644 --- a/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py +++ b/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py @@ -1,6 +1,6 @@ import re import time -from typing import Any, Optional, Type +from typing import Any, Optional, Type, List from urllib.parse import urlparse from crewai.tools import BaseTool @@ -58,6 +58,7 @@ class SeleniumScrapingTool(BaseTool): css_element: Optional[str] = None return_html: Optional[bool] = False _by: Optional[Any] = None + package_dependencies: List[str] = ["selenium", "webdriver-manager"] def __init__( self, diff --git a/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py b/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py index f6e639a37..c0a5ca9c9 100644 --- a/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py +++ b/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py @@ -1,6 +1,6 @@ import os import re -from typing import Any, Optional, Union +from typing import Any, Optional, Union, List from crewai.tools import BaseTool @@ -8,6 +8,8 @@ from crewai.tools import BaseTool class SerpApiBaseTool(BaseTool): """Base class for SerpApi functionality with shared capabilities.""" + package_dependencies: List[str] = ["serpapi"] + client: Optional[Any] = None def __init__(self, **kwargs): diff --git a/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py b/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py index bacec2917..a4cd21044 100644 --- 
a/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py +++ b/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py @@ -99,6 +99,7 @@ class SnowflakeSearchTool(BaseTool): _pool_lock: Optional[asyncio.Lock] = None _thread_pool: Optional[ThreadPoolExecutor] = None _model_rebuilt: bool = False + package_dependencies: List[str] = ["snowflake-connector-python", "snowflake-sqlalchemy", "cryptography"] def __init__(self, **data): """Initialize SnowflakeSearchTool.""" diff --git a/src/crewai_tools/tools/spider_tool/spider_tool.py b/src/crewai_tools/tools/spider_tool/spider_tool.py index ff52a35dc..853833261 100644 --- a/src/crewai_tools/tools/spider_tool/spider_tool.py +++ b/src/crewai_tools/tools/spider_tool/spider_tool.py @@ -1,5 +1,5 @@ import logging -from typing import Any, Dict, Literal, Optional, Type +from typing import Any, Dict, Literal, Optional, Type, List from urllib.parse import unquote, urlparse from crewai.tools import BaseTool @@ -53,6 +53,7 @@ class SpiderTool(BaseTool): spider: Any = None log_failures: bool = True config: SpiderToolConfig = SpiderToolConfig() + package_dependencies: List[str] = ["spider-client"] def __init__( self, diff --git a/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py b/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py index 5a4d5f485..557c6cb6f 100644 --- a/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py +++ b/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py @@ -88,6 +88,7 @@ class StagehandToolSchema(BaseModel): class StagehandTool(BaseTool): + package_dependencies: List[str] = ["stagehand"] """ A tool that uses Stagehand to automate web browser interactions using natural language. diff --git a/src/crewai_tools/tools/tavily_extractor_tool/tavily_extractor_tool.py b/src/crewai_tools/tools/tavily_extractor_tool/tavily_extractor_tool.py index 0320ab104..043e01fac 100644 --- a/src/crewai_tools/tools/tavily_extractor_tool/tavily_extractor_tool.py +++ b/src/crewai_tools/tools/tavily_extractor_tool/tavily_extractor_tool.py @@ -26,6 +26,7 @@ class TavilyExtractorToolSchema(BaseModel): class TavilyExtractorTool(BaseTool): + package_dependencies: List[str] = ["tavily-python"] """ Tool that uses the Tavily API to extract content from web pages. 
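The hunks above all add the same field: a `package_dependencies` list declared directly on the tool class, which the spec generator can read to report what each tool needs installed. A minimal sketch of the same declaration on a hypothetical custom tool, assuming only that `crewai` is available (the tool name and the `some-vendor-sdk` package are illustrative, not a real dependency):

```python
from typing import List, Type

from crewai.tools import BaseTool
from pydantic import BaseModel, Field


class MyVendorToolSchema(BaseModel):
    query: str = Field(..., description="Search term to send to the vendor API")


class MyVendorTool(BaseTool):
    name: str = "My Vendor Search"
    description: str = "Searches the vendor API for matching records."
    args_schema: Type[BaseModel] = MyVendorToolSchema
    # Declared the same way as in the hunks above; the spec generator
    # reads this list to report which pip packages the tool requires.
    package_dependencies: List[str] = ["some-vendor-sdk"]  # hypothetical package

    def _run(self, query: str) -> str:
        # A real tool would import the vendor SDK here and raise ImportError
        # with an install hint when the package is missing.
        return f"results for {query}"
```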
diff --git a/src/crewai_tools/tools/tavily_search_tool/tavily_search_tool.py b/src/crewai_tools/tools/tavily_search_tool/tavily_search_tool.py index 1179be90d..16841c380 100644 --- a/src/crewai_tools/tools/tavily_search_tool/tavily_search_tool.py +++ b/src/crewai_tools/tools/tavily_search_tool/tavily_search_tool.py @@ -1,6 +1,6 @@ from crewai.tools import BaseTool from pydantic import BaseModel, Field -from typing import Optional, Type, Any, Union, Literal, Sequence +from typing import Optional, Type, Any, Union, Literal, Sequence, List from dotenv import load_dotenv import os import json @@ -101,6 +101,7 @@ class TavilySearchTool(BaseTool): default=1000, description="Maximum length for the 'content' of each search result to avoid context window issues.", ) + package_dependencies: List[str] = ["tavily-python"] def __init__(self, **kwargs: Any): super().__init__(**kwargs) diff --git a/src/crewai_tools/tools/weaviate_tool/vector_search.py b/src/crewai_tools/tools/weaviate_tool/vector_search.py index d363ba7e1..fa332f231 100644 --- a/src/crewai_tools/tools/weaviate_tool/vector_search.py +++ b/src/crewai_tools/tools/weaviate_tool/vector_search.py @@ -1,6 +1,6 @@ import json import os -from typing import Any, Optional, Type +from typing import Any, Optional, Type, List try: import weaviate @@ -31,6 +31,7 @@ class WeaviateToolSchema(BaseModel): class WeaviateVectorSearchTool(BaseTool): """Tool to search the Weaviate database""" + package_dependencies: List[str] = ["weaviate-client"] name: str = "WeaviateVectorSearchTool" description: str = "A tool to search the Weaviate database for relevant information on internal documents." args_schema: Type[BaseModel] = WeaviateToolSchema @@ -48,6 +49,7 @@ class WeaviateVectorSearchTool(BaseTool): ..., description="The API key for the Weaviate cluster", ) + package_dependencies: List[str] = ["weaviate-client"] def __init__(self, **kwargs): super().__init__(**kwargs) diff --git a/tests/test_generate_tool_specs.py b/tests/test_generate_tool_specs.py index cb3b18cd5..1315fca49 100644 --- a/tests/test_generate_tool_specs.py +++ b/tests/test_generate_tool_specs.py @@ -1,12 +1,12 @@ import json -from typing import List, Optional +from typing import List, Optional, Type import pytest from pydantic import BaseModel, Field from unittest import mock from generate_tool_specs import ToolSpecExtractor -from crewai.tools.base_tool import EnvVar +from crewai.tools.base_tool import BaseTool, EnvVar class MockToolSchema(BaseModel): query: str = Field(..., description="The query parameter") @@ -14,54 +14,26 @@ class MockToolSchema(BaseModel): filters: Optional[List[str]] = Field(None, description="Optional filters to apply") -class MockTool: - name = "Mock Search Tool" - description = "A tool that mocks search functionality" - args_schema = MockToolSchema +class MockTool(BaseTool): + name: str = "Mock Search Tool" + description: str = "A tool that mocks search functionality" + args_schema: Type[BaseModel] = MockToolSchema + + another_parameter: str = Field("Another way to define a default value", description="") + my_parameter: str = Field("This is default value", description="What a description") + my_parameter_bool: bool = Field(False) + package_dependencies: List[str] = Field(["this-is-a-required-package", "another-required-package"], description="") + env_vars: List[EnvVar] = [ + EnvVar(name="SERPER_API_KEY", description="API key for Serper", required=True, default=None), + EnvVar(name="API_RATE_LIMIT", description="API rate limit", required=False, default="100") + ] 
@pytest.fixture def extractor(): ext = ToolSpecExtractor() - MockTool.__pydantic_core_schema__ = create_mock_schema(MockTool) - MockTool.args_schema.__pydantic_core_schema__ = create_mock_schema_args(MockTool.args_schema) return ext -def create_mock_schema(cls): - return { - "type": "model", - "cls": cls, - "schema": { - "type": "model-fields", - "fields": { - "name": {"type": "model-field", "schema": {"type": "default", "schema": {"type": "str"}, "default": cls.name}, "metadata": {}}, - "description": {"type": "model-field", "schema": {"type": "default", "schema": {"type": "str"}, "default": cls.description}, "metadata": {}}, - "args_schema": {"type": "model-field", "schema": {"type": "default", "schema": {"type": "is-subclass", "cls": BaseModel}, "default": cls.args_schema}, "metadata": {}}, - "env_vars": { - "type": "model-field", "schema": {"type": "default", "schema": {"type": "list", "items_schema": {"type": "model", "cls": "INSPECT CLASS", "schema": {"type": "model-fields", "fields": {"name": {"type": "model-field", "schema": {"type": "str"}, "metadata": {}}, "description": {"type": "model-field", "schema": {"type": "str"}, "metadata": {}}, "required": {"type": "model-field", "schema": {"type": "default", "schema": {"type": "bool"}, "default": True}, "metadata": {}}, "default": {"type": "model-field", "schema": {"type": "default", "schema": {"type": "nullable", "schema": {"type": "str"}}, "default": None}, "metadata": {}},}, "model_name": "EnvVar", "computed_fields": []}, "custom_init": False, "root_model": False, "config": {"title": "EnvVar"}, "ref": "crewai.tools.base_tool.EnvVar:4593650640", "metadata": {"pydantic_js_functions": ["INSPECT __get_pydantic_json_schema__"]}}}, "default": [EnvVar(name='SERPER_API_KEY', description='API key for Serper', required=True, default=None), EnvVar(name='API_RATE_LIMIT', description='API rate limit', required=False, default="100")]}, "metadata": {} - } - }, - "model_name": cls.__name__ - } - } - - -def create_mock_schema_args(cls): - return { - "type": "model", - "cls": cls, - "schema": { - "type": "model-fields", - "fields": { - "query": {"type": "model-field", "schema": {"type": "default", "schema": {"type": "str"}, "default": "The query parameter"}}, - "count": {"type": "model-field", "schema": {"type": "default", "schema": {"type": "int"}, "default": 5}, "metadata": {"pydantic_js_updates": {"description": "Number of results to return"}}}, - "filters": {"type": "model-field", "schema": {"type": "nullable", "schema": {"type": "list", "items_schema": {"type": "str"}}}} - }, - "model_name": cls.__name__ - } - } - - def test_unwrap_schema(extractor): nested_schema = { "type": "function-after", @@ -72,19 +44,6 @@ def test_unwrap_schema(extractor): assert result["value"] == "test" -@pytest.mark.parametrize( - "field, fallback, expected", - [ - ({"schema": {"default": "test_value"}}, None, "test_value"), - ({}, "fallback_value", "fallback_value"), - ({"schema": {"default": 123}}, "fallback_value", "fallback_value") - ] -) -def test_extract_field_default(extractor, field, fallback, expected): - result = extractor._extract_field_default(field, fallback=fallback) - assert result == expected - - @pytest.mark.parametrize( "schema, expected", [ @@ -112,7 +71,7 @@ def test_extract_param_type(extractor, info, expected_type): assert extractor._extract_param_type(info) == expected_type -def test_extract_tool_info(extractor): +def test_extract_all_tools(extractor): with mock.patch("generate_tool_specs.dir", return_value=["MockTool"]), \ 
mock.patch("generate_tool_specs.getattr", return_value=MockTool): extractor.extract_all_tools() @@ -120,6 +79,16 @@ def test_extract_tool_info(extractor): assert len(extractor.tools_spec) == 1 tool_info = extractor.tools_spec[0] + assert tool_info.keys() == { + "name", + "humanized_name", + "description", + "run_params", + "env_vars", + "init_params", + "package_dependencies", + } + assert tool_info["name"] == "MockTool" assert tool_info["humanized_name"] == "Mock Search Tool" assert tool_info["description"] == "A tool that mocks search functionality" @@ -142,12 +111,16 @@ def test_extract_tool_info(extractor): params = {p["name"]: p for p in tool_info["run_params"]} assert params["query"]["description"] == "The query parameter" assert params["query"]["type"] == "str" + assert params["query"]["default"] == "" - assert params["count"]["description"] == "Number of results to return" assert params["count"]["type"] == "int" + assert params["count"]["default"] == 5 - assert params["filters"]["description"] == "" + assert params["filters"]["description"] == "Optional filters to apply" assert params["filters"]["type"] == "list[str]" + assert params["filters"]["default"] == "" + + assert tool_info["package_dependencies"] == ["this-is-a-required-package", "another-required-package"] def test_save_to_json(extractor, tmp_path): From 2cca45b45a66a6da062c7da986c3166769c67ef8 Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Tue, 17 Jun 2025 08:52:51 -0300 Subject: [PATCH 341/391] refactor: renaming init_params and run_params to reflect their schema. (#332) (#333) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We’re currently using the JSON Schema standard for these fields --- tests/test_generate_tool_specs.py | 161 +++++++++++++++--------------- 1 file changed, 80 insertions(+), 81 deletions(-) diff --git a/tests/test_generate_tool_specs.py b/tests/test_generate_tool_specs.py index 1315fca49..73034a174 100644 --- a/tests/test_generate_tool_specs.py +++ b/tests/test_generate_tool_specs.py @@ -44,83 +44,98 @@ def test_unwrap_schema(extractor): assert result["value"] == "test" -@pytest.mark.parametrize( - "schema, expected", - [ - ({"type": "str"}, "str"), - ({"type": "list", "items_schema": {"type": "str"}}, "list[str]"), - ({"type": "dict", "keys_schema": {"type": "str"}, "values_schema": {"type": "int"}}, "dict[str, int]"), - ({"type": "union", "choices": [{"type": "str"}, {"type": "int"}]}, "union[str, int]"), - ({"type": "custom_type"}, "custom_type"), - ({}, "unknown"), - ] -) -def test_schema_type_to_str(extractor, schema, expected): - assert extractor._schema_type_to_str(schema) == expected - - -@pytest.mark.parametrize( - "info, expected_type", - [ - ({"schema": {"type": "str"}}, "str"), - ({"schema": {"type": "nullable", "schema": {"type": "int"}}}, "int"), - ({"schema": {"type": "default", "schema": {"type": "list", "items_schema": {"type": "str"}}}}, "list[str]"), - ] -) -def test_extract_param_type(extractor, info, expected_type): - assert extractor._extract_param_type(info) == expected_type - - -def test_extract_all_tools(extractor): +@pytest.fixture +def mock_tool_extractor(extractor): with mock.patch("generate_tool_specs.dir", return_value=["MockTool"]), \ mock.patch("generate_tool_specs.getattr", return_value=MockTool): extractor.extract_all_tools() - assert len(extractor.tools_spec) == 1 - tool_info = extractor.tools_spec[0] + return extractor.tools_spec[0] - assert tool_info.keys() == { - "name", - "humanized_name", - "description", - 
"run_params", - "env_vars", - "init_params", - "package_dependencies", - } +def test_extract_basic_tool_info(mock_tool_extractor): + tool_info = mock_tool_extractor - assert tool_info["name"] == "MockTool" - assert tool_info["humanized_name"] == "Mock Search Tool" - assert tool_info["description"] == "A tool that mocks search functionality" + assert tool_info.keys() == { + "name", + "humanized_name", + "description", + "run_params_schema", + "env_vars", + "init_params_schema", + "package_dependencies", + } - assert len(tool_info["env_vars"]) == 2 - api_key_var, rate_limit_var = tool_info["env_vars"] + assert tool_info["name"] == "MockTool" + assert tool_info["humanized_name"] == "Mock Search Tool" + assert tool_info["description"] == "A tool that mocks search functionality" - assert api_key_var["name"] == "SERPER_API_KEY" - assert api_key_var["description"] == "API key for Serper" - assert api_key_var["required"] == True - assert api_key_var["default"] == None +def test_extract_init_params_schema(mock_tool_extractor): + tool_info = mock_tool_extractor + init_params_schema = tool_info["init_params_schema"] - assert rate_limit_var["name"] == "API_RATE_LIMIT" - assert rate_limit_var["description"] == "API rate limit" - assert rate_limit_var["required"] == False - assert rate_limit_var["default"] == "100" + assert init_params_schema.keys() == { + "$defs", + "properties", + "title", + "type", + } - assert len(tool_info["run_params"]) == 3 + another_parameter = init_params_schema['properties']['another_parameter'] + assert another_parameter["description"] == "" + assert another_parameter["default"] == "Another way to define a default value" + assert another_parameter["type"] == "string" - params = {p["name"]: p for p in tool_info["run_params"]} - assert params["query"]["description"] == "The query parameter" - assert params["query"]["type"] == "str" - assert params["query"]["default"] == "" + my_parameter = init_params_schema['properties']['my_parameter'] + assert my_parameter["description"] == "What a description" + assert my_parameter["default"] == "This is default value" + assert my_parameter["type"] == "string" - assert params["count"]["type"] == "int" - assert params["count"]["default"] == 5 + my_parameter_bool = init_params_schema['properties']['my_parameter_bool'] + assert my_parameter_bool["default"] == False + assert my_parameter_bool["type"] == "boolean" - assert params["filters"]["description"] == "Optional filters to apply" - assert params["filters"]["type"] == "list[str]" - assert params["filters"]["default"] == "" +def test_extract_env_vars(mock_tool_extractor): + tool_info = mock_tool_extractor - assert tool_info["package_dependencies"] == ["this-is-a-required-package", "another-required-package"] + assert len(tool_info["env_vars"]) == 2 + api_key_var, rate_limit_var = tool_info["env_vars"] + assert api_key_var["name"] == "SERPER_API_KEY" + assert api_key_var["description"] == "API key for Serper" + assert api_key_var["required"] == True + assert api_key_var["default"] == None + + assert rate_limit_var["name"] == "API_RATE_LIMIT" + assert rate_limit_var["description"] == "API rate limit" + assert rate_limit_var["required"] == False + assert rate_limit_var["default"] == "100" + +def test_extract_run_params_schema(mock_tool_extractor): + tool_info = mock_tool_extractor + + run_params_schema = tool_info["run_params_schema"] + assert run_params_schema.keys() == { + "properties", + "required", + "title", + "type", + } + + query_param = run_params_schema["properties"]["query"] + assert 
query_param["description"] == "The query parameter" + assert query_param["type"] == "string" + + count_param = run_params_schema["properties"]["count"] + assert count_param["type"] == "integer" + assert count_param["default"] == 5 + + filters_param = run_params_schema["properties"]["filters"] + assert filters_param["description"] == "Optional filters to apply" + assert filters_param["default"] == None + assert filters_param['anyOf'] == [{'items': {'type': 'string'}, 'type': 'array'}, {'type': 'null'}] + +def test_extract_package_dependencies(mock_tool_extractor): + tool_info = mock_tool_extractor + assert tool_info["package_dependencies"] == ["this-is-a-required-package", "another-required-package"] def test_save_to_json(extractor, tmp_path): @@ -128,7 +143,7 @@ def test_save_to_json(extractor, tmp_path): "name": "TestTool", "humanized_name": "Test Tool", "description": "A test tool", - "run_params": [ + "run_params_schema": [ {"name": "param1", "description": "Test parameter", "type": "str"} ] }] @@ -144,20 +159,4 @@ def test_save_to_json(extractor, tmp_path): assert "tools" in data assert len(data["tools"]) == 1 assert data["tools"][0]["humanized_name"] == "Test Tool" - assert data["tools"][0]["run_params"][0]["name"] == "param1" - - -@pytest.mark.integration -def test_full_extraction_process(): - extractor = ToolSpecExtractor() - specs = extractor.extract_all_tools() - - assert len(specs) > 0 - - for tool in specs: - assert "name" in tool - assert "humanized_name" in tool and tool["humanized_name"] - assert "description" in tool - assert isinstance(tool["run_params"], list) - for param in tool["run_params"]: - assert "name" in param and param["name"] \ No newline at end of file + assert data["tools"][0]["run_params_schema"][0]["name"] == "param1" From 9e92b84bccf73e7cc113ef1083f6f2c1891789e0 Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Fri, 20 Jun 2025 09:06:11 -0300 Subject: [PATCH 342/391] feat: mapping explicitly tool environment variables (#338) --- .../tools/apify_actors_tool/apify_actors_tool.py | 5 ++++- .../tools/brave_search_tool/brave_search_tool.py | 7 +++++-- .../tools/browserbase_load_tool/browserbase_load_tool.py | 6 +++++- .../tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py | 5 ++++- .../tools/patronus_eval_tool/patronus_eval_tool.py | 5 ++++- .../scrapegraph_scrape_tool/scrapegraph_scrape_tool.py | 5 ++++- src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py | 5 ++++- .../tools/serply_api_tool/serply_web_search_tool.py | 7 +++++-- .../tools/tavily_extractor_tool/tavily_extractor_tool.py | 5 ++++- .../tools/tavily_search_tool/tavily_search_tool.py | 5 ++++- src/crewai_tools/tools/weaviate_tool/vector_search.py | 5 ++++- 11 files changed, 47 insertions(+), 13 deletions(-) diff --git a/src/crewai_tools/tools/apify_actors_tool/apify_actors_tool.py b/src/crewai_tools/tools/apify_actors_tool/apify_actors_tool.py index 44c4839e8..127169676 100644 --- a/src/crewai_tools/tools/apify_actors_tool/apify_actors_tool.py +++ b/src/crewai_tools/tools/apify_actors_tool/apify_actors_tool.py @@ -1,4 +1,4 @@ -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from pydantic import Field from typing import TYPE_CHECKING, Any, Dict, List import os @@ -7,6 +7,9 @@ if TYPE_CHECKING: from langchain_apify import ApifyActorsTool as _ApifyActorsTool class ApifyActorsTool(BaseTool): + env_vars: List[EnvVar] = [ + EnvVar(name="APIFY_API_TOKEN", description="API token for Apify platform access", required=True), + ] """Tool that runs Apify Actors. 
To use, you should have the environment variable `APIFY_API_TOKEN` set diff --git a/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py b/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py index 11035739d..1f96d452a 100644 --- a/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py +++ b/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py @@ -1,10 +1,10 @@ import datetime import os import time -from typing import Any, ClassVar, Optional, Type +from typing import Any, ClassVar, List, Optional, Type import requests -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, Field @@ -48,6 +48,9 @@ class BraveSearchTool(BaseTool): save_file: bool = False _last_request_time: ClassVar[float] = 0 _min_request_interval: ClassVar[float] = 1.0 # seconds + env_vars: List[EnvVar] = [ + EnvVar(name="BRAVE_API_KEY", description="API key for Brave Search", required=True), + ] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) diff --git a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py index f946baf73..b6b3612dc 100644 --- a/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py +++ b/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py @@ -1,7 +1,7 @@ import os from typing import Any, Optional, Type, List -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, Field @@ -20,6 +20,10 @@ class BrowserbaseLoadTool(BaseTool): proxy: Optional[bool] = None browserbase: Optional[Any] = None package_dependencies: List[str] = ["browserbase"] + env_vars: List[EnvVar] = [ + EnvVar(name="BROWSERBASE_API_KEY", description="API key for Browserbase services", required=False), + EnvVar(name="BROWSERBASE_PROJECT_ID", description="Project ID for Browserbase services", required=False), + ] def __init__( self, diff --git a/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py b/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py index 5359427b0..a2571b94b 100644 --- a/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py +++ b/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py @@ -1,7 +1,7 @@ import os from typing import Any, Optional, Type, Dict, Literal, Union, List -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, Field @@ -26,6 +26,9 @@ class HyperbrowserLoadTool(BaseTool): api_key: Optional[str] = None hyperbrowser: Optional[Any] = None package_dependencies: List[str] = ["hyperbrowser"] + env_vars: List[EnvVar] = [ + EnvVar(name="HYPERBROWSER_API_KEY", description="API key for Hyperbrowser services", required=False), + ] def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) diff --git a/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py b/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py index be1f410e2..bc9a60aae 100644 --- a/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py +++ b/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py @@ -4,7 +4,7 @@ import warnings from typing import Any, Dict, List, Optional import requests -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar class PatronusEvalTool(BaseTool): @@ -13,6 +13,9 @@ class PatronusEvalTool(BaseTool): evaluators: List[Dict[str, 
str]] = [] criteria: List[Dict[str, str]] = [] description: str = "" + env_vars: List[EnvVar] = [ + EnvVar(name="PATRONUS_API_KEY", description="API key for Patronus evaluation services", required=True), + ] def __init__(self, **kwargs: Any): super().__init__(**kwargs) diff --git a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py index bc3bd667b..04a544fa6 100644 --- a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py +++ b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py @@ -2,7 +2,7 @@ import os from typing import TYPE_CHECKING, Any, Optional, Type, List from urllib.parse import urlparse -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, ConfigDict, Field, field_validator # Type checking import @@ -68,6 +68,9 @@ class ScrapegraphScrapeTool(BaseTool): enable_logging: bool = False _client: Optional["Client"] = None package_dependencies: List[str] = ["scrapegraph-py"] + env_vars: List[EnvVar] = [ + EnvVar(name="SCRAPEGRAPH_API_KEY", description="API key for Scrapegraph AI services", required=False), + ] def __init__( self, diff --git a/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py b/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py index c0a5ca9c9..aa73d63d5 100644 --- a/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py +++ b/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py @@ -2,13 +2,16 @@ import os import re from typing import Any, Optional, Union, List -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar class SerpApiBaseTool(BaseTool): """Base class for SerpApi functionality with shared capabilities.""" package_dependencies: List[str] = ["serpapi"] + env_vars: List[EnvVar] = [ + EnvVar(name="SERPAPI_API_KEY", description="API key for SerpApi searches", required=True), + ] client: Optional[Any] = None diff --git a/src/crewai_tools/tools/serply_api_tool/serply_web_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_web_search_tool.py index b4d1ae4b5..6801f4065 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_web_search_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_web_search_tool.py @@ -1,9 +1,9 @@ import os -from typing import Any, Optional, Type +from typing import Any, List, Optional, Type from urllib.parse import urlencode import requests -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, Field @@ -26,6 +26,9 @@ class SerplyWebSearchTool(BaseTool): proxy_location: Optional[str] = "US" query_payload: Optional[dict] = {} headers: Optional[dict] = {} + env_vars: List[EnvVar] = [ + EnvVar(name="SERPLY_API_KEY", description="API key for Serply services", required=True), + ] def __init__( self, diff --git a/src/crewai_tools/tools/tavily_extractor_tool/tavily_extractor_tool.py b/src/crewai_tools/tools/tavily_extractor_tool/tavily_extractor_tool.py index 043e01fac..5e8a760ee 100644 --- a/src/crewai_tools/tools/tavily_extractor_tool/tavily_extractor_tool.py +++ b/src/crewai_tools/tools/tavily_extractor_tool/tavily_extractor_tool.py @@ -1,4 +1,4 @@ -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, Field from typing import Optional, Type, Any, Union, List, Literal from dotenv import load_dotenv @@ -27,6 +27,9 @@ class TavilyExtractorToolSchema(BaseModel): class 
TavilyExtractorTool(BaseTool): package_dependencies: List[str] = ["tavily-python"] + env_vars: List[EnvVar] = [ + EnvVar(name="TAVILY_API_KEY", description="API key for Tavily extraction service", required=True), + ] """ Tool that uses the Tavily API to extract content from web pages. diff --git a/src/crewai_tools/tools/tavily_search_tool/tavily_search_tool.py b/src/crewai_tools/tools/tavily_search_tool/tavily_search_tool.py index 16841c380..2f9d6dcca 100644 --- a/src/crewai_tools/tools/tavily_search_tool/tavily_search_tool.py +++ b/src/crewai_tools/tools/tavily_search_tool/tavily_search_tool.py @@ -1,4 +1,4 @@ -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, Field from typing import Optional, Type, Any, Union, Literal, Sequence, List from dotenv import load_dotenv @@ -102,6 +102,9 @@ class TavilySearchTool(BaseTool): description="Maximum length for the 'content' of each search result to avoid context window issues.", ) package_dependencies: List[str] = ["tavily-python"] + env_vars: List[EnvVar] = [ + EnvVar(name="TAVILY_API_KEY", description="API key for Tavily search service", required=True), + ] def __init__(self, **kwargs: Any): super().__init__(**kwargs) diff --git a/src/crewai_tools/tools/weaviate_tool/vector_search.py b/src/crewai_tools/tools/weaviate_tool/vector_search.py index fa332f231..13efb018f 100644 --- a/src/crewai_tools/tools/weaviate_tool/vector_search.py +++ b/src/crewai_tools/tools/weaviate_tool/vector_search.py @@ -15,7 +15,7 @@ except ImportError: Vectorizers = Any Auth = Any -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, Field @@ -41,6 +41,9 @@ class WeaviateVectorSearchTool(BaseTool): collection_name: Optional[str] = None limit: Optional[int] = Field(default=3) headers: Optional[dict] = None + env_vars: List[EnvVar] = [ + EnvVar(name="OPENAI_API_KEY", description="OpenAI API key for embedding generation and retrieval", required=True), + ] weaviate_cluster_url: str = Field( ..., description="The URL of the Weaviate cluster", From 31b3dd2b94a0a7ae8744e1e0f4c4dad8f895d9c8 Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Fri, 20 Jun 2025 10:27:10 -0300 Subject: [PATCH 343/391] feat: Add ToolCollection class for named tool access (#339) This change allows accessing tools by name (tools["tool_name"]) in addition to index (tools[0]), making it more intuitive and convenient to work with multiple tools without needing to remember their position in the list --- src/crewai_tools/adapters/mcp_adapter.py | 6 +- src/crewai_tools/adapters/tool_collection.py | 59 ++++++ .../crewai_enterprise_tools.py | 12 +- .../{mcp_adapter.py => mcp_adapter_test.py} | 7 +- tests/tools/crewai_enterprise_tools_test.py | 70 +++++++ tests/tools/tool_collection_test.py | 172 ++++++++++++++++++ 6 files changed, 316 insertions(+), 10 deletions(-) create mode 100644 src/crewai_tools/adapters/tool_collection.py rename tests/adapters/{mcp_adapter.py => mcp_adapter_test.py} (96%) create mode 100644 tests/tools/crewai_enterprise_tools_test.py create mode 100644 tests/tools/tool_collection_test.py diff --git a/src/crewai_tools/adapters/mcp_adapter.py b/src/crewai_tools/adapters/mcp_adapter.py index bcb38818d..bfff480eb 100644 --- a/src/crewai_tools/adapters/mcp_adapter.py +++ b/src/crewai_tools/adapters/mcp_adapter.py @@ -4,7 +4,7 @@ import logging from typing import TYPE_CHECKING, Any from crewai.tools import BaseTool - +from crewai_tools.adapters.tool_collection import ToolCollection 
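The `EnvVar` entries added across the tools above follow one declaration pattern: a class-level `env_vars` list that the generated specs can use to list which environment variables a tool reads. A minimal sketch of that pattern on a hypothetical tool (the tool and variable names are illustrative):

```python
from typing import List

from crewai.tools import BaseTool, EnvVar


class MyVendorTool(BaseTool):
    name: str = "My Vendor Search"
    description: str = "Searches the vendor API for matching records."
    # Mirrors the declarations threaded through the diffs above; `required`
    # and `default` feed straight into the generated tool spec.
    env_vars: List[EnvVar] = [
        EnvVar(
            name="MY_VENDOR_API_KEY",  # hypothetical variable name
            description="API key for the vendor service",
            required=True,
        ),
    ]

    def _run(self, query: str) -> str:
        return f"results for {query}"
```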
""" MCPServer for CrewAI. @@ -114,7 +114,7 @@ class MCPServerAdapter: self._adapter.__exit__(None, None, None) @property - def tools(self) -> list[BaseTool]: + def tools(self) -> ToolCollection[BaseTool]: """The CrewAI tools available from the MCP server. Raises: @@ -127,7 +127,7 @@ class MCPServerAdapter: raise ValueError( "MCP server not started, run `mcp_server.start()` first before accessing `tools`" ) - return self._tools + return ToolCollection(self._tools) def __enter__(self): """ diff --git a/src/crewai_tools/adapters/tool_collection.py b/src/crewai_tools/adapters/tool_collection.py new file mode 100644 index 000000000..f0ec9a288 --- /dev/null +++ b/src/crewai_tools/adapters/tool_collection.py @@ -0,0 +1,59 @@ +from typing import List, Optional, Union, TypeVar, Generic, Dict +from crewai.tools import BaseTool + +T = TypeVar('T', bound=BaseTool) + +class ToolCollection(list, Generic[T]): + """ + A collection of tools that can be accessed by index or name + + This class extends the built-in list to provide dictionary-like + access to tools based on their name property. + + Usage: + tools = ToolCollection(list_of_tools) + # Access by index (regular list behavior) + first_tool = tools[0] + # Access by name (new functionality) + search_tool = tools["search"] + """ + + def __init__(self, tools: Optional[List[T]] = None): + super().__init__(tools or []) + self._name_cache: Dict[str, T] = {} + self._build_name_cache() + + def _build_name_cache(self) -> None: + self._name_cache = {tool.name: tool for tool in self} + + def __getitem__(self, key: Union[int, str]) -> T: + if isinstance(key, str): + return self._name_cache[key] + return super().__getitem__(key) + + def append(self, tool: T) -> None: + super().append(tool) + self._name_cache[tool.name] = tool + + def extend(self, tools: List[T]) -> None: + super().extend(tools) + self._build_name_cache() + + def insert(self, index: int, tool: T) -> None: + super().insert(index, tool) + self._name_cache[tool.name] = tool + + def remove(self, tool: T) -> None: + super().remove(tool) + if tool.name in self._name_cache: + del self._name_cache[tool.name] + + def pop(self, index: int = -1) -> T: + tool = super().pop(index) + if tool.name in self._name_cache: + del self._name_cache[tool.name] + return tool + + def clear(self) -> None: + super().clear() + self._name_cache.clear() diff --git a/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py b/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py index 7fc97d179..8e7275e69 100644 --- a/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py +++ b/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py @@ -7,6 +7,7 @@ import typing as t import logging from crewai.tools import BaseTool from crewai_tools.adapters.enterprise_adapter import EnterpriseActionKitToolAdapter +from crewai_tools.adapters.tool_collection import ToolCollection logger = logging.getLogger(__name__) @@ -16,7 +17,7 @@ def CrewaiEnterpriseTools( actions_list: t.Optional[t.List[str]] = None, enterprise_action_kit_project_id: t.Optional[str] = None, enterprise_action_kit_project_url: t.Optional[str] = None, -) -> t.List[BaseTool]: +) -> ToolCollection[BaseTool]: """Factory function that returns crewai enterprise tools. Args: @@ -24,9 +25,11 @@ def CrewaiEnterpriseTools( If not provided, will try to use CREWAI_ENTERPRISE_TOOLS_TOKEN env var. actions_list: Optional list of specific tool names to include. If provided, only tools with these names will be returned. 
+ enterprise_action_kit_project_id: Optional ID of the Enterprise Action Kit project. + enterprise_action_kit_project_url: Optional URL of the Enterprise Action Kit project. Returns: - A list of BaseTool instances for enterprise actions + A ToolCollection of BaseTool instances for enterprise actions """ if enterprise_token is None: enterprise_token = os.environ.get("CREWAI_ENTERPRISE_TOOLS_TOKEN") @@ -47,7 +50,8 @@ def CrewaiEnterpriseTools( all_tools = adapter.tools() if actions_list is None: - return all_tools + return ToolCollection(all_tools) # Filter tools based on the provided list - return [tool for tool in all_tools if tool.name in actions_list] + filtered_tools = [tool for tool in all_tools if tool.name in actions_list] + return ToolCollection(filtered_tools) diff --git a/tests/adapters/mcp_adapter.py b/tests/adapters/mcp_adapter_test.py similarity index 96% rename from tests/adapters/mcp_adapter.py rename to tests/adapters/mcp_adapter_test.py index 569a10ae6..f2b08bc16 100644 --- a/tests/adapters/mcp_adapter.py +++ b/tests/adapters/mcp_adapter_test.py @@ -4,7 +4,7 @@ import pytest from mcp import StdioServerParameters from crewai_tools import MCPServerAdapter - +from crewai_tools.adapters.tool_collection import ToolCollection @pytest.fixture def echo_server_script(): @@ -18,7 +18,7 @@ def echo_server_script(): def echo_tool(text: str) -> str: """Echo the input text""" return f"Echo: {text}" - + mcp.run() ''' ) @@ -68,6 +68,7 @@ def test_context_manager_syntax(echo_server_script): command="uv", args=["run", "python", "-c", echo_server_script] ) with MCPServerAdapter(serverparams) as tools: + assert isinstance(tools, ToolCollection) assert len(tools) == 1 assert tools[0].name == "echo_tool" assert tools[0].run(text="hello") == "Echo: hello" @@ -91,7 +92,7 @@ def test_try_finally_syntax(echo_server_script): assert tools[0].run(text="hello") == "Echo: hello" finally: mcp_server_adapter.stop() - + def test_try_finally_syntax_sse(echo_sse_server): sse_serverparams = echo_sse_server mcp_server_adapter = MCPServerAdapter(sse_serverparams) diff --git a/tests/tools/crewai_enterprise_tools_test.py b/tests/tools/crewai_enterprise_tools_test.py new file mode 100644 index 000000000..384093e0f --- /dev/null +++ b/tests/tools/crewai_enterprise_tools_test.py @@ -0,0 +1,70 @@ +import os +import unittest +from unittest.mock import patch, MagicMock + +from crewai.tools import BaseTool +from crewai_tools.tools import CrewaiEnterpriseTools +from crewai_tools.adapters.tool_collection import ToolCollection + + +class TestCrewaiEnterpriseTools(unittest.TestCase): + def setUp(self): + self.mock_tools = [ + self._create_mock_tool("tool1", "Tool 1 Description"), + self._create_mock_tool("tool2", "Tool 2 Description"), + self._create_mock_tool("tool3", "Tool 3 Description"), + ] + self.adapter_patcher = patch( + 'crewai_tools.tools.crewai_enterprise_tools.crewai_enterprise_tools.EnterpriseActionKitToolAdapter' + ) + self.MockAdapter = self.adapter_patcher.start() + + mock_adapter_instance = self.MockAdapter.return_value + mock_adapter_instance.tools.return_value = self.mock_tools + + def tearDown(self): + self.adapter_patcher.stop() + + def _create_mock_tool(self, name, description): + mock_tool = MagicMock(spec=BaseTool) + mock_tool.name = name + mock_tool.description = description + return mock_tool + + @patch.dict(os.environ, {"CREWAI_ENTERPRISE_TOOLS_TOKEN": "env-token"}) + def test_returns_tool_collection(self): + tools = CrewaiEnterpriseTools() + self.assertIsInstance(tools, ToolCollection) + + 
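Because the factory now returns a `ToolCollection`, callers can look tools up by name as well as by position. A short usage sketch, assuming a deployed Action Kit that exposes a tool named "search" (the token and tool name here are illustrative):

```python
from crewai_tools.tools import CrewaiEnterpriseTools

# Token and tool name are placeholders for a real deployment.
tools = CrewaiEnterpriseTools(enterprise_token="my-token")

first_tool = tools[0]          # positional access, plain list behavior
search_tool = tools["search"]  # name-based access added by ToolCollection
```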
@patch.dict(os.environ, {"CREWAI_ENTERPRISE_TOOLS_TOKEN": "env-token"}) + def test_returns_all_tools_when_no_actions_list(self): + tools = CrewaiEnterpriseTools() + self.assertEqual(len(tools), 3) + self.assertEqual(tools[0].name, "tool1") + self.assertEqual(tools[1].name, "tool2") + self.assertEqual(tools[2].name, "tool3") + + @patch.dict(os.environ, {"CREWAI_ENTERPRISE_TOOLS_TOKEN": "env-token"}) + def test_filters_tools_by_actions_list(self): + tools = CrewaiEnterpriseTools(actions_list=["tool1", "tool3"]) + self.assertEqual(len(tools), 2) + self.assertEqual(tools[0].name, "tool1") + self.assertEqual(tools[1].name, "tool3") + + def test_uses_provided_parameters(self): + CrewaiEnterpriseTools( + enterprise_token="test-token", + enterprise_action_kit_project_id="project-id", + enterprise_action_kit_project_url="project-url" + ) + + self.MockAdapter.assert_called_once_with( + enterprise_action_token="test-token", + enterprise_action_kit_project_id="project-id", + enterprise_action_kit_project_url="project-url" + ) + + @patch.dict(os.environ, {"CREWAI_ENTERPRISE_TOOLS_TOKEN": "env-token"}) + def test_uses_environment_token(self): + CrewaiEnterpriseTools() + self.MockAdapter.assert_called_once_with(enterprise_action_token="env-token") \ No newline at end of file diff --git a/tests/tools/tool_collection_test.py b/tests/tools/tool_collection_test.py new file mode 100644 index 000000000..fb4f35c95 --- /dev/null +++ b/tests/tools/tool_collection_test.py @@ -0,0 +1,172 @@ +import unittest +from unittest.mock import MagicMock + +from crewai.tools import BaseTool +from crewai_tools.adapters.tool_collection import ToolCollection + + +class TestToolCollection(unittest.TestCase): + def setUp(self): + + self.search_tool = self._create_mock_tool("search", "Search Tool") + self.calculator_tool = self._create_mock_tool("calculator", "Calculator Tool") + self.translator_tool = self._create_mock_tool("translator", "Translator Tool") + + self.tools = ToolCollection([ + self.search_tool, + self.calculator_tool, + self.translator_tool + ]) + + def _create_mock_tool(self, name, description): + mock_tool = MagicMock(spec=BaseTool) + mock_tool.name = name + mock_tool.description = description + return mock_tool + + def test_initialization(self): + self.assertEqual(len(self.tools), 3) + self.assertEqual(self.tools[0].name, "search") + self.assertEqual(self.tools[1].name, "calculator") + self.assertEqual(self.tools[2].name, "translator") + + def test_empty_initialization(self): + empty_collection = ToolCollection() + self.assertEqual(len(empty_collection), 0) + self.assertEqual(empty_collection._name_cache, {}) + + def test_initialization_with_none(self): + collection = ToolCollection(None) + self.assertEqual(len(collection), 0) + self.assertEqual(collection._name_cache, {}) + + def test_access_by_index(self): + self.assertEqual(self.tools[0], self.search_tool) + self.assertEqual(self.tools[1], self.calculator_tool) + self.assertEqual(self.tools[2], self.translator_tool) + + def test_access_by_name(self): + self.assertEqual(self.tools["search"], self.search_tool) + self.assertEqual(self.tools["calculator"], self.calculator_tool) + self.assertEqual(self.tools["translator"], self.translator_tool) + + def test_key_error_for_invalid_name(self): + with self.assertRaises(KeyError): + _ = self.tools["nonexistent"] + + def test_index_error_for_invalid_index(self): + with self.assertRaises(IndexError): + _ = self.tools[10] + + def test_negative_index(self): + self.assertEqual(self.tools[-1], self.translator_tool) + 
self.assertEqual(self.tools[-2], self.calculator_tool) + self.assertEqual(self.tools[-3], self.search_tool) + + def test_append(self): + new_tool = self._create_mock_tool("new", "New Tool") + self.tools.append(new_tool) + + self.assertEqual(len(self.tools), 4) + self.assertEqual(self.tools[3], new_tool) + self.assertEqual(self.tools["new"], new_tool) + self.assertIn("new", self.tools._name_cache) + + def test_append_duplicate_name(self): + duplicate_tool = self._create_mock_tool("search", "Duplicate Search Tool") + self.tools.append(duplicate_tool) + + self.assertEqual(len(self.tools), 4) + self.assertEqual(self.tools["search"], duplicate_tool) + + def test_extend(self): + new_tools = [ + self._create_mock_tool("tool4", "Tool 4"), + self._create_mock_tool("tool5", "Tool 5"), + ] + self.tools.extend(new_tools) + + self.assertEqual(len(self.tools), 5) + self.assertEqual(self.tools["tool4"], new_tools[0]) + self.assertEqual(self.tools["tool5"], new_tools[1]) + self.assertIn("tool4", self.tools._name_cache) + self.assertIn("tool5", self.tools._name_cache) + + def test_insert(self): + new_tool = self._create_mock_tool("inserted", "Inserted Tool") + self.tools.insert(1, new_tool) + + self.assertEqual(len(self.tools), 4) + self.assertEqual(self.tools[1], new_tool) + self.assertEqual(self.tools["inserted"], new_tool) + self.assertIn("inserted", self.tools._name_cache) + + def test_remove(self): + self.tools.remove(self.calculator_tool) + + self.assertEqual(len(self.tools), 2) + with self.assertRaises(KeyError): + _ = self.tools["calculator"] + self.assertNotIn("calculator", self.tools._name_cache) + + def test_remove_nonexistent_tool(self): + nonexistent_tool = self._create_mock_tool("nonexistent", "Nonexistent Tool") + + with self.assertRaises(ValueError): + self.tools.remove(nonexistent_tool) + + def test_pop(self): + popped = self.tools.pop(1) + + self.assertEqual(popped, self.calculator_tool) + self.assertEqual(len(self.tools), 2) + with self.assertRaises(KeyError): + _ = self.tools["calculator"] + self.assertNotIn("calculator", self.tools._name_cache) + + def test_pop_last(self): + popped = self.tools.pop() + + self.assertEqual(popped, self.translator_tool) + self.assertEqual(len(self.tools), 2) + with self.assertRaises(KeyError): + _ = self.tools["translator"] + self.assertNotIn("translator", self.tools._name_cache) + + def test_clear(self): + self.tools.clear() + + self.assertEqual(len(self.tools), 0) + self.assertEqual(self.tools._name_cache, {}) + with self.assertRaises(KeyError): + _ = self.tools["search"] + + def test_iteration(self): + tools_list = list(self.tools) + self.assertEqual(tools_list, [self.search_tool, self.calculator_tool, self.translator_tool]) + + def test_contains(self): + self.assertIn(self.search_tool, self.tools) + self.assertIn(self.calculator_tool, self.tools) + self.assertIn(self.translator_tool, self.tools) + + nonexistent_tool = self._create_mock_tool("nonexistent", "Nonexistent Tool") + self.assertNotIn(nonexistent_tool, self.tools) + + def test_slicing(self): + slice_result = self.tools[1:3] + self.assertEqual(len(slice_result), 2) + self.assertEqual(slice_result[0], self.calculator_tool) + self.assertEqual(slice_result[1], self.translator_tool) + + self.assertIsInstance(slice_result, list) + self.assertNotIsInstance(slice_result, ToolCollection) + + def test_getitem_with_tool_name_as_int(self): + numeric_name_tool = self._create_mock_tool("123", "Numeric Name Tool") + self.tools.append(numeric_name_tool) + + self.assertEqual(self.tools["123"], 
numeric_name_tool) + + with self.assertRaises(IndexError): + _ = self.tools[123] \ No newline at end of file From c13b08de2eb63a69ba2d077189fa2ea98c5373f7 Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Fri, 20 Jun 2025 12:09:45 -0300 Subject: [PATCH 344/391] fix: add support for case-insensitive Enterprise filter (#340) --- .../tools/crewai_enterprise_tools/crewai_enterprise_tools.py | 2 +- tests/tools/crewai_enterprise_tools_test.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py b/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py index 8e7275e69..871cf7c94 100644 --- a/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py +++ b/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py @@ -53,5 +53,5 @@ def CrewaiEnterpriseTools( return ToolCollection(all_tools) # Filter tools based on the provided list - filtered_tools = [tool for tool in all_tools if tool.name in actions_list] + filtered_tools = [tool for tool in all_tools if tool.name.lower() in [action.lower() for action in actions_list]] return ToolCollection(filtered_tools) diff --git a/tests/tools/crewai_enterprise_tools_test.py b/tests/tools/crewai_enterprise_tools_test.py index 384093e0f..7a649028d 100644 --- a/tests/tools/crewai_enterprise_tools_test.py +++ b/tests/tools/crewai_enterprise_tools_test.py @@ -46,7 +46,7 @@ class TestCrewaiEnterpriseTools(unittest.TestCase): @patch.dict(os.environ, {"CREWAI_ENTERPRISE_TOOLS_TOKEN": "env-token"}) def test_filters_tools_by_actions_list(self): - tools = CrewaiEnterpriseTools(actions_list=["tool1", "tool3"]) + tools = CrewaiEnterpriseTools(actions_list=["ToOl1", "tool3"]) self.assertEqual(len(tools), 2) self.assertEqual(tools[0].name, "tool1") self.assertEqual(tools[1].name, "tool3") From 78a062a9072ddaf4736fb4c0c6171cbe05142d24 Mon Sep 17 00:00:00 2001 From: Rostyslav Borovyk Date: Tue, 24 Jun 2025 16:56:47 +0300 Subject: [PATCH 345/391] Add Oxylabs Web Scraping tools (#312) * Add Oxylabs tools * Review updates * Add package_dependencies attribute --- src/crewai_tools/__init__.py | 4 + src/crewai_tools/tools/__init__.py | 12 ++ .../README.md | 55 ++++++ .../oxylabs_amazon_product_scraper_tool.py | 151 ++++++++++++++++ .../README.md | 54 ++++++ .../oxylabs_amazon_search_scraper_tool.py | 153 ++++++++++++++++ .../README.md | 50 ++++++ .../oxylabs_google_search_scraper_tool.py | 156 +++++++++++++++++ .../oxylabs_universal_scraper_tool/README.md | 69 ++++++++ .../oxylabs_universal_scraper_tool.py | 146 ++++++++++++++++ tests/tools/test_oxylabs_tools.py | 163 ++++++++++++++++++ 11 files changed, 1013 insertions(+) create mode 100644 src/crewai_tools/tools/oxylabs_amazon_product_scraper_tool/README.md create mode 100644 src/crewai_tools/tools/oxylabs_amazon_product_scraper_tool/oxylabs_amazon_product_scraper_tool.py create mode 100644 src/crewai_tools/tools/oxylabs_amazon_search_scraper_tool/README.md create mode 100644 src/crewai_tools/tools/oxylabs_amazon_search_scraper_tool/oxylabs_amazon_search_scraper_tool.py create mode 100644 src/crewai_tools/tools/oxylabs_google_search_scraper_tool/README.md create mode 100644 src/crewai_tools/tools/oxylabs_google_search_scraper_tool/oxylabs_google_search_scraper_tool.py create mode 100644 src/crewai_tools/tools/oxylabs_universal_scraper_tool/README.md create mode 100644 src/crewai_tools/tools/oxylabs_universal_scraper_tool/oxylabs_universal_scraper_tool.py create mode 100644 
tests/tools/test_oxylabs_tools.py
diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py
index f49e4149b..36624f355 100644
--- a/src/crewai_tools/__init__.py
+++ b/src/crewai_tools/__init__.py
@@ -37,6 +37,10 @@ from .tools import (
     MultiOnTool,
     MySQLSearchTool,
     NL2SQLTool,
+    OxylabsUniversalScraperTool,
+    OxylabsGoogleSearchScraperTool,
+    OxylabsAmazonProductScraperTool,
+    OxylabsAmazonSearchScraperTool,
     PatronusEvalTool,
     PatronusLocalEvaluatorTool,
     PatronusPredefinedCriteriaEvalTool,
diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py
index c10f152ef..957d2f1e2 100644
--- a/src/crewai_tools/tools/__init__.py
+++ b/src/crewai_tools/tools/__init__.py
@@ -32,6 +32,18 @@ from .mdx_search_tool.mdx_search_tool import MDXSearchTool
 from .multion_tool.multion_tool import MultiOnTool
 from .mysql_search_tool.mysql_search_tool import MySQLSearchTool
 from .nl2sql.nl2sql_tool import NL2SQLTool
+from .oxylabs_universal_scraper_tool.oxylabs_universal_scraper_tool import (
+    OxylabsUniversalScraperTool,
+)
+from .oxylabs_google_search_scraper_tool.oxylabs_google_search_scraper_tool import (
+    OxylabsGoogleSearchScraperTool,
+)
+from .oxylabs_amazon_product_scraper_tool.oxylabs_amazon_product_scraper_tool import (
+    OxylabsAmazonProductScraperTool,
+)
+from .oxylabs_amazon_search_scraper_tool.oxylabs_amazon_search_scraper_tool import (
+    OxylabsAmazonSearchScraperTool,
+)
 from .patronus_eval_tool import (
     PatronusEvalTool,
     PatronusLocalEvaluatorTool,
diff --git a/src/crewai_tools/tools/oxylabs_amazon_product_scraper_tool/README.md b/src/crewai_tools/tools/oxylabs_amazon_product_scraper_tool/README.md
new file mode 100644
index 000000000..f87c70c19
--- /dev/null
+++ b/src/crewai_tools/tools/oxylabs_amazon_product_scraper_tool/README.md
@@ -0,0 +1,55 @@
+# OxylabsAmazonProductScraperTool
+
+Scrape Amazon product pages with `OxylabsAmazonProductScraperTool`
+
+## Installation
+
+```
+pip install 'crewai[tools]' oxylabs
+```
+
+## Example
+
+```python
+from crewai_tools import OxylabsAmazonProductScraperTool
+
+# make sure OXYLABS_USERNAME and OXYLABS_PASSWORD variables are set
+tool = OxylabsAmazonProductScraperTool()
+
+result = tool.run(query="AAAAABBBBCC")
+
+print(result)
+```
+
+## Arguments
+
+- `username`: Oxylabs username.
+- `password`: Oxylabs password.
+
+Get the credentials by creating an Oxylabs Account [here](https://oxylabs.io).
+
+## Advanced example
+
+Check out the Oxylabs [documentation](https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/amazon/product) to get the full list of parameters.
+ +```python +from crewai_tools import OxylabsAmazonProductScraperTool + +# make sure OXYLABS_USERNAME and OXYLABS_PASSWORD variables are set +tool = OxylabsAmazonProductScraperTool( + config={ + "domain": "com", + "parse": True, + "context": [ + { + "key": "autoselect_variant", + "value": True + } + ] + } +) + +result = tool.run(query="AAAAABBBBCC") + +print(result) +``` diff --git a/src/crewai_tools/tools/oxylabs_amazon_product_scraper_tool/oxylabs_amazon_product_scraper_tool.py b/src/crewai_tools/tools/oxylabs_amazon_product_scraper_tool/oxylabs_amazon_product_scraper_tool.py new file mode 100644 index 000000000..d763fa86f --- /dev/null +++ b/src/crewai_tools/tools/oxylabs_amazon_product_scraper_tool/oxylabs_amazon_product_scraper_tool.py @@ -0,0 +1,151 @@ +import json +import os +from importlib.metadata import version +from platform import architecture, python_version +from typing import Any, List, Type + +from crewai.tools import BaseTool +from pydantic import BaseModel, ConfigDict, Field + +try: + from oxylabs import RealtimeClient + from oxylabs.sources.response import Response as OxylabsResponse + + OXYLABS_AVAILABLE = True +except ImportError: + RealtimeClient = Any + OxylabsResponse = Any + + OXYLABS_AVAILABLE = False + + +__all__ = ["OxylabsAmazonProductScraperTool", "OxylabsAmazonProductScraperConfig"] + + +class OxylabsAmazonProductScraperArgs(BaseModel): + query: str = Field(description="Amazon product ASIN") + + +class OxylabsAmazonProductScraperConfig(BaseModel): + """ + Amazon Product Scraper configuration options: + https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/amazon/product + """ + + domain: str | None = Field( + None, description="The domain to limit the search results to." + ) + geo_location: str | None = Field(None, description="The Deliver to location.") + user_agent_type: str | None = Field(None, description="Device type and browser.") + render: str | None = Field(None, description="Enables JavaScript rendering.") + callback_url: str | None = Field(None, description="URL to your callback endpoint.") + context: list | None = Field( + None, + description="Additional advanced settings and controls for specialized requirements.", + ) + parse: bool | None = Field(None, description="True will return structured data.") + parsing_instructions: dict | None = Field( + None, description="Instructions for parsing the results." + ) + + +class OxylabsAmazonProductScraperTool(BaseTool): + """ + Scrape Amazon product pages with OxylabsAmazonProductScraperTool. + + Get Oxylabs account: + https://dashboard.oxylabs.io/en + + Args: + username (str): Oxylabs username. + password (str): Oxylabs password. + config: Configuration options. 
See ``OxylabsAmazonProductScraperConfig`` + """ + + model_config = ConfigDict( + arbitrary_types_allowed=True, + validate_assignment=True, + ) + name: str = "Oxylabs Amazon Product Scraper tool" + description: str = "Scrape Amazon product pages with Oxylabs Amazon Product Scraper" + args_schema: Type[BaseModel] = OxylabsAmazonProductScraperArgs + + oxylabs_api: RealtimeClient + config: OxylabsAmazonProductScraperConfig + package_dependencies: List[str] = ["oxylabs"] + + def __init__( + self, + username: str | None = None, + password: str | None = None, + config: OxylabsAmazonProductScraperConfig + | dict = OxylabsAmazonProductScraperConfig(), + **kwargs, + ) -> None: + bits, _ = architecture() + sdk_type = ( + f"oxylabs-crewai-sdk-python/" + f"{version('crewai')} " + f"({python_version()}; {bits})" + ) + + if username is None or password is None: + username, password = self._get_credentials_from_env() + + if OXYLABS_AVAILABLE: + # import RealtimeClient to make it accessible for the current scope + from oxylabs import RealtimeClient + + kwargs["oxylabs_api"] = RealtimeClient( + username=username, + password=password, + sdk_type=sdk_type, + ) + else: + import click + + if click.confirm( + "You are missing the 'oxylabs' package. Would you like to install it?" + ): + import subprocess + + try: + subprocess.run(["uv", "add", "oxylabs"], check=True) + from oxylabs import RealtimeClient + + kwargs["oxylabs_api"] = RealtimeClient( + username=username, + password=password, + sdk_type=sdk_type, + ) + except subprocess.CalledProcessError: + raise ImportError("Failed to install oxylabs package") + else: + raise ImportError( + "`oxylabs` package not found, please run `uv add oxylabs`" + ) + + super().__init__(config=config, **kwargs) + + def _get_credentials_from_env(self) -> tuple[str, str]: + username = os.environ.get("OXYLABS_USERNAME") + password = os.environ.get("OXYLABS_PASSWORD") + if not username or not password: + raise ValueError( + "You must pass oxylabs username and password when instantiating the tool " + "or specify OXYLABS_USERNAME and OXYLABS_PASSWORD environment variables" + ) + return username, password + + def _run(self, query: str) -> str: + response = self.oxylabs_api.amazon.scrape_product( + query, + **self.config.model_dump(exclude_none=True), + ) + + content = response.results[0].content + + if isinstance(content, dict): + return json.dumps(content) + + return content diff --git a/src/crewai_tools/tools/oxylabs_amazon_search_scraper_tool/README.md b/src/crewai_tools/tools/oxylabs_amazon_search_scraper_tool/README.md new file mode 100644 index 000000000..b0e2ef7b0 --- /dev/null +++ b/src/crewai_tools/tools/oxylabs_amazon_search_scraper_tool/README.md @@ -0,0 +1,54 @@ +# OxylabsAmazonSearchScraperTool + +Scrape any website with `OxylabsAmazonSearchScraperTool` + +## Installation + +``` +pip install 'crewai[tools]' oxylabs +``` + +## Example + +```python +from crewai_tools import OxylabsAmazonSearchScraperTool + +# make sure OXYLABS_USERNAME and OXYLABS_PASSWORD variables are set +tool = OxylabsAmazonSearchScraperTool() + +result = tool.run(query="headsets") + +print(result) +``` + +## Arguments + +- `username`: Oxylabs username. +- `password`: Oxylabs password. + +Get the credentials by creating an Oxylabs Account [here](https://oxylabs.io). + +## Advanced example + +Check out the Oxylabs [documentation](https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/amazon/search) to get the full list of parameters. 
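+
+Credentials can also be passed explicitly instead of being read from the
+environment, which helps when juggling several Oxylabs accounts. A short
+sketch with placeholder values:
+
+```python
+from crewai_tools import OxylabsAmazonSearchScraperTool
+
+tool = OxylabsAmazonSearchScraperTool(
+    username="your-oxylabs-username",  # placeholder credentials
+    password="your-oxylabs-password",
+)
+
+print(tool.run(query="headsets"))
+```
+
+The example below adds pagination and category filtering through the config dict: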
+ +```python +from crewai_tools import OxylabsAmazonSearchScraperTool + +# make sure OXYLABS_USERNAME and OXYLABS_PASSWORD variables are set +tool = OxylabsAmazonSearchScraperTool( + config={ + "domain": 'nl', + "start_page": 2, + "pages": 2, + "parse": True, + "context": [ + {'key': 'category_id', 'value': 16391693031} + ], + } +) + +result = tool.run(query='nirvana tshirt') + +print(result) +``` diff --git a/src/crewai_tools/tools/oxylabs_amazon_search_scraper_tool/oxylabs_amazon_search_scraper_tool.py b/src/crewai_tools/tools/oxylabs_amazon_search_scraper_tool/oxylabs_amazon_search_scraper_tool.py new file mode 100644 index 000000000..9a113e93a --- /dev/null +++ b/src/crewai_tools/tools/oxylabs_amazon_search_scraper_tool/oxylabs_amazon_search_scraper_tool.py @@ -0,0 +1,153 @@ +import json +import os +from importlib.metadata import version +from platform import architecture, python_version +from typing import Any, List, Type + +from crewai.tools import BaseTool +from pydantic import BaseModel, ConfigDict, Field + +try: + from oxylabs import RealtimeClient + from oxylabs.sources.response import Response as OxylabsResponse + + OXYLABS_AVAILABLE = True +except ImportError: + RealtimeClient = Any + OxylabsResponse = Any + + OXYLABS_AVAILABLE = False + + +__all__ = ["OxylabsAmazonSearchScraperTool", "OxylabsAmazonSearchScraperConfig"] + + +class OxylabsAmazonSearchScraperArgs(BaseModel): + query: str = Field(description="Amazon search term") + + +class OxylabsAmazonSearchScraperConfig(BaseModel): + """ + Amazon Search Scraper configuration options: + https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/amazon/search + """ + + domain: str | None = Field( + None, description="The domain to limit the search results to." + ) + start_page: int | None = Field(None, description="The starting page number.") + pages: int | None = Field(None, description="The number of pages to scrape.") + geo_location: str | None = Field(None, description="The Deliver to location.") + user_agent_type: str | None = Field(None, description="Device type and browser.") + render: str | None = Field(None, description="Enables JavaScript rendering.") + callback_url: str | None = Field(None, description="URL to your callback endpoint.") + context: list | None = Field( + None, + description="Additional advanced settings and controls for specialized requirements.", + ) + parse: bool | None = Field(None, description="True will return structured data.") + parsing_instructions: dict | None = Field( + None, description="Instructions for parsing the results." + ) + + +class OxylabsAmazonSearchScraperTool(BaseTool): + """ + Scrape Amazon search results with OxylabsAmazonSearchScraperTool. + + Get Oxylabs account: + https://dashboard.oxylabs.io/en + + Args: + username (str): Oxylabs username. + password (str): Oxylabs password. + config: Configuration options. 
See ``OxylabsAmazonSearchScraperConfig`` + """ + + model_config = ConfigDict( + arbitrary_types_allowed=True, + validate_assignment=True, + ) + name: str = "Oxylabs Amazon Search Scraper tool" + description: str = "Scrape Amazon search results with Oxylabs Amazon Search Scraper" + args_schema: Type[BaseModel] = OxylabsAmazonSearchScraperArgs + + oxylabs_api: RealtimeClient + config: OxylabsAmazonSearchScraperConfig + package_dependencies: List[str] = ["oxylabs"] + + def __init__( + self, + username: str | None = None, + password: str | None = None, + config: OxylabsAmazonSearchScraperConfig + | dict = OxylabsAmazonSearchScraperConfig(), + **kwargs, + ): + bits, _ = architecture() + sdk_type = ( + f"oxylabs-crewai-sdk-python/" + f"{version('crewai')} " + f"({python_version()}; {bits})" + ) + + if username is None or password is None: + username, password = self._get_credentials_from_env() + + if OXYLABS_AVAILABLE: + # import RealtimeClient to make it accessible for the current scope + from oxylabs import RealtimeClient + + kwargs["oxylabs_api"] = RealtimeClient( + username=username, + password=password, + sdk_type=sdk_type, + ) + else: + import click + + if click.confirm( + "You are missing the 'oxylabs' package. Would you like to install it?" + ): + import subprocess + + try: + subprocess.run(["uv", "add", "oxylabs"], check=True) + from oxylabs import RealtimeClient + + kwargs["oxylabs_api"] = RealtimeClient( + username=username, + password=password, + sdk_type=sdk_type, + ) + except subprocess.CalledProcessError: + raise ImportError("Failed to install oxylabs package") + else: + raise ImportError( + "`oxylabs` package not found, please run `uv add oxylabs`" + ) + + super().__init__(config=config, **kwargs) + + def _get_credentials_from_env(self) -> tuple[str, str]: + username = os.environ.get("OXYLABS_USERNAME") + password = os.environ.get("OXYLABS_PASSWORD") + if not username or not password: + raise ValueError( + "You must pass oxylabs username and password when instantiating the tool " + "or specify OXYLABS_USERNAME and OXYLABS_PASSWORD environment variables" + ) + return username, password + + def _run(self, query: str) -> str: + response = self.oxylabs_api.amazon.scrape_search( + query, + **self.config.model_dump(exclude_none=True), + ) + + content = response.results[0].content + + if isinstance(content, dict): + return json.dumps(content) + + return content diff --git a/src/crewai_tools/tools/oxylabs_google_search_scraper_tool/README.md b/src/crewai_tools/tools/oxylabs_google_search_scraper_tool/README.md new file mode 100644 index 000000000..e9448d2db --- /dev/null +++ b/src/crewai_tools/tools/oxylabs_google_search_scraper_tool/README.md @@ -0,0 +1,50 @@ +# OxylabsGoogleSearchScraperTool + +Scrape any website with `OxylabsGoogleSearchScraperTool` + +## Installation + +``` +pip install 'crewai[tools]' oxylabs +``` + +## Example + +```python +from crewai_tools import OxylabsGoogleSearchScraperTool + +# make sure OXYLABS_USERNAME and OXYLABS_PASSWORD variables are set +tool = OxylabsGoogleSearchScraperTool() + +result = tool.run(query="iPhone 16") + +print(result) +``` + +## Arguments + +- `username`: Oxylabs username. +- `password`: Oxylabs password. + +Get the credentials by creating an Oxylabs Account [here](https://oxylabs.io). + +## Advanced example + +Check out the Oxylabs [documentation](https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/google/search/search) to get the full list of parameters. 
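+
+Like any other crewAI tool, the scraper can be handed to an agent. A minimal
+sketch (the agent fields are illustrative):
+
+```python
+from crewai import Agent
+from crewai_tools import OxylabsGoogleSearchScraperTool
+
+# OXYLABS_USERNAME and OXYLABS_PASSWORD must be set in the environment
+search_tool = OxylabsGoogleSearchScraperTool(config={"parse": True})
+
+researcher = Agent(
+    role="Market researcher",
+    goal="Summarize fresh search results for a given product",
+    backstory="An analyst who relies on up-to-date search data.",
+    tools=[search_tool],
+)
+```
+
+The example below also sets geolocation and device type: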
+ +```python +from crewai_tools import OxylabsGoogleSearchScraperTool + +# make sure OXYLABS_USERNAME and OXYLABS_PASSWORD variables are set +tool = OxylabsGoogleSearchScraperTool( + config={ + "parse": True, + "geo_location": "Paris, France", + "user_agent_type": "tablet", + } +) + +result = tool.run(query="iPhone 16") + +print(result) +``` diff --git a/src/crewai_tools/tools/oxylabs_google_search_scraper_tool/oxylabs_google_search_scraper_tool.py b/src/crewai_tools/tools/oxylabs_google_search_scraper_tool/oxylabs_google_search_scraper_tool.py new file mode 100644 index 000000000..7de1aaa2d --- /dev/null +++ b/src/crewai_tools/tools/oxylabs_google_search_scraper_tool/oxylabs_google_search_scraper_tool.py @@ -0,0 +1,156 @@ +import json +import os +from importlib.metadata import version +from platform import architecture, python_version +from typing import Any, List, Type + +from crewai.tools import BaseTool +from pydantic import BaseModel, ConfigDict, Field + +try: + from oxylabs import RealtimeClient + from oxylabs.sources.response import Response as OxylabsResponse + + OXYLABS_AVAILABLE = True +except ImportError: + RealtimeClient = Any + OxylabsResponse = Any + + OXYLABS_AVAILABLE = False + + +__all__ = ["OxylabsGoogleSearchScraperTool", "OxylabsGoogleSearchScraperConfig"] + + +class OxylabsGoogleSearchScraperArgs(BaseModel): + query: str = Field(description="Search query") + + +class OxylabsGoogleSearchScraperConfig(BaseModel): + """ + Google Search Scraper configuration options: + https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/google/search/search + """ + + domain: str | None = Field( + None, description="The domain to limit the search results to." + ) + start_page: int | None = Field(None, description="The starting page number.") + pages: int | None = Field(None, description="The number of pages to scrape.") + limit: int | None = Field( + None, description="Number of results to retrieve in each page." + ) + geo_location: str | None = Field(None, description="The Deliver to location.") + user_agent_type: str | None = Field(None, description="Device type and browser.") + render: str | None = Field(None, description="Enables JavaScript rendering.") + callback_url: str | None = Field(None, description="URL to your callback endpoint.") + context: list | None = Field( + None, + description="Additional advanced settings and controls for specialized requirements.", + ) + parse: bool | None = Field(None, description="True will return structured data.") + parsing_instructions: dict | None = Field( + None, description="Instructions for parsing the results." + ) + + +class OxylabsGoogleSearchScraperTool(BaseTool): + """ + Scrape Google Search results with OxylabsGoogleSearchScraperTool. + + Get Oxylabs account: + https://dashboard.oxylabs.io/en + + Args: + username (str): Oxylabs username. + password (str): Oxylabs password. + config: Configuration options. 
See ``OxylabsGoogleSearchScraperConfig`` + """ + + model_config = ConfigDict( + arbitrary_types_allowed=True, + validate_assignment=True, + ) + name: str = "Oxylabs Google Search Scraper tool" + description: str = "Scrape Google Search results with Oxylabs Google Search Scraper" + args_schema: Type[BaseModel] = OxylabsGoogleSearchScraperArgs + + oxylabs_api: RealtimeClient + config: OxylabsGoogleSearchScraperConfig + package_dependencies: List[str] = ["oxylabs"] + + def __init__( + self, + username: str | None = None, + password: str | None = None, + config: OxylabsGoogleSearchScraperConfig + | dict = OxylabsGoogleSearchScraperConfig(), + **kwargs, + ): + bits, _ = architecture() + sdk_type = ( + f"oxylabs-crewai-sdk-python/" + f"{version('crewai')} " + f"({python_version()}; {bits})" + ) + + if username is None or password is None: + username, password = self._get_credentials_from_env() + + if OXYLABS_AVAILABLE: + # import RealtimeClient to make it accessible for the current scope + from oxylabs import RealtimeClient + + kwargs["oxylabs_api"] = RealtimeClient( + username=username, + password=password, + sdk_type=sdk_type, + ) + else: + import click + + if click.confirm( + "You are missing the 'oxylabs' package. Would you like to install it?" + ): + import subprocess + + try: + subprocess.run(["uv", "add", "oxylabs"], check=True) + from oxylabs import RealtimeClient + + kwargs["oxylabs_api"] = RealtimeClient( + username=username, + password=password, + sdk_type=sdk_type, + ) + except subprocess.CalledProcessError: + raise ImportError("Failed to install oxylabs package") + else: + raise ImportError( + "`oxylabs` package not found, please run `uv add oxylabs`" + ) + + super().__init__(config=config, **kwargs) + + def _get_credentials_from_env(self) -> tuple[str, str]: + username = os.environ.get("OXYLABS_USERNAME") + password = os.environ.get("OXYLABS_PASSWORD") + if not username or not password: + raise ValueError( + "You must pass oxylabs username and password when instantiating the tool " + "or specify OXYLABS_USERNAME and OXYLABS_PASSWORD environment variables" + ) + return username, password + + def _run(self, query: str, **kwargs) -> str: + response = self.oxylabs_api.google.scrape_search( + query, + **self.config.model_dump(exclude_none=True), + ) + + content = response.results[0].content + + if isinstance(content, dict): + return json.dumps(content) + + return content diff --git a/src/crewai_tools/tools/oxylabs_universal_scraper_tool/README.md b/src/crewai_tools/tools/oxylabs_universal_scraper_tool/README.md new file mode 100644 index 000000000..82f345a65 --- /dev/null +++ b/src/crewai_tools/tools/oxylabs_universal_scraper_tool/README.md @@ -0,0 +1,69 @@ +# OxylabsUniversalScraperTool + +Scrape any website with `OxylabsUniversalScraperTool` + +## Installation + +``` +pip install 'crewai[tools]' oxylabs +``` + +## Example + +```python +from crewai_tools import OxylabsUniversalScraperTool + +# make sure OXYLABS_USERNAME and OXYLABS_PASSWORD variables are set +tool = OxylabsUniversalScraperTool() + +result = tool.run(url="https://ip.oxylabs.io") + +print(result) +``` + +## Arguments + +- `username`: Oxylabs username. +- `password`: Oxylabs password. + +Get the credentials by creating an Oxylabs Account [here](https://oxylabs.io). + +## Advanced example + +Check out the Oxylabs [documentation](https://developers.oxylabs.io/scraper-apis/web-scraper-api/other-websites) to get the full list of parameters. 
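+
+As with the other Oxylabs tools, `config` may be the typed
+`OxylabsUniversalScraperConfig` model instead of a dict. A small sketch:
+
+```python
+from crewai_tools import OxylabsUniversalScraperTool
+from crewai_tools.tools.oxylabs_universal_scraper_tool.oxylabs_universal_scraper_tool import (
+    OxylabsUniversalScraperConfig,
+)
+
+# OXYLABS_USERNAME and OXYLABS_PASSWORD must be set in the environment
+tool = OxylabsUniversalScraperTool(
+    config=OxylabsUniversalScraperConfig(render="html", user_agent_type="mobile")
+)
+
+print(tool.run(url="https://ip.oxylabs.io"))
+```
+
+The dict form below additionally drives headers, cookies and redirects through
+the `context` option: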
+ +```python +from crewai_tools import OxylabsUniversalScraperTool + +# make sure OXYLABS_USERNAME and OXYLABS_PASSWORD variables are set +tool = OxylabsUniversalScraperTool( + config={ + "render": "html", + "user_agent_type": "mobile", + "context": [ + {"key": "force_headers", "value": True}, + {"key": "force_cookies", "value": True}, + { + "key": "headers", + "value": { + "Custom-Header-Name": "custom header content", + }, + }, + { + "key": "cookies", + "value": [ + {"key": "NID", "value": "1234567890"}, + {"key": "1P JAR", "value": "0987654321"}, + ], + }, + {"key": "http_method", "value": "get"}, + {"key": "follow_redirects", "value": True}, + {"key": "successful_status_codes", "value": [808, 909]}, + ], + } +) + +result = tool.run(url="https://ip.oxylabs.io") + +print(result) +``` diff --git a/src/crewai_tools/tools/oxylabs_universal_scraper_tool/oxylabs_universal_scraper_tool.py b/src/crewai_tools/tools/oxylabs_universal_scraper_tool/oxylabs_universal_scraper_tool.py new file mode 100644 index 000000000..22d02f91f --- /dev/null +++ b/src/crewai_tools/tools/oxylabs_universal_scraper_tool/oxylabs_universal_scraper_tool.py @@ -0,0 +1,146 @@ +import json +import os +from importlib.metadata import version +from platform import architecture, python_version +from typing import Any, List, Type + +from crewai.tools import BaseTool +from pydantic import BaseModel, ConfigDict, Field + +try: + from oxylabs import RealtimeClient + from oxylabs.sources.response import Response as OxylabsResponse + + OXYLABS_AVAILABLE = True +except ImportError: + RealtimeClient = Any + OxylabsResponse = Any + + OXYLABS_AVAILABLE = False + +__all__ = ["OxylabsUniversalScraperTool", "OxylabsUniversalScraperConfig"] + + +class OxylabsUniversalScraperArgs(BaseModel): + url: str = Field(description="Website URL") + + +class OxylabsUniversalScraperConfig(BaseModel): + """ + Universal Scraper configuration options: + https://developers.oxylabs.io/scraper-apis/web-scraper-api/other-websites + """ + + geo_location: str | None = Field(None, description="The Deliver to location.") + user_agent_type: str | None = Field(None, description="Device type and browser.") + render: str | None = Field(None, description="Enables JavaScript rendering.") + callback_url: str | None = Field(None, description="URL to your callback endpoint.") + context: list | None = Field( + None, + description="Additional advanced settings and controls for specialized requirements.", + ) + parse: bool | None = Field(None, description="True will return structured data.") + parsing_instructions: dict | None = Field( + None, description="Instructions for parsing the results." + ) + + +class OxylabsUniversalScraperTool(BaseTool): + """ + Scrape any website with OxylabsUniversalScraperTool. + + Get Oxylabs account: + https://dashboard.oxylabs.io/en + + Args: + username (str): Oxylabs username. + password (str): Oxylabs password. + config: Configuration options. 
See ``OxylabsUniversalScraperConfig`` + """ + + model_config = ConfigDict( + arbitrary_types_allowed=True, + validate_assignment=True, + ) + name: str = "Oxylabs Universal Scraper tool" + description: str = "Scrape any url with Oxylabs Universal Scraper" + args_schema: Type[BaseModel] = OxylabsUniversalScraperArgs + + oxylabs_api: RealtimeClient + config: OxylabsUniversalScraperConfig + package_dependencies: List[str] = ["oxylabs"] + + def __init__( + self, + username: str | None = None, + password: str | None = None, + config: OxylabsUniversalScraperConfig | dict = OxylabsUniversalScraperConfig(), + **kwargs, + ): + bits, _ = architecture() + sdk_type = ( + f"oxylabs-crewai-sdk-python/" + f"{version('crewai')} " + f"({python_version()}; {bits})" + ) + + if username is None or password is None: + username, password = self._get_credentials_from_env() + + if OXYLABS_AVAILABLE: + # import RealtimeClient to make it accessible for the current scope + from oxylabs import RealtimeClient + + kwargs["oxylabs_api"] = RealtimeClient( + username=username, + password=password, + sdk_type=sdk_type, + ) + else: + import click + + if click.confirm( + "You are missing the 'oxylabs' package. Would you like to install it?" + ): + import subprocess + + try: + subprocess.run(["uv", "add", "oxylabs"], check=True) + from oxylabs import RealtimeClient + + kwargs["oxylabs_api"] = RealtimeClient( + username=username, + password=password, + sdk_type=sdk_type, + ) + except subprocess.CalledProcessError: + raise ImportError("Failed to install oxylabs package") + else: + raise ImportError( + "`oxylabs` package not found, please run `uv add oxylabs`" + ) + + super().__init__(config=config, **kwargs) + + def _get_credentials_from_env(self) -> tuple[str, str]: + username = os.environ.get("OXYLABS_USERNAME") + password = os.environ.get("OXYLABS_PASSWORD") + if not username or not password: + raise ValueError( + "You must pass oxylabs username and password when instantiating the tool " + "or specify OXYLABS_USERNAME and OXYLABS_PASSWORD environment variables" + ) + return username, password + + def _run(self, url: str) -> str: + response = self.oxylabs_api.universal.scrape_url( + url, + **self.config.model_dump(exclude_none=True), + ) + + content = response.results[0].content + + if isinstance(content, dict): + return json.dumps(content) + + return content diff --git a/tests/tools/test_oxylabs_tools.py b/tests/tools/test_oxylabs_tools.py new file mode 100644 index 000000000..3fd3feca3 --- /dev/null +++ b/tests/tools/test_oxylabs_tools.py @@ -0,0 +1,163 @@ +import json +import os +from typing import Type +from unittest.mock import MagicMock + +import pytest +from crewai.tools.base_tool import BaseTool +from oxylabs import RealtimeClient +from oxylabs.sources.response import Response as OxylabsResponse +from pydantic import BaseModel + +from crewai_tools import ( + OxylabsAmazonProductScraperTool, + OxylabsAmazonSearchScraperTool, + OxylabsGoogleSearchScraperTool, + OxylabsUniversalScraperTool, +) +from crewai_tools.tools.oxylabs_amazon_product_scraper_tool.oxylabs_amazon_product_scraper_tool import ( + OxylabsAmazonProductScraperConfig, +) +from crewai_tools.tools.oxylabs_google_search_scraper_tool.oxylabs_google_search_scraper_tool import ( + OxylabsGoogleSearchScraperConfig, +) + + +@pytest.fixture +def oxylabs_api() -> RealtimeClient: + oxylabs_api_mock = MagicMock() + + html_content = """ + + + + + Scraping Sandbox + + +
+        <div class="products">
+            <div class="product">
+                <h4>Amazing product</h4>
+                <p class="price">Price $14.99</p>
+            </div>
+            <div class="product">
+                <h4>Good product</h4>
+                <p class="price">Price $9.99</p>
+            </div>
+        </div>
+ + + """ + + json_content = { + "results": { + "products": [ + {"title": "Amazing product", "price": 14.99, "currency": "USD"}, + {"title": "Good product", "price": 9.99, "currency": "USD"}, + ], + }, + } + + html_response = OxylabsResponse({"results": [{"content": html_content}]}) + json_response = OxylabsResponse({"results": [{"content": json_content}]}) + + oxylabs_api_mock.universal.scrape_url.side_effect = [json_response, html_response] + oxylabs_api_mock.amazon.scrape_search.side_effect = [json_response, html_response] + oxylabs_api_mock.amazon.scrape_product.side_effect = [json_response, html_response] + oxylabs_api_mock.google.scrape_search.side_effect = [json_response, html_response] + + return oxylabs_api_mock + + +@pytest.mark.parametrize( + ("tool_class",), + [ + (OxylabsUniversalScraperTool,), + (OxylabsAmazonSearchScraperTool,), + (OxylabsGoogleSearchScraperTool,), + (OxylabsAmazonProductScraperTool,), + ], +) +def test_tool_initialization(tool_class: Type[BaseTool]): + tool = tool_class(username="username", password="password") + assert isinstance(tool, tool_class) + + +@pytest.mark.parametrize( + ("tool_class",), + [ + (OxylabsUniversalScraperTool,), + (OxylabsAmazonSearchScraperTool,), + (OxylabsGoogleSearchScraperTool,), + (OxylabsAmazonProductScraperTool,), + ], +) +def test_tool_initialization_with_env_vars(tool_class: Type[BaseTool]): + os.environ["OXYLABS_USERNAME"] = "username" + os.environ["OXYLABS_PASSWORD"] = "password" + + tool = tool_class() + assert isinstance(tool, tool_class) + + del os.environ["OXYLABS_USERNAME"] + del os.environ["OXYLABS_PASSWORD"] + + +@pytest.mark.parametrize( + ("tool_class",), + [ + (OxylabsUniversalScraperTool,), + (OxylabsAmazonSearchScraperTool,), + (OxylabsGoogleSearchScraperTool,), + (OxylabsAmazonProductScraperTool,), + ], +) +def test_tool_initialization_failure(tool_class: Type[BaseTool]): + # making sure env vars are not set + for key in ["OXYLABS_USERNAME", "OXYLABS_PASSWORD"]: + if key in os.environ: + del os.environ[key] + + with pytest.raises(ValueError): + tool_class() + + +@pytest.mark.parametrize( + ("tool_class", "tool_config"), + [ + (OxylabsUniversalScraperTool, {"geo_location": "Paris, France"}), + ( + OxylabsAmazonSearchScraperTool, + {"domain": "co.uk"}, + ), + ( + OxylabsGoogleSearchScraperTool, + OxylabsGoogleSearchScraperConfig(render="html"), + ), + ( + OxylabsAmazonProductScraperTool, + OxylabsAmazonProductScraperConfig(parse=True), + ), + ], +) +def test_tool_invocation( + tool_class: Type[BaseTool], + tool_config: BaseModel, + oxylabs_api: RealtimeClient, +): + tool = tool_class(username="username", password="password", config=tool_config) + + # setting via __dict__ to bypass pydantic validation + tool.__dict__["oxylabs_api"] = oxylabs_api + + # verifying parsed job returns json content + result = tool.run("Scraping Query 1") + assert isinstance(result, str) + assert isinstance(json.loads(result), dict) + + # verifying raw job returns str + result = tool.run("Scraping Query 2") + assert isinstance(result, str) + assert "" in result From e8825d071a6520b2f047bb747eb6df3fda6a23cb Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Tue, 24 Jun 2025 15:47:39 -0300 Subject: [PATCH 346/391] feat: support api_key fallback to EXA_API_KEY env-var (#341) --- .../tools/exa_tools/exa_search_tool.py | 12 +++++-- tests/tools/exa_search_tool_test.py | 32 +++++++++++++++++++ 2 files changed, 41 insertions(+), 3 deletions(-) create mode 100644 tests/tools/exa_search_tool_test.py diff --git 
a/src/crewai_tools/tools/exa_tools/exa_search_tool.py b/src/crewai_tools/tools/exa_tools/exa_search_tool.py index d626c03ed..b3d97d7af 100644 --- a/src/crewai_tools/tools/exa_tools/exa_search_tool.py +++ b/src/crewai_tools/tools/exa_tools/exa_search_tool.py @@ -1,6 +1,7 @@ from typing import Any, Optional, Type, List from pydantic import BaseModel, Field -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar +import os try: from exa_py import Exa @@ -36,10 +37,15 @@ class EXASearchTool(BaseTool): summary: Optional[bool] = False type: Optional[str] = "auto" package_dependencies: List[str] = ["exa_py"] + api_key: Optional[str] = Field( + default_factory=lambda: os.getenv("EXA_API_KEY"), description="API key for Exa services", required=False + ) + env_vars: List[EnvVar] = [ + EnvVar(name="EXA_API_KEY", description="API key for Exa services", required=False), + ] def __init__( self, - api_key: str, content: Optional[bool] = False, summary: Optional[bool] = False, type: Optional[str] = "auto", @@ -62,7 +68,7 @@ class EXASearchTool(BaseTool): raise ImportError( "You are missing the 'exa_py' package. Would you like to install it?" ) - self.client = Exa(api_key=api_key) + self.client = Exa(api_key=self.api_key) self.content = content self.summary = summary self.type = type diff --git a/tests/tools/exa_search_tool_test.py b/tests/tools/exa_search_tool_test.py new file mode 100644 index 000000000..17c92e2f4 --- /dev/null +++ b/tests/tools/exa_search_tool_test.py @@ -0,0 +1,32 @@ +import os +from unittest.mock import patch +from crewai_tools import EXASearchTool + +import pytest + +@pytest.fixture +def exa_search_tool(): + return EXASearchTool(api_key="test_api_key") + + +@pytest.fixture(autouse=True) +def mock_exa_api_key(): + with patch.dict(os.environ, {"EXA_API_KEY": "test_key_from_env"}): + yield + +def test_exa_search_tool_initialization(): + with patch("crewai_tools.tools.exa_tools.exa_search_tool.Exa") as mock_exa_class: + api_key = "test_api_key" + tool = EXASearchTool(api_key=api_key) + + assert tool.api_key == api_key + assert tool.content is False + assert tool.summary is False + assert tool.type == "auto" + mock_exa_class.assert_called_once_with(api_key=api_key) + + +def test_exa_search_tool_initialization_with_env(mock_exa_api_key): + with patch("crewai_tools.tools.exa_tools.exa_search_tool.Exa") as mock_exa_class: + EXASearchTool() + mock_exa_class.assert_called_once_with(api_key="test_key_from_env") From 03917411b4df05fe2462deb78723ff90b97cb362 Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Wed, 25 Jun 2025 13:32:22 -0300 Subject: [PATCH 347/391] Support to filter available MCP Tools (#345) * feat: support to complex filter on ToolCollection * refactor: use proper tool collection methot to filter tool in CrewAiEnterpriseTools * feat: allow to filter available MCP tools --- src/crewai_tools/adapters/mcp_adapter.py | 20 ++++- src/crewai_tools/adapters/tool_collection.py | 35 +++++--- .../crewai_enterprise_tools.py | 6 +- tests/adapters/mcp_adapter_test.py | 90 ++++++++++++++++++- tests/tools/tool_collection_test.py | 65 +++++++++++++- 5 files changed, 193 insertions(+), 23 deletions(-) diff --git a/src/crewai_tools/adapters/mcp_adapter.py b/src/crewai_tools/adapters/mcp_adapter.py index bfff480eb..db4c15a24 100644 --- a/src/crewai_tools/adapters/mcp_adapter.py +++ b/src/crewai_tools/adapters/mcp_adapter.py @@ -46,10 +46,18 @@ class MCPServerAdapter: with MCPServerAdapter({"url": "http://localhost:8000/sse"}) as tools: # tools is now available + # context 
manager with filtered tools + with MCPServerAdapter(..., "tool1", "tool2") as filtered_tools: + # only tool1 and tool2 are available + # manually stop mcp server try: mcp_server = MCPServerAdapter(...) - tools = mcp_server.tools + tools = mcp_server.tools # all tools + + # or with filtered tools + mcp_server = MCPServerAdapter(..., "tool1", "tool2") + filtered_tools = mcp_server.tools # only tool1 and tool2 ... finally: mcp_server.stop() @@ -61,18 +69,22 @@ class MCPServerAdapter: def __init__( self, serverparams: StdioServerParameters | dict[str, Any], + *tool_names: str, ): """Initialize the MCP Server Args: serverparams: The parameters for the MCP server it supports either a `StdioServerParameters` or a `dict` respectively for STDIO and SSE. + *tool_names: Optional names of tools to filter. If provided, only tools with + matching names will be available. """ super().__init__() self._adapter = None self._tools = None + self._tool_names = list(tool_names) if tool_names else None if not MCP_AVAILABLE: import click @@ -127,7 +139,11 @@ class MCPServerAdapter: raise ValueError( "MCP server not started, run `mcp_server.start()` first before accessing `tools`" ) - return ToolCollection(self._tools) + + tools_collection = ToolCollection(self._tools) + if self._tool_names: + return tools_collection.filter_by_names(self._tool_names) + return tools_collection def __enter__(self): """ diff --git a/src/crewai_tools/adapters/tool_collection.py b/src/crewai_tools/adapters/tool_collection.py index f0ec9a288..291fa8f82 100644 --- a/src/crewai_tools/adapters/tool_collection.py +++ b/src/crewai_tools/adapters/tool_collection.py @@ -1,4 +1,4 @@ -from typing import List, Optional, Union, TypeVar, Generic, Dict +from typing import List, Optional, Union, TypeVar, Generic, Dict, Callable from crewai.tools import BaseTool T = TypeVar('T', bound=BaseTool) @@ -24,16 +24,16 @@ class ToolCollection(list, Generic[T]): self._build_name_cache() def _build_name_cache(self) -> None: - self._name_cache = {tool.name: tool for tool in self} + self._name_cache = {tool.name.lower(): tool for tool in self} def __getitem__(self, key: Union[int, str]) -> T: if isinstance(key, str): - return self._name_cache[key] + return self._name_cache[key.lower()] return super().__getitem__(key) def append(self, tool: T) -> None: super().append(tool) - self._name_cache[tool.name] = tool + self._name_cache[tool.name.lower()] = tool def extend(self, tools: List[T]) -> None: super().extend(tools) @@ -41,19 +41,34 @@ class ToolCollection(list, Generic[T]): def insert(self, index: int, tool: T) -> None: super().insert(index, tool) - self._name_cache[tool.name] = tool + self._name_cache[tool.name.lower()] = tool def remove(self, tool: T) -> None: super().remove(tool) - if tool.name in self._name_cache: - del self._name_cache[tool.name] + if tool.name.lower() in self._name_cache: + del self._name_cache[tool.name.lower()] def pop(self, index: int = -1) -> T: tool = super().pop(index) - if tool.name in self._name_cache: - del self._name_cache[tool.name] + if tool.name.lower() in self._name_cache: + del self._name_cache[tool.name.lower()] return tool + def filter_by_names(self, names: Optional[List[str]] = None) -> "ToolCollection[T]": + if names is None: + return self + + return ToolCollection( + [ + tool + for name in names + if (tool := self._name_cache.get(name.lower())) is not None + ] + ) + + def filter_where(self, func: Callable[[T], bool]) -> "ToolCollection[T]": + return ToolCollection([tool for tool in self if func(tool)]) + def clear(self) 
-> None: super().clear() - self._name_cache.clear() + self._name_cache.clear() \ No newline at end of file diff --git a/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py b/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py index 871cf7c94..e531afeed 100644 --- a/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py +++ b/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py @@ -49,9 +49,5 @@ def CrewaiEnterpriseTools( adapter = EnterpriseActionKitToolAdapter(**adapter_kwargs) all_tools = adapter.tools() - if actions_list is None: - return ToolCollection(all_tools) - # Filter tools based on the provided list - filtered_tools = [tool for tool in all_tools if tool.name.lower() in [action.lower() for action in actions_list]] - return ToolCollection(filtered_tools) + return ToolCollection(all_tools).filter_by_names(actions_list) diff --git a/tests/adapters/mcp_adapter_test.py b/tests/adapters/mcp_adapter_test.py index f2b08bc16..d0dc88680 100644 --- a/tests/adapters/mcp_adapter_test.py +++ b/tests/adapters/mcp_adapter_test.py @@ -19,6 +19,11 @@ def echo_server_script(): """Echo the input text""" return f"Echo: {text}" + @mcp.tool() + def calc_tool(a: int, b: int) -> int: + """Calculate a + b""" + return a + b + mcp.run() ''' ) @@ -37,6 +42,11 @@ def echo_server_sse_script(): """Echo the input text""" return f"Echo: {text}" + @mcp.tool() + def calc_tool(a: int, b: int) -> int: + """Calculate a + b""" + return a + b + mcp.run("sse") ''' ) @@ -69,16 +79,20 @@ def test_context_manager_syntax(echo_server_script): ) with MCPServerAdapter(serverparams) as tools: assert isinstance(tools, ToolCollection) - assert len(tools) == 1 + assert len(tools) == 2 assert tools[0].name == "echo_tool" + assert tools[1].name == "calc_tool" assert tools[0].run(text="hello") == "Echo: hello" + assert tools[1].run(a=5, b=3) == '8' def test_context_manager_syntax_sse(echo_sse_server): sse_serverparams = echo_sse_server with MCPServerAdapter(sse_serverparams) as tools: - assert len(tools) == 1 + assert len(tools) == 2 assert tools[0].name == "echo_tool" + assert tools[1].name == "calc_tool" assert tools[0].run(text="hello") == "Echo: hello" + assert tools[1].run(a=5, b=3) == '8' def test_try_finally_syntax(echo_server_script): serverparams = StdioServerParameters( @@ -87,9 +101,11 @@ def test_try_finally_syntax(echo_server_script): try: mcp_server_adapter = MCPServerAdapter(serverparams) tools = mcp_server_adapter.tools - assert len(tools) == 1 + assert len(tools) == 2 assert tools[0].name == "echo_tool" + assert tools[1].name == "calc_tool" assert tools[0].run(text="hello") == "Echo: hello" + assert tools[1].run(a=5, b=3) == '8' finally: mcp_server_adapter.stop() @@ -98,8 +114,76 @@ def test_try_finally_syntax_sse(echo_sse_server): mcp_server_adapter = MCPServerAdapter(sse_serverparams) try: tools = mcp_server_adapter.tools + assert len(tools) == 2 + assert tools[0].name == "echo_tool" + assert tools[1].name == "calc_tool" + assert tools[0].run(text="hello") == "Echo: hello" + assert tools[1].run(a=5, b=3) == '8' + finally: + mcp_server_adapter.stop() + +def test_context_manager_with_filtered_tools(echo_server_script): + serverparams = StdioServerParameters( + command="uv", args=["run", "python", "-c", echo_server_script] + ) + # Only select the echo_tool + with MCPServerAdapter(serverparams, "echo_tool") as tools: + assert isinstance(tools, ToolCollection) assert len(tools) == 1 assert tools[0].name == "echo_tool" assert 
tools[0].run(text="hello") == "Echo: hello" + # Check that calc_tool is not present + with pytest.raises(IndexError): + _ = tools[1] + with pytest.raises(KeyError): + _ = tools["calc_tool"] + +def test_context_manager_sse_with_filtered_tools(echo_sse_server): + sse_serverparams = echo_sse_server + # Only select the calc_tool + with MCPServerAdapter(sse_serverparams, "calc_tool") as tools: + assert isinstance(tools, ToolCollection) + assert len(tools) == 1 + assert tools[0].name == "calc_tool" + assert tools[0].run(a=10, b=5) == '15' + # Check that echo_tool is not present + with pytest.raises(IndexError): + _ = tools[1] + with pytest.raises(KeyError): + _ = tools["echo_tool"] + +def test_try_finally_with_filtered_tools(echo_server_script): + serverparams = StdioServerParameters( + command="uv", args=["run", "python", "-c", echo_server_script] + ) + try: + # Select both tools but in reverse order + mcp_server_adapter = MCPServerAdapter(serverparams, "calc_tool", "echo_tool") + tools = mcp_server_adapter.tools + assert len(tools) == 2 + # The order of tools is based on filter_by_names which preserves + # the original order from the collection + assert tools[0].name == "calc_tool" + assert tools[1].name == "echo_tool" finally: mcp_server_adapter.stop() + +def test_filter_with_nonexistent_tool(echo_server_script): + serverparams = StdioServerParameters( + command="uv", args=["run", "python", "-c", echo_server_script] + ) + # Include a tool that doesn't exist + with MCPServerAdapter(serverparams, "echo_tool", "nonexistent_tool") as tools: + # Only echo_tool should be in the result + assert len(tools) == 1 + assert tools[0].name == "echo_tool" + +def test_filter_with_only_nonexistent_tools(echo_server_script): + serverparams = StdioServerParameters( + command="uv", args=["run", "python", "-c", echo_server_script] + ) + # All requested tools don't exist + with MCPServerAdapter(serverparams, "nonexistent1", "nonexistent2") as tools: + # Should return an empty tool collection + assert isinstance(tools, ToolCollection) + assert len(tools) == 0 diff --git a/tests/tools/tool_collection_test.py b/tests/tools/tool_collection_test.py index fb4f35c95..e409a4e76 100644 --- a/tests/tools/tool_collection_test.py +++ b/tests/tools/tool_collection_test.py @@ -8,7 +8,7 @@ from crewai_tools.adapters.tool_collection import ToolCollection class TestToolCollection(unittest.TestCase): def setUp(self): - self.search_tool = self._create_mock_tool("search", "Search Tool") + self.search_tool = self._create_mock_tool("SearcH", "Search Tool") # Tool name is case sensitive self.calculator_tool = self._create_mock_tool("calculator", "Calculator Tool") self.translator_tool = self._create_mock_tool("translator", "Translator Tool") @@ -26,7 +26,7 @@ class TestToolCollection(unittest.TestCase): def test_initialization(self): self.assertEqual(len(self.tools), 3) - self.assertEqual(self.tools[0].name, "search") + self.assertEqual(self.tools[0].name, "SearcH") self.assertEqual(self.tools[1].name, "calculator") self.assertEqual(self.tools[2].name, "translator") @@ -169,4 +169,63 @@ class TestToolCollection(unittest.TestCase): self.assertEqual(self.tools["123"], numeric_name_tool) with self.assertRaises(IndexError): - _ = self.tools[123] \ No newline at end of file + _ = self.tools[123] + + def test_filter_by_names(self): + + filtered = self.tools.filter_by_names(None) + + self.assertIsInstance(filtered, ToolCollection) + self.assertEqual(len(filtered), 3) + + filtered = self.tools.filter_by_names(["search", "translator"]) + + 
self.assertIsInstance(filtered, ToolCollection) + self.assertEqual(len(filtered), 2) + self.assertEqual(filtered[0], self.search_tool) + self.assertEqual(filtered[1], self.translator_tool) + self.assertEqual(filtered["search"], self.search_tool) + self.assertEqual(filtered["translator"], self.translator_tool) + + filtered = self.tools.filter_by_names(["search", "nonexistent"]) + + self.assertIsInstance(filtered, ToolCollection) + self.assertEqual(len(filtered), 1) + self.assertEqual(filtered[0], self.search_tool) + + filtered = self.tools.filter_by_names(["nonexistent1", "nonexistent2"]) + + self.assertIsInstance(filtered, ToolCollection) + self.assertEqual(len(filtered), 0) + + filtered = self.tools.filter_by_names([]) + + self.assertIsInstance(filtered, ToolCollection) + self.assertEqual(len(filtered), 0) + + def test_filter_where(self): + filtered = self.tools.filter_where(lambda tool: tool.name.startswith("S")) + + self.assertIsInstance(filtered, ToolCollection) + self.assertEqual(len(filtered), 1) + self.assertEqual(filtered[0], self.search_tool) + self.assertEqual(filtered["search"], self.search_tool) + + filtered = self.tools.filter_where(lambda tool: True) + + self.assertIsInstance(filtered, ToolCollection) + self.assertEqual(len(filtered), 3) + self.assertEqual(filtered[0], self.search_tool) + self.assertEqual(filtered[1], self.calculator_tool) + self.assertEqual(filtered[2], self.translator_tool) + + filtered = self.tools.filter_where(lambda tool: False) + + self.assertIsInstance(filtered, ToolCollection) + self.assertEqual(len(filtered), 0) + filtered = self.tools.filter_where(lambda tool: len(tool.name) > 8) + + self.assertIsInstance(filtered, ToolCollection) + self.assertEqual(len(filtered), 2) + self.assertEqual(filtered[0], self.calculator_tool) + self.assertEqual(filtered[1], self.translator_tool) From e4cb8bf797ff882843f8fb247d48744bfb102f32 Mon Sep 17 00:00:00 2001 From: Lorenze Jay <63378463+lorenzejay@users.noreply.github.com> Date: Wed, 25 Jun 2025 10:17:26 -0700 Subject: [PATCH 348/391] Lorenze/better env vars setup enterprise tools (#343) * refactor: remove token validation from EnterpriseActionKitToolAdapter and CrewaiEnterpriseTools This commit simplifies the initialization of the EnterpriseActionKitToolAdapter and CrewaiEnterpriseTools by removing the explicit validation for the enterprise action token. The token can now be set to None without raising an error, allowing for more flexible usage. * added loggers for monitoring * fixed typo * fix: enhance token handling in EnterpriseActionKitToolAdapter and CrewaiEnterpriseTools This commit improves the handling of the enterprise action token by allowing it to be fetched from environment variables if not provided. It adds checks to ensure the token is set before making API requests, enhancing robustness and flexibility. * removed redundancy * test: add new test for environment token fallback in CrewaiEnterpriseTools This update introduces a new test case to verify that the environment token is used when no token is provided during the initialization of CrewaiEnterpriseTools. Additionally, minor formatting adjustments were made to existing assertions for consistency. * test: update environment token test to clear environment variables This change modifies the test for CrewaiEnterpriseTools to ensure that the environment variables are cleared before setting the test token. This ensures a clean test environment and prevents potential interference from other tests. 
* drop redundancy --- src/crewai_tools/adapters/enterprise_adapter.py | 15 ++++++++++++--- .../crewai_enterprise_tools.py | 6 ++++-- tests/tools/crewai_enterprise_tools_test.py | 13 +++++++++---- 3 files changed, 25 insertions(+), 9 deletions(-) diff --git a/src/crewai_tools/adapters/enterprise_adapter.py b/src/crewai_tools/adapters/enterprise_adapter.py index 34238602e..6799d7ea8 100644 --- a/src/crewai_tools/adapters/enterprise_adapter.py +++ b/src/crewai_tools/adapters/enterprise_adapter.py @@ -1,7 +1,8 @@ -import requests -from pydantic import Field, create_model -from typing import List, Any, Dict, Optional +import os import json +import requests +from typing import List, Any, Dict, Optional +from pydantic import Field, create_model from crewai.tools import BaseTool # DEFAULTS @@ -146,6 +147,14 @@ class EnterpriseActionKitToolAdapter: def _fetch_actions(self): """Fetch available actions from the API.""" try: + if ( + self.enterprise_action_token is None + or self.enterprise_action_token == "" + ): + self.enterprise_action_token = os.environ.get( + "CREWAI_ENTERPRISE_TOOLS_TOKEN" + ) + actions_url = f"{self.enterprise_action_kit_project_url}/{self.enterprise_action_kit_project_id}/actions" headers = {"Authorization": f"Bearer {self.enterprise_action_token}"} params = {"format": "json_schema"} diff --git a/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py b/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py index e531afeed..302d34164 100644 --- a/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py +++ b/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py @@ -31,9 +31,11 @@ def CrewaiEnterpriseTools( Returns: A ToolCollection of BaseTool instances for enterprise actions """ - if enterprise_token is None: + + if enterprise_token is None or enterprise_token == "": enterprise_token = os.environ.get("CREWAI_ENTERPRISE_TOOLS_TOKEN") - logger.warning("No enterprise token provided") + if not enterprise_token: + logger.warning("No enterprise token provided") adapter_kwargs = {"enterprise_action_token": enterprise_token} diff --git a/tests/tools/crewai_enterprise_tools_test.py b/tests/tools/crewai_enterprise_tools_test.py index 7a649028d..c49d35afb 100644 --- a/tests/tools/crewai_enterprise_tools_test.py +++ b/tests/tools/crewai_enterprise_tools_test.py @@ -15,7 +15,7 @@ class TestCrewaiEnterpriseTools(unittest.TestCase): self._create_mock_tool("tool3", "Tool 3 Description"), ] self.adapter_patcher = patch( - 'crewai_tools.tools.crewai_enterprise_tools.crewai_enterprise_tools.EnterpriseActionKitToolAdapter' + "crewai_tools.tools.crewai_enterprise_tools.crewai_enterprise_tools.EnterpriseActionKitToolAdapter" ) self.MockAdapter = self.adapter_patcher.start() @@ -55,16 +55,21 @@ class TestCrewaiEnterpriseTools(unittest.TestCase): CrewaiEnterpriseTools( enterprise_token="test-token", enterprise_action_kit_project_id="project-id", - enterprise_action_kit_project_url="project-url" + enterprise_action_kit_project_url="project-url", ) self.MockAdapter.assert_called_once_with( enterprise_action_token="test-token", enterprise_action_kit_project_id="project-id", - enterprise_action_kit_project_url="project-url" + enterprise_action_kit_project_url="project-url", ) @patch.dict(os.environ, {"CREWAI_ENTERPRISE_TOOLS_TOKEN": "env-token"}) def test_uses_environment_token(self): CrewaiEnterpriseTools() - self.MockAdapter.assert_called_once_with(enterprise_action_token="env-token") \ No newline at end of file + 
self.MockAdapter.assert_called_once_with(enterprise_action_token="env-token") + + @patch.dict(os.environ, {"CREWAI_ENTERPRISE_TOOLS_TOKEN": "env-token"}) + def test_uses_environment_token_when_no_token_provided(self): + CrewaiEnterpriseTools(enterprise_token="") + self.MockAdapter.assert_called_once_with(enterprise_action_token="env-token") From 8723e66807f73bdad7115a26efddee5ff87955e6 Mon Sep 17 00:00:00 2001 From: Lorenze Jay <63378463+lorenzejay@users.noreply.github.com> Date: Wed, 25 Jun 2025 11:14:41 -0700 Subject: [PATCH 349/391] feat: add support for parsing actions list from environment variables (#346) * feat: add support for parsing actions list from environment variables This commit introduces a new function, _parse_actions_list, to handle the parsing of a string representation of a list of tool names from environment variables. The CrewaiEnterpriseTools now utilizes this function to filter tools based on the parsed actions list, enhancing flexibility in tool selection. Additionally, a new test case is added to verify the correct usage of the environment actions list. * test: simplify environment actions list test setup This commit refactors the test for CrewaiEnterpriseTools to streamline the setup of environment variables. The environment token and actions list are now set in a single patch.dict call, improving readability and reducing redundancy in the test code. --- .../crewai_enterprise_tools.py | 28 ++++++++++++++++++- tests/tools/crewai_enterprise_tools_test.py | 13 +++++++++ 2 files changed, 40 insertions(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py b/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py index 302d34164..0a56dee67 100644 --- a/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py +++ b/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py @@ -5,6 +5,7 @@ Crewai Enterprise Tools import os import typing as t import logging +import json from crewai.tools import BaseTool from crewai_tools.adapters.enterprise_adapter import EnterpriseActionKitToolAdapter from crewai_tools.adapters.tool_collection import ToolCollection @@ -50,6 +51,31 @@ def CrewaiEnterpriseTools( adapter = EnterpriseActionKitToolAdapter(**adapter_kwargs) all_tools = adapter.tools() + parsed_actions_list = _parse_actions_list(actions_list) # Filter tools based on the provided list - return ToolCollection(all_tools).filter_by_names(actions_list) + return ToolCollection(all_tools).filter_by_names(parsed_actions_list) + + +# ENTERPRISE INJECTION ONLY +def _parse_actions_list(actions_list: t.Optional[t.List[str]]) -> t.List[str] | None: + """Parse a string representation of a list of tool names to a list of tool names. + + Args: + actions_list: A string representation of a list of tool names. + + Returns: + A list of tool names. 
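+
+    Example (illustrative):
+        With CREWAI_ENTERPRISE_TOOLS_ACTIONS_LIST='["tool1", "tool3"]' set and
+        no explicit argument, the parsed result is ["tool1", "tool3"].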
+ """ + if actions_list is not None: + return actions_list + + actions_list_from_env = os.environ.get("CREWAI_ENTERPRISE_TOOLS_ACTIONS_LIST") + if actions_list_from_env is None: + return None + + try: + return json.loads(actions_list_from_env) + except json.JSONDecodeError: + logger.warning(f"Failed to parse actions_list as JSON: {actions_list_from_env}") + return None diff --git a/tests/tools/crewai_enterprise_tools_test.py b/tests/tools/crewai_enterprise_tools_test.py index c49d35afb..d7a868472 100644 --- a/tests/tools/crewai_enterprise_tools_test.py +++ b/tests/tools/crewai_enterprise_tools_test.py @@ -73,3 +73,16 @@ class TestCrewaiEnterpriseTools(unittest.TestCase): def test_uses_environment_token_when_no_token_provided(self): CrewaiEnterpriseTools(enterprise_token="") self.MockAdapter.assert_called_once_with(enterprise_action_token="env-token") + + @patch.dict( + os.environ, + { + "CREWAI_ENTERPRISE_TOOLS_TOKEN": "env-token", + "CREWAI_ENTERPRISE_TOOLS_ACTIONS_LIST": '["tool1", "tool3"]', + }, + ) + def test_uses_environment_actions_list(self): + tools = CrewaiEnterpriseTools() + self.assertEqual(len(tools), 2) + self.assertEqual(tools[0].name, "tool1") + self.assertEqual(tools[1].name, "tool3") From 180cc38330e4eaacfdb0bb168f5812e94eeda85e Mon Sep 17 00:00:00 2001 From: Lorenze Jay <63378463+lorenzejay@users.noreply.github.com> Date: Fri, 27 Jun 2025 11:27:48 -0700 Subject: [PATCH 350/391] refactor: update Firecrawl tools to improve configuration and error handling (#351) - Added TYPE_CHECKING imports for FirecrawlApp to enhance type safety. - Updated configuration keys in FirecrawlCrawlWebsiteTool and FirecrawlScrapeWebsiteTool to camelCase for consistency. - Introduced error handling in the _run methods of both tools to ensure FirecrawlApp is properly initialized before usage. - Adjusted parameters passed to crawl_url and scrape_url methods to use 'params' instead of unpacking the config dictionary directly. 
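
For example, a config that previously used snake_case keys is now written with
the camelCase keys the Firecrawl API expects, and it is forwarded to the client
as `params` (a sketch, assuming the firecrawl-py client is installed):

    from crewai_tools import FirecrawlScrapeWebsiteTool

    # camelCase keys are passed through as params=self.config to scrape_url
    tool = FirecrawlScrapeWebsiteTool(
        config={"formats": ["markdown"], "onlyMainContent": True, "waitFor": 0}
    )
    result = tool.run(url="https://example.com")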
--- .../firecrawl_crawl_website_tool.py | 36 +++++++++++-------- .../firecrawl_scrape_website_tool.py | 32 +++++++++++------ .../firecrawl_search_tool.py | 2 +- 3 files changed, 44 insertions(+), 26 deletions(-) diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py index 6642fbd54..0d2ef325e 100644 --- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py @@ -1,12 +1,17 @@ -from typing import Any, Optional, Type, List +from typing import Any, Optional, Type, List, TYPE_CHECKING from crewai.tools import BaseTool from pydantic import BaseModel, ConfigDict, Field, PrivateAttr +if TYPE_CHECKING: + from firecrawl import FirecrawlApp + try: - from firecrawl import FirecrawlApp, ScrapeOptions + from firecrawl import FirecrawlApp + + FIRECRAWL_AVAILABLE = True except ImportError: - FirecrawlApp = Any + FIRECRAWL_AVAILABLE = False class FirecrawlCrawlWebsiteToolSchema(BaseModel): @@ -42,16 +47,16 @@ class FirecrawlCrawlWebsiteTool(BaseTool): api_key: Optional[str] = None config: Optional[dict[str, Any]] = Field( default_factory=lambda: { - "max_depth": 2, - "ignore_sitemap": True, - "limit": 100, - "allow_backward_links": False, - "allow_external_links": False, - "scrape_options": ScrapeOptions( - formats=["markdown", "screenshot", "links"], - only_main_content=True, - timeout=30000, - ), + "maxDepth": 2, + "ignoreSitemap": True, + "limit": 10, + "allowBackwardLinks": False, + "allowExternalLinks": False, + "scrapeOptions": { + "formats": ["markdown", "screenshot", "links"], + "onlyMainContent": True, + "timeout": 10000, + }, } ) _firecrawl: Optional["FirecrawlApp"] = PrivateAttr(None) @@ -88,7 +93,10 @@ class FirecrawlCrawlWebsiteTool(BaseTool): ) def _run(self, url: str): - return self._firecrawl.crawl_url(url, **self.config) + if not self._firecrawl: + raise RuntimeError("FirecrawlApp not properly initialized") + + return self._firecrawl.crawl_url(url, poll_interval=2, params=self.config) try: diff --git a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py index acb1c0af5..31742340d 100644 --- a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py @@ -1,16 +1,23 @@ -from typing import Any, Optional, Type, Dict, List +from typing import Any, Optional, Type, Dict, List, TYPE_CHECKING from crewai.tools import BaseTool from pydantic import BaseModel, ConfigDict, Field, PrivateAttr +if TYPE_CHECKING: + from firecrawl import FirecrawlApp + try: from firecrawl import FirecrawlApp + + FIRECRAWL_AVAILABLE = True except ImportError: - FirecrawlApp = Any + FIRECRAWL_AVAILABLE = False + class FirecrawlScrapeWebsiteToolSchema(BaseModel): url: str = Field(description="Website URL") + class FirecrawlScrapeWebsiteTool(BaseTool): """ Tool for scraping webpages using Firecrawl. To run this tool, you need to have a Firecrawl API key. @@ -21,11 +28,11 @@ class FirecrawlScrapeWebsiteTool(BaseTool): Default configuration options: formats (list[str]): Content formats to return. Default: ["markdown"] - only_main_content (bool): Only return main content. Default: True - include_tags (list[str]): Tags to include. 
Default: [] - exclude_tags (list[str]): Tags to exclude. Default: [] + onlyMainContent (bool): Only return main content. Default: True + includeTags (list[str]): Tags to include. Default: [] + excludeTags (list[str]): Tags to exclude. Default: [] headers (dict): Headers to include. Default: {} - wait_for (int): Time to wait for page to load in ms. Default: 0 + waitFor (int): Time to wait for page to load in ms. Default: 0 json_options (dict): Options for JSON extraction. Default: None """ @@ -39,11 +46,11 @@ class FirecrawlScrapeWebsiteTool(BaseTool): config: Dict[str, Any] = Field( default_factory=lambda: { "formats": ["markdown"], - "only_main_content": True, - "include_tags": [], - "exclude_tags": [], + "onlyMainContent": True, + "includeTags": [], + "excludeTags": [], "headers": {}, - "wait_for": 0, + "waitFor": 0, } ) @@ -74,7 +81,10 @@ class FirecrawlScrapeWebsiteTool(BaseTool): self._firecrawl = FirecrawlApp(api_key=api_key) def _run(self, url: str): - return self._firecrawl.scrape_url(url, **self.config) + if not self._firecrawl: + raise RuntimeError("FirecrawlApp not properly initialized") + + return self._firecrawl.scrape_url(url, params=self.config) try: diff --git a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py index 0fb091b68..1cad4819a 100644 --- a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py +++ b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py @@ -98,7 +98,7 @@ class FirecrawlSearchTool(BaseTool): return self._firecrawl.search( query=query, - **self.config, + params=self.config, ) From 26652e5e245737c8a626749865c5b18a7ee257dc Mon Sep 17 00:00:00 2001 From: Thiago Moretto <168731+thiagomoretto@users.noreply.github.com> Date: Tue, 1 Jul 2025 15:08:30 -0300 Subject: [PATCH 351/391] Mapping required env vars of more tools (#353) --- src/crewai_tools/tools/composio_tool/composio_tool.py | 5 ++++- src/crewai_tools/tools/dalle_tool/dalle_tool.py | 8 ++++++-- .../firecrawl_crawl_website_tool.py | 5 ++++- .../firecrawl_scrape_website_tool.py | 5 ++++- .../tools/firecrawl_search_tool/firecrawl_search_tool.py | 5 ++++- .../tools/serply_api_tool/serply_job_search_tool.py | 6 +++++- .../tools/serply_api_tool/serply_news_search_tool.py | 7 +++++-- .../tools/serply_api_tool/serply_scholar_search_tool.py | 7 +++++-- .../serply_api_tool/serply_webpage_to_markdown_tool.py | 6 +++++- src/crewai_tools/tools/vision_tool/vision_tool.py | 7 +++++-- 10 files changed, 47 insertions(+), 14 deletions(-) diff --git a/src/crewai_tools/tools/composio_tool/composio_tool.py b/src/crewai_tools/tools/composio_tool/composio_tool.py index 4823441bf..019b7895c 100644 --- a/src/crewai_tools/tools/composio_tool/composio_tool.py +++ b/src/crewai_tools/tools/composio_tool/composio_tool.py @@ -5,13 +5,16 @@ Composio tools wrapper. 
import typing as t import typing_extensions as te -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar class ComposioTool(BaseTool): """Wrapper for composio tools.""" composio_action: t.Callable + env_vars: t.List[EnvVar] = [ + EnvVar(name="COMPOSIO_API_KEY", description="API key for Composio services", required=True), + ] def _run(self, *args: t.Any, **kwargs: t.Any) -> t.Any: """Run the composio action with given arguments.""" diff --git a/src/crewai_tools/tools/dalle_tool/dalle_tool.py b/src/crewai_tools/tools/dalle_tool/dalle_tool.py index 8957d9636..6a4a9e84f 100644 --- a/src/crewai_tools/tools/dalle_tool/dalle_tool.py +++ b/src/crewai_tools/tools/dalle_tool/dalle_tool.py @@ -1,7 +1,7 @@ import json -from typing import Type +from typing import List, Type -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from openai import OpenAI from pydantic import BaseModel, Field @@ -22,6 +22,10 @@ class DallETool(BaseTool): quality: str = "standard" n: int = 1 + env_vars: List[EnvVar] = [ + EnvVar(name="OPENAI_API_KEY", description="API key for OpenAI services", required=True), + ] + def _run(self, **kwargs) -> str: client = OpenAI() diff --git a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py index 0d2ef325e..9c99fe8d4 100644 --- a/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py @@ -1,6 +1,6 @@ from typing import Any, Optional, Type, List, TYPE_CHECKING -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, ConfigDict, Field, PrivateAttr if TYPE_CHECKING: @@ -61,6 +61,9 @@ class FirecrawlCrawlWebsiteTool(BaseTool): ) _firecrawl: Optional["FirecrawlApp"] = PrivateAttr(None) package_dependencies: List[str] = ["firecrawl-py"] + env_vars: List[EnvVar] = [ + EnvVar(name="FIRECRAWL_API_KEY", description="API key for Firecrawl services", required=True), + ] def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) diff --git a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py index 31742340d..816e40159 100644 --- a/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py +++ b/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py @@ -1,6 +1,6 @@ from typing import Any, Optional, Type, Dict, List, TYPE_CHECKING -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, ConfigDict, Field, PrivateAttr if TYPE_CHECKING: @@ -56,6 +56,9 @@ class FirecrawlScrapeWebsiteTool(BaseTool): _firecrawl: Optional["FirecrawlApp"] = PrivateAttr(None) package_dependencies: List[str] = ["firecrawl-py"] + env_vars: List[EnvVar] = [ + EnvVar(name="FIRECRAWL_API_KEY", description="API key for Firecrawl services", required=True), + ] def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) diff --git a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py index 1cad4819a..ba4d4c242 100644 --- a/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py +++ 
b/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING, Any, Dict, Optional, Type, List -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, ConfigDict, Field, PrivateAttr if TYPE_CHECKING: @@ -58,6 +58,9 @@ class FirecrawlSearchTool(BaseTool): ) _firecrawl: Optional["FirecrawlApp"] = PrivateAttr(None) package_dependencies: List[str] = ["firecrawl-py"] + env_vars: List[EnvVar] = [ + EnvVar(name="FIRECRAWL_API_KEY", description="API key for Firecrawl services", required=True), + ] def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) diff --git a/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py index 1c0c665b5..9d99fa01b 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py @@ -1,10 +1,11 @@ import os -from typing import Any, Optional, Type +from typing import Any, List, Optional, Type from urllib.parse import urlencode import requests from pydantic import BaseModel, Field +from crewai.tools import EnvVar from crewai_tools.tools.rag.rag_tool import RagTool @@ -30,6 +31,9 @@ class SerplyJobSearchTool(RagTool): - Currently only supports US """ headers: Optional[dict] = {} + env_vars: List[EnvVar] = [ + EnvVar(name="SERPLY_API_KEY", description="API key for Serply services", required=True), + ] def __init__(self, **kwargs): super().__init__(**kwargs) diff --git a/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py index c058091a2..5a2b27798 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py @@ -1,9 +1,9 @@ import os -from typing import Any, Optional, Type +from typing import Any, List, Optional, Type from urllib.parse import urlencode import requests -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, Field @@ -23,6 +23,9 @@ class SerplyNewsSearchTool(BaseTool): proxy_location: Optional[str] = "US" headers: Optional[dict] = {} limit: Optional[int] = 10 + env_vars: List[EnvVar] = [ + EnvVar(name="SERPLY_API_KEY", description="API key for Serply services", required=True), + ] def __init__( self, limit: Optional[int] = 10, proxy_location: Optional[str] = "US", **kwargs diff --git a/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py index 3ed9de4ab..c49734c56 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py @@ -1,9 +1,9 @@ import os -from typing import Any, Optional, Type +from typing import Any, List, Optional, Type from urllib.parse import urlencode import requests -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, Field @@ -26,6 +26,9 @@ class SerplyScholarSearchTool(BaseTool): hl: Optional[str] = "us" proxy_location: Optional[str] = "US" headers: Optional[dict] = {} + env_vars: List[EnvVar] = [ + EnvVar(name="SERPLY_API_KEY", description="API key for Serply services", required=True), + ] def __init__(self, hl: str = "us", proxy_location: Optional[str] = "US", **kwargs): """ diff 
--git a/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py b/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py index c7678f852..fa2404f75 100644 --- a/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py +++ b/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py @@ -1,7 +1,8 @@ import os -from typing import Any, Optional, Type +from typing import Any, List, Optional, Type import requests +from crewai.tools import EnvVar from pydantic import BaseModel, Field from crewai_tools.tools.rag.rag_tool import RagTool @@ -25,6 +26,9 @@ class SerplyWebpageToMarkdownTool(RagTool): request_url: str = "https://api.serply.io/v1/request" proxy_location: Optional[str] = "US" headers: Optional[dict] = {} + env_vars: List[EnvVar] = [ + EnvVar(name="SERPLY_API_KEY", description="API key for Serply services", required=True), + ] def __init__(self, proxy_location: Optional[str] = "US", **kwargs): """ diff --git a/src/crewai_tools/tools/vision_tool/vision_tool.py b/src/crewai_tools/tools/vision_tool/vision_tool.py index cd4f5e74c..6df658898 100644 --- a/src/crewai_tools/tools/vision_tool/vision_tool.py +++ b/src/crewai_tools/tools/vision_tool/vision_tool.py @@ -1,9 +1,9 @@ import base64 from pathlib import Path -from typing import Optional, Type +from typing import List, Optional, Type from crewai import LLM -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, PrivateAttr, field_validator @@ -44,6 +44,9 @@ class VisionTool(BaseTool): "This tool uses OpenAI's Vision API to describe the contents of an image." ) args_schema: Type[BaseModel] = ImagePromptSchema + env_vars: List[EnvVar] = [ + EnvVar(name="OPENAI_API_KEY", description="API key for OpenAI services", required=True), + ] _model: str = PrivateAttr(default="gpt-4o-mini") _llm: Optional[LLM] = PrivateAttr(default=None) From d53e96fcd7a0ddc1a821fa55bd01b9af9a6d3ae6 Mon Sep 17 00:00:00 2001 From: Emmanuel Ferdman Date: Wed, 2 Jul 2025 19:25:26 +0300 Subject: [PATCH 352/391] fix: update Pydantic schema access (#337) Signed-off-by: Emmanuel Ferdman --- tests/base_tool_test.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/base_tool_test.py b/tests/base_tool_test.py index e6f4f127d..dbb7fe20e 100644 --- a/tests/base_tool_test.py +++ b/tests/base_tool_test.py @@ -16,7 +16,7 @@ def test_creating_a_tool_using_annotation(): my_tool.description == "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, you agent will need this information to use it." ) - assert my_tool.args_schema.schema()["properties"] == { + assert my_tool.args_schema.model_json_schema()["properties"] == { "question": {"title": "Question", "type": "string"} } assert ( @@ -30,7 +30,7 @@ def test_creating_a_tool_using_annotation(): converted_tool.description == "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, you agent will need this information to use it." 
) - assert converted_tool.args_schema.schema()["properties"] == { + assert converted_tool.args_schema.model_json_schema()["properties"] == { "question": {"title": "Question", "type": "string"} } assert ( @@ -54,7 +54,7 @@ def test_creating_a_tool_using_baseclass(): my_tool.description == "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, you agent will need this information to use it." ) - assert my_tool.args_schema.schema()["properties"] == { + assert my_tool.args_schema.model_json_schema()["properties"] == { "question": {"title": "Question", "type": "string"} } assert ( @@ -68,7 +68,7 @@ def test_creating_a_tool_using_baseclass(): converted_tool.description == "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, you agent will need this information to use it." ) - assert converted_tool.args_schema.schema()["properties"] == { + assert converted_tool.args_schema.model_json_schema()["properties"] == { "question": {"title": "Question", "type": "string"} } assert ( From b4786d86b0946abfefe943d9d7c36c6d2cc94d8b Mon Sep 17 00:00:00 2001 From: Lorenze Jay <63378463+lorenzejay@users.noreply.github.com> Date: Wed, 2 Jul 2025 12:54:09 -0700 Subject: [PATCH 353/391] refactor: enhance schema handling in EnterpriseActionTool (#355) * refactor: enhance schema handling in EnterpriseActionTool - Extracted schema property and required field extraction into separate methods for better readability and maintainability. - Introduced methods to analyze field types and create Pydantic field definitions based on nullability and requirement status. - Updated the _run method to handle required nullable fields, ensuring they are set to None if not provided in kwargs. * refactor: streamline nullable field handling in EnterpriseActionTool - Removed commented-out code related to handling required nullable fields for clarity. - Simplified the logic in the _run method to focus on processing parameters without unnecessary comments. 
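As a rough sketch of the field mapping described above (the `title`/`due_date` schema below is hypothetical and not taken from this diff), the nullable-field handling behaves like this:

```python
from typing import Optional

from pydantic import Field, create_model

# Hypothetical action schema: "title" is a plain required string, while
# "due_date" is required but nullable (declared via "anyOf" with a "null" branch).
schema_props = {
    "title": {"type": "string", "description": "Task title"},
    "due_date": {
        "anyOf": [{"type": "string"}, {"type": "null"}],
        "description": "Due date, may be null",
    },
}
required = ["title", "due_date"]

type_mapping = {"string": str, "integer": int, "number": float, "boolean": bool}

field_definitions = {}
for name, details in schema_props.items():
    any_of = details.get("anyOf", [])
    is_nullable = any(t.get("type") == "null" for t in any_of)
    non_null = [t for t in any_of if t.get("type") != "null"]
    json_type = (non_null[0] if non_null else details).get("type", "string")
    py_type = type_mapping.get(json_type, str)
    desc = details.get("description", "")
    if is_nullable or name not in required:
        # Nullable (or simply optional) fields become Optional[...] with a None default.
        field_definitions[name] = (
            Optional[py_type],
            Field(default=None, description=desc),
        )
    else:
        # Required, non-nullable fields keep their plain type and no default.
        field_definitions[name] = (py_type, Field(description=desc))

ActionModel = create_model("ActionModel", **field_definitions)
print(ActionModel(title="ship release"))  # title='ship release' due_date=None
```

A required-but-nullable parameter therefore validates even when the caller omits it, which is what lets `_run` backfill such fields with `None` before filtering out unset parameters.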
--- .../adapters/enterprise_adapter.py | 114 ++++++++++++++---- 1 file changed, 93 insertions(+), 21 deletions(-) diff --git a/src/crewai_tools/adapters/enterprise_adapter.py b/src/crewai_tools/adapters/enterprise_adapter.py index 6799d7ea8..96d64af8b 100644 --- a/src/crewai_tools/adapters/enterprise_adapter.py +++ b/src/crewai_tools/adapters/enterprise_adapter.py @@ -37,34 +37,18 @@ class EnterpriseActionTool(BaseTool): enterprise_action_kit_project_url: str = ENTERPRISE_ACTION_KIT_PROJECT_URL, enterprise_action_kit_project_id: str = ENTERPRISE_ACTION_KIT_PROJECT_ID, ): - schema_props = ( - action_schema.get("function", {}) - .get("parameters", {}) - .get("properties", {}) - ) - required = ( - action_schema.get("function", {}).get("parameters", {}).get("required", []) - ) + schema_props, required = self._extract_schema_info(action_schema) # Define field definitions for the model field_definitions = {} for param_name, param_details in schema_props.items(): - param_type = str # Default to string type param_desc = param_details.get("description", "") is_required = param_name in required + is_nullable, param_type = self._analyze_field_type(param_details) - # Basic type mapping (can be extended) - if param_details.get("type") == "integer": - param_type = int - elif param_details.get("type") == "number": - param_type = float - elif param_details.get("type") == "boolean": - param_type = bool - - # Create field with appropriate type and config - field_definitions[param_name] = ( - param_type if is_required else Optional[param_type], - Field(description=param_desc), + # Create field definition based on nullable and required status + field_definitions[param_name] = self._create_field_definition( + param_type, is_required, is_nullable, param_desc ) # Create the model @@ -89,9 +73,97 @@ class EnterpriseActionTool(BaseTool): if enterprise_action_kit_project_url is not None: self.enterprise_action_kit_project_url = enterprise_action_kit_project_url + def _extract_schema_info( + self, action_schema: Dict[str, Any] + ) -> tuple[Dict[str, Any], List[str]]: + """Extract schema properties and required fields from action schema.""" + schema_props = ( + action_schema.get("function", {}) + .get("parameters", {}) + .get("properties", {}) + ) + required = ( + action_schema.get("function", {}).get("parameters", {}).get("required", []) + ) + return schema_props, required + + def _analyze_field_type(self, param_details: Dict[str, Any]) -> tuple[bool, type]: + """Analyze field type and nullability from parameter details.""" + is_nullable = False + param_type = str # Default type + + if "anyOf" in param_details: + any_of_types = param_details["anyOf"] + is_nullable = any(t.get("type") == "null" for t in any_of_types) + non_null_types = [t for t in any_of_types if t.get("type") != "null"] + if non_null_types: + first_type = non_null_types[0].get("type", "string") + param_type = self._map_json_type_to_python( + first_type, non_null_types[0] + ) + else: + json_type = param_details.get("type", "string") + param_type = self._map_json_type_to_python(json_type, param_details) + is_nullable = json_type == "null" + + return is_nullable, param_type + + def _create_field_definition( + self, param_type: type, is_required: bool, is_nullable: bool, param_desc: str + ) -> tuple: + """Create Pydantic field definition based on type, requirement, and nullability.""" + if is_nullable: + return ( + Optional[param_type], + Field(default=None, description=param_desc), + ) + elif is_required: + return ( + param_type, + 
Field(description=param_desc), + ) + else: + return ( + Optional[param_type], + Field(default=None, description=param_desc), + ) + + def _map_json_type_to_python( + self, json_type: str, param_details: Dict[str, Any] + ) -> type: + """Map JSON schema types to Python types.""" + type_mapping = { + "string": str, + "integer": int, + "number": float, + "boolean": bool, + "array": list, + "object": dict, + } + return type_mapping.get(json_type, str) + + def _get_required_nullable_fields(self) -> List[str]: + """Get a list of required nullable fields from the action schema.""" + schema_props, required = self._extract_schema_info(self.action_schema) + + required_nullable_fields = [] + for param_name in required: + param_details = schema_props.get(param_name, {}) + is_nullable, _ = self._analyze_field_type(param_details) + if is_nullable: + required_nullable_fields.append(param_name) + + return required_nullable_fields + def _run(self, **kwargs) -> str: """Execute the specific enterprise action with validated parameters.""" try: + required_nullable_fields = self._get_required_nullable_fields() + + for field_name in required_nullable_fields: + if field_name not in kwargs: + kwargs[field_name] = None + params = {k: v for k, v in kwargs.items() if v is not None} api_url = f"{self.enterprise_action_kit_project_url}/{self.enterprise_action_kit_project_id}/actions" From c45e92bd17d81962889ae7a90a337aa9ef2a7a53 Mon Sep 17 00:00:00 2001 From: gautham <91133513+capemox@users.noreply.github.com> Date: Tue, 8 Jul 2025 20:24:54 +0530 Subject: [PATCH 354/391] Add Couchbase as a tool (#264) * - Added CouchbaseFTSVectorStore as a CrewAI tool. - Wrote a README to setup the tool. - Wrote test cases. - Added Couchbase as an optional dependency in the project. * Fixed naming in some places. Added docstrings. Added instructions on how to create a vector search index. * Fixed pyproject.toml * error handling and response format - Removed unnecessary ImportError for missing 'couchbase' package. - Changed response format from a concatenated string to a JSON array for search results. - Updated error handling to return error messages instead of raising exceptions in certain cases. - Adjusted tests to reflect changes in response format and error handling. * Update dependencies in pyproject.toml and uv.lock - Changed pydantic version from 2.6.1 to 2.10.6 in both pyproject.toml and uv.lock. - Updated crewai-tools version from 0.42.2 to 0.42.3 in uv.lock. - Adjusted pydantic-core version from 2.33.1 to 2.27.2 in uv.lock, reflecting the new pydantic version. 
* Removed restrictive pydantic version and updated uv.lock * synced lockfile * regenerated lockfile * updated lockfile * regenerated lockfile * Update tool specifications for * Fix test cases --------- Co-authored-by: AayushTyagi1 Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- src/crewai_tools/__init__.py | 1 + src/crewai_tools/tools/__init__.py | 1 + .../tools/couchbase_tool/README.md | 62 +++ .../tools/couchbase_tool/couchbase_tool.py | 241 ++++++++++++ tests/tools/couchbase_tool_test.py | 365 ++++++++++++++++++ 5 files changed, 670 insertions(+) create mode 100644 src/crewai_tools/tools/couchbase_tool/README.md create mode 100644 src/crewai_tools/tools/couchbase_tool/couchbase_tool.py create mode 100644 tests/tools/couchbase_tool_test.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 36624f355..8df620788 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -14,6 +14,7 @@ from .tools import ( CodeDocsSearchTool, CodeInterpreterTool, ComposioTool, + CouchbaseFTSVectorSearchTool, CrewaiEnterpriseTools, CSVSearchTool, DallETool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 957d2f1e2..47f3f5f80 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -5,6 +5,7 @@ from .browserbase_load_tool.browserbase_load_tool import BrowserbaseLoadTool from .code_docs_search_tool.code_docs_search_tool import CodeDocsSearchTool from .code_interpreter_tool.code_interpreter_tool import CodeInterpreterTool from .composio_tool.composio_tool import ComposioTool +from .couchbase_tool.couchbase_tool import CouchbaseFTSVectorSearchTool from .crewai_enterprise_tools.crewai_enterprise_tools import CrewaiEnterpriseTools from .csv_search_tool.csv_search_tool import CSVSearchTool from .dalle_tool.dalle_tool import DallETool diff --git a/src/crewai_tools/tools/couchbase_tool/README.md b/src/crewai_tools/tools/couchbase_tool/README.md new file mode 100644 index 000000000..382f6eae0 --- /dev/null +++ b/src/crewai_tools/tools/couchbase_tool/README.md @@ -0,0 +1,62 @@ +# CouchbaseFTSVectorSearchTool +## Description +Couchbase is a NoSQL database with vector search capabilities. Users can store and query vector embeddings. You can learn more about Couchbase vector search here: https://docs.couchbase.com/cloud/vector-search/vector-search.html + +This tool is specifically crafted for performing semantic search using Couchbase. Use this tool to find semantically similar docs to a given query. + +## Installation +Install the crewai_tools package by executing the following command in your terminal: + +```shell +uv pip install 'crewai[tools]' +``` + +## Setup +Before instantiating the tool, you need a Couchbase cluster. +- Create a cluster on [Couchbase Capella](https://docs.couchbase.com/cloud/get-started/create-account.html), Couchbase's cloud database solution. +- Create a [local Couchbase server](https://docs.couchbase.com/server/current/getting-started/start-here.html). + +You will need to create a bucket, scope and collection on the cluster. Then, [follow this guide](https://docs.couchbase.com/python-sdk/current/hello-world/start-using-sdk.html) to create a Couchbase Cluster object and load documents into your collection. + +Follow the docs below to create a vector search index on Couchbase. 
+- [Create a vector search index on Couchbase Capella.](https://docs.couchbase.com/cloud/vector-search/create-vector-search-index-ui.html)
+- [Create a vector search index on your local Couchbase server.](https://docs.couchbase.com/server/current/vector-search/create-vector-search-index-ui.html)
+
+Ensure that the `Dimension` field in the index matches the embedding model. For example, OpenAI's `text-embedding-3-small` model produces 1536-dimensional embeddings, so the `Dimension` field in the index must be set to 1536.
+
+## Example
+To utilize the CouchbaseFTSVectorSearchTool for different use cases, follow these examples:
+
+```python
+from crewai_tools import CouchbaseFTSVectorSearchTool
+
+# Instantiate a Couchbase Cluster object from the Couchbase SDK
+
+tool = CouchbaseFTSVectorSearchTool(
+    cluster=cluster,
+    collection_name="collection",
+    scope_name="scope",
+    bucket_name="bucket",
+    index_name="index",
+    embedding_function=embed_fn
+)
+
+# Adding the tool to an agent
+rag_agent = Agent(
+    name="rag_agent",
+    role="You are a helpful assistant that can answer questions with the help of the CouchbaseFTSVectorSearchTool.",
+    llm="gpt-4o-mini",
+    tools=[tool],
+)
+```
+
+## Arguments
+- `cluster`: An initialized Couchbase `Cluster` instance.
+- `bucket_name`: The name of the Couchbase bucket.
+- `scope_name`: The name of the scope within the bucket.
+- `collection_name`: The name of the collection within the scope.
+- `index_name`: The name of the search index (vector index).
+- `embedding_function`: A function that takes a string and returns its embedding (list of floats).
+- `embedding_key`: Name of the field in the search index storing the vector. (Optional, defaults to 'embedding')
+- `scoped_index`: Whether the index is scoped (True) or cluster-level (False). (Optional, defaults to True)
+- `limit`: The maximum number of search results to return. (Optional, defaults to 3)
\ No newline at end of file
diff --git a/src/crewai_tools/tools/couchbase_tool/couchbase_tool.py b/src/crewai_tools/tools/couchbase_tool/couchbase_tool.py
new file mode 100644
index 000000000..3017f694f
--- /dev/null
+++ b/src/crewai_tools/tools/couchbase_tool/couchbase_tool.py
@@ -0,0 +1,241 @@
+import json
+import os
+from typing import Any, Optional, Type, List, Dict, Callable
+
+try:
+    import couchbase.search as search
+    from couchbase.cluster import Cluster
+    from couchbase.options import SearchOptions
+    from couchbase.vector_search import VectorQuery, VectorSearch
+
+    COUCHBASE_AVAILABLE = True
+except ImportError:
+    COUCHBASE_AVAILABLE = False
+    search = Any
+    Cluster = Any
+    SearchOptions = Any
+    VectorQuery = Any
+    VectorSearch = Any
+
+from crewai.tools import BaseTool
+from pydantic import BaseModel, Field, SkipValidation
+
+
+class CouchbaseToolSchema(BaseModel):
+    """Input for CouchbaseTool."""
+
+    query: str = Field(
+        ...,
+        description="The query used to retrieve relevant information from the Couchbase database. Pass only the query, not the question.",
+    )
+
+class CouchbaseFTSVectorSearchTool(BaseTool):
+    """Tool to search the Couchbase database"""
+
+    model_config = {"arbitrary_types_allowed": True}
+    name: str = "CouchbaseFTSVectorSearchTool"
+    description: str = "A tool to search the Couchbase database for relevant information on internal documents."
+    args_schema: Type[BaseModel] = CouchbaseToolSchema
+    cluster: SkipValidation[Optional[Cluster]] = None
+    collection_name: Optional[str] = None
+    scope_name: Optional[str] = None
+    bucket_name: Optional[str] = None
+    index_name: Optional[str] = None
+    embedding_key: Optional[str] = Field(
+        default="embedding",
+        description="Name of the field in the search index that stores the vector"
+    )
+    scoped_index: Optional[bool] = Field(
+        default=True,
+        description="Specify whether the index is scoped. Is True by default."
+    )
+    limit: Optional[int] = Field(default=3)
+    embedding_function: SkipValidation[Callable[[str], List[float]]] = Field(
+        default=None,
+        description="A function that takes a string and returns a list of floats. This is used to embed the query before searching the database."
+    )
+
+    def _check_bucket_exists(self) -> bool:
+        """Check if the bucket exists in the linked Couchbase cluster"""
+        bucket_manager = self.cluster.buckets()
+        try:
+            bucket_manager.get_bucket(self.bucket_name)
+            return True
+        except Exception:
+            return False
+
+    def _check_scope_and_collection_exists(self) -> bool:
+        """Check if the scope and collection exist in the linked Couchbase bucket.
+        Raises a ValueError if either is not found"""
+        scope_collection_map: Dict[str, Any] = {}
+
+        # Get a list of all scopes in the bucket
+        for scope in self._bucket.collections().get_all_scopes():
+            scope_collection_map[scope.name] = []
+
+            # Get a list of all the collections in the scope
+            for collection in scope.collections:
+                scope_collection_map[scope.name].append(collection.name)
+
+        # Check if the scope exists
+        if self.scope_name not in scope_collection_map.keys():
+            raise ValueError(
+                f"Scope {self.scope_name} not found in Couchbase "
+                f"bucket {self.bucket_name}"
+            )
+
+        # Check if the collection exists in the scope
+        if self.collection_name not in scope_collection_map[self.scope_name]:
+            raise ValueError(
+                f"Collection {self.collection_name} not found in scope "
+                f"{self.scope_name} in Couchbase bucket {self.bucket_name}"
+            )
+
+        return True
+
+    def _check_index_exists(self) -> bool:
+        """Check if the Search index exists in the linked Couchbase cluster.
+        Raises a ValueError if the index does not exist"""
+        if self.scoped_index:
+            all_indexes = [
+                index.name for index in self._scope.search_indexes().get_all_indexes()
+            ]
+            if self.index_name not in all_indexes:
+                raise ValueError(
+                    f"Index {self.index_name} does not exist. "
+                    " Please create the index before searching."
+                )
+        else:
+            all_indexes = [
+                index.name for index in self.cluster.search_indexes().get_all_indexes()
+            ]
+            if self.index_name not in all_indexes:
+                raise ValueError(
+                    f"Index {self.index_name} does not exist. "
+                    " Please create the index before searching."
+                )
+
+        return True
+
+    def __init__(self, **kwargs):
+        """Initialize the CouchbaseFTSVectorSearchTool.
+
+        Args:
+            **kwargs: Keyword arguments to pass to the BaseTool constructor and
+                to configure the Couchbase connection and search parameters.
+                Requires 'cluster', 'bucket_name', 'scope_name',
+                'collection_name', 'index_name', and 'embedding_function'.
+
+        Raises:
+            ValueError: If required parameters are missing, the Couchbase cluster
+                cannot be reached, or the specified bucket, scope,
+                collection, or index does not exist.
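+
+        Note: these checks all run eagerly, so a misconfigured bucket, scope,
+        collection, or index name fails at construction time rather than on
+        the first search.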
+ """ + super().__init__(**kwargs) + if COUCHBASE_AVAILABLE: + try: + if not self.cluster: + raise ValueError("Cluster instance must be provided") + + if not self.bucket_name: + raise ValueError("Bucket name must be provided") + + if not self.scope_name: + raise ValueError("Scope name must be provided") + + if not self.collection_name: + raise ValueError("Collection name must be provided") + + if not self.index_name: + raise ValueError("Index name must be provided") + + if not self.embedding_function: + raise ValueError("Embedding function must be provided") + + self._bucket = self.cluster.bucket(self.bucket_name) + self._scope = self._bucket.scope(self.scope_name) + self._collection = self._scope.collection(self.collection_name) + except Exception as e: + raise ValueError( + "Error connecting to couchbase. " + "Please check the connection and credentials" + ) from e + + # check if bucket exists + if not self._check_bucket_exists(): + raise ValueError( + f"Bucket {self.bucket_name} does not exist. " + " Please create the bucket before searching." + ) + + self._check_scope_and_collection_exists() + self._check_index_exists() + else: + import click + + if click.confirm( + "The 'couchbase' package is required to use the CouchbaseFTSVectorSearchTool. " + "Would you like to install it?" + ): + import subprocess + + subprocess.run(["uv", "add", "couchbase"], check=True) + else: + raise ImportError( + "The 'couchbase' package is required to use the CouchbaseFTSVectorSearchTool. " + "Please install it with: uv add couchbase" + ) + + def _run(self, query: str) -> str: + """Execute a vector search query against the Couchbase index. + + Args: + query: The search query string. + + Returns: + A JSON string containing the search results. + + Raises: + ValueError: If the search query fails or returns results without fields. 
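+
+        Each matching row contributes its stored fields to the returned JSON
+        array, so the output can be parsed directly with json.loads.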
+ """ + query_embedding = self.embedding_function(query) + fields = ["*"] + + search_req = search.SearchRequest.create( + VectorSearch.from_vector_query( + VectorQuery( + self.embedding_key, + query_embedding, + self.limit + ) + ) + ) + + try: + if self.scoped_index: + search_iter = self._scope.search( + self.index_name, + search_req, + SearchOptions( + limit=self.limit, + fields=fields, + ) + ) + else: + search_iter = self.cluster.search( + self.index_name, + search_req, + SearchOptions( + limit=self.limit, + fields=fields + ) + ) + + json_response = [] + + for row in search_iter.rows(): + json_response.append(row.fields) + except Exception as e: + return f"Search failed with error: {e}" + + return json.dumps(json_response, indent=2) \ No newline at end of file diff --git a/tests/tools/couchbase_tool_test.py b/tests/tools/couchbase_tool_test.py new file mode 100644 index 000000000..424a19025 --- /dev/null +++ b/tests/tools/couchbase_tool_test.py @@ -0,0 +1,365 @@ +import pytest +from unittest.mock import MagicMock, patch, ANY + +# Mock the couchbase library before importing the tool +# This prevents ImportErrors if couchbase isn't installed in the test environment +mock_couchbase = MagicMock() +mock_couchbase.search = MagicMock() +mock_couchbase.cluster = MagicMock() +mock_couchbase.options = MagicMock() +mock_couchbase.vector_search = MagicMock() + +# Simulate the structure needed for checks +mock_couchbase.cluster.Cluster = MagicMock() +mock_couchbase.options.SearchOptions = MagicMock() +mock_couchbase.vector_search.VectorQuery = MagicMock() +mock_couchbase.vector_search.VectorSearch = MagicMock() +mock_couchbase.search.SearchRequest = MagicMock() # Mock the class itself +mock_couchbase.search.SearchRequest.create = MagicMock() # Mock the class method + +# Add necessary exception types if needed for testing error handling +class MockCouchbaseException(Exception): + pass +mock_couchbase.exceptions = MagicMock() +mock_couchbase.exceptions.BucketNotFoundException = MockCouchbaseException +mock_couchbase.exceptions.ScopeNotFoundException = MockCouchbaseException +mock_couchbase.exceptions.CollectionNotFoundException = MockCouchbaseException +mock_couchbase.exceptions.IndexNotFoundException = MockCouchbaseException + + +import sys +sys.modules['couchbase'] = mock_couchbase +sys.modules['couchbase.search'] = mock_couchbase.search +sys.modules['couchbase.cluster'] = mock_couchbase.cluster +sys.modules['couchbase.options'] = mock_couchbase.options +sys.modules['couchbase.vector_search'] = mock_couchbase.vector_search +sys.modules['couchbase.exceptions'] = mock_couchbase.exceptions + +# Now import the tool +from crewai_tools.tools.couchbase_tool.couchbase_tool import CouchbaseFTSVectorSearchTool + +# --- Test Fixtures --- +@pytest.fixture(autouse=True) +def reset_global_mocks(): + """Reset call counts for globally defined mocks before each test.""" + # Reset the specific mock causing the issue + mock_couchbase.vector_search.VectorQuery.reset_mock() + # It's good practice to also reset other related global mocks + # that might be called in your tests to prevent similar issues: + mock_couchbase.vector_search.VectorSearch.from_vector_query.reset_mock() + mock_couchbase.search.SearchRequest.create.reset_mock() + +# Additional fixture to handle import pollution in full test suite +@pytest.fixture(autouse=True) +def ensure_couchbase_mocks(): + """Ensure that couchbase imports are properly mocked even when other tests have run first.""" + # This fixture ensures our mocks are in place regardless of 
import order + original_modules = {} + + # Store any existing modules + for module_name in ['couchbase', 'couchbase.search', 'couchbase.cluster', 'couchbase.options', 'couchbase.vector_search', 'couchbase.exceptions']: + if module_name in sys.modules: + original_modules[module_name] = sys.modules[module_name] + + # Ensure our mocks are active + sys.modules['couchbase'] = mock_couchbase + sys.modules['couchbase.search'] = mock_couchbase.search + sys.modules['couchbase.cluster'] = mock_couchbase.cluster + sys.modules['couchbase.options'] = mock_couchbase.options + sys.modules['couchbase.vector_search'] = mock_couchbase.vector_search + sys.modules['couchbase.exceptions'] = mock_couchbase.exceptions + + yield + + # Restore original modules if they existed + for module_name, original_module in original_modules.items(): + if original_module is not None: + sys.modules[module_name] = original_module + +@pytest.fixture +def mock_cluster(): + cluster = MagicMock() + bucket_manager = MagicMock() + search_index_manager = MagicMock() + bucket = MagicMock() + scope = MagicMock() + collection = MagicMock() + scope_search_index_manager = MagicMock() + + # Setup mock return values for checks + cluster.buckets.return_value = bucket_manager + cluster.search_indexes.return_value = search_index_manager + cluster.bucket.return_value = bucket + bucket.scope.return_value = scope + scope.collection.return_value = collection + scope.search_indexes.return_value = scope_search_index_manager + + # Mock bucket existence check + bucket_manager.get_bucket.return_value = True + + # Mock scope/collection existence check + mock_scope_spec = MagicMock() + mock_scope_spec.name = "test_scope" + mock_collection_spec = MagicMock() + mock_collection_spec.name = "test_collection" + mock_scope_spec.collections = [mock_collection_spec] + bucket.collections.return_value.get_all_scopes.return_value = [mock_scope_spec] + + # Mock index existence check + mock_index_def = MagicMock() + mock_index_def.name = "test_index" + scope_search_index_manager.get_all_indexes.return_value = [mock_index_def] + search_index_manager.get_all_indexes.return_value = [mock_index_def] + + return cluster + +@pytest.fixture +def mock_embedding_function(): + # Simple mock embedding function + # return lambda query: [0.1] * 10 # Example embedding vector + return MagicMock(return_value=[0.1] * 10) + +@pytest.fixture +def tool_config(mock_cluster, mock_embedding_function): + return { + "cluster": mock_cluster, + "bucket_name": "test_bucket", + "scope_name": "test_scope", + "collection_name": "test_collection", + "index_name": "test_index", + "embedding_function": mock_embedding_function, + "limit": 5, + "embedding_key": "test_embedding", + "scoped_index": True + } + +@pytest.fixture +def couchbase_tool(tool_config): + # Patch COUCHBASE_AVAILABLE to True for these tests + with patch('crewai_tools.tools.couchbase_tool.couchbase_tool.COUCHBASE_AVAILABLE', True): + tool = CouchbaseFTSVectorSearchTool(**tool_config) + return tool + +@pytest.fixture +def mock_search_iter(): + mock_iter = MagicMock() + # Simulate search results with a 'fields' attribute + mock_row1 = MagicMock() + mock_row1.fields = {"id": "doc1", "text": "content 1", "test_embedding": [0.1]*10} + mock_row2 = MagicMock() + mock_row2.fields = {"id": "doc2", "text": "content 2", "test_embedding": [0.2]*10} + mock_iter.rows.return_value = [mock_row1, mock_row2] + return mock_iter + +# --- Test Cases --- + +def test_initialization_success(couchbase_tool, tool_config): + """Test successful initialization 
with valid config.""" + assert couchbase_tool.cluster == tool_config["cluster"] + assert couchbase_tool.bucket_name == "test_bucket" + assert couchbase_tool.scope_name == "test_scope" + assert couchbase_tool.collection_name == "test_collection" + assert couchbase_tool.index_name == "test_index" + assert couchbase_tool.embedding_function is not None + assert couchbase_tool.limit == 5 + assert couchbase_tool.embedding_key == "test_embedding" + assert couchbase_tool.scoped_index == True + + # Check if helper methods were called during init (via mocks in fixture) + couchbase_tool.cluster.buckets().get_bucket.assert_called_once_with("test_bucket") + couchbase_tool.cluster.bucket().collections().get_all_scopes.assert_called_once() + couchbase_tool.cluster.bucket().scope().search_indexes().get_all_indexes.assert_called_once() + +def test_initialization_missing_required_args(mock_cluster, mock_embedding_function): + """Test initialization fails when required arguments are missing.""" + base_config = { + "cluster": mock_cluster, "bucket_name": "b", "scope_name": "s", + "collection_name": "c", "index_name": "i", "embedding_function": mock_embedding_function + } + required_keys = base_config.keys() + + with patch('crewai_tools.tools.couchbase_tool.couchbase_tool.COUCHBASE_AVAILABLE', True): + for key in required_keys: + incomplete_config = base_config.copy() + del incomplete_config[key] + with pytest.raises(ValueError): + CouchbaseFTSVectorSearchTool(**incomplete_config) + +def test_initialization_couchbase_unavailable(): + """Test behavior when couchbase library is not available.""" + with patch('crewai_tools.tools.couchbase_tool.couchbase_tool.COUCHBASE_AVAILABLE', False): + with patch('click.confirm', return_value=False) as mock_confirm: + with pytest.raises(ImportError, match="The 'couchbase' package is required"): + CouchbaseFTSVectorSearchTool(cluster=MagicMock(), bucket_name="b", scope_name="s", + collection_name="c", index_name="i", embedding_function=MagicMock()) + mock_confirm.assert_called_once() # Ensure user was prompted + +def test_run_success_scoped_index(couchbase_tool, mock_search_iter, tool_config, mock_embedding_function): + """Test successful _run execution with a scoped index.""" + query = "find relevant documents" + # expected_embedding = mock_embedding_function(query) + + # Mock the scope search method + couchbase_tool._scope.search = MagicMock(return_value=mock_search_iter) + # Mock the VectorQuery/VectorSearch/SearchRequest creation using runtime patching + with patch('crewai_tools.tools.couchbase_tool.couchbase_tool.VectorQuery') as mock_vq, \ + patch('crewai_tools.tools.couchbase_tool.couchbase_tool.VectorSearch') as mock_vs, \ + patch('crewai_tools.tools.couchbase_tool.couchbase_tool.search.SearchRequest') as mock_sr, \ + patch('crewai_tools.tools.couchbase_tool.couchbase_tool.SearchOptions') as mock_so: + + # Set up the mock objects and their return values + mock_vector_query = MagicMock() + mock_vector_search = MagicMock() + mock_search_req = MagicMock() + mock_search_options = MagicMock() + + mock_vq.return_value = mock_vector_query + mock_vs.from_vector_query.return_value = mock_vector_search + mock_sr.create.return_value = mock_search_req + mock_so.return_value = mock_search_options + + result = couchbase_tool._run(query=query) + + # Check embedding function call + tool_config['embedding_function'].assert_called_once_with(query) + + # Check VectorQuery call + mock_vq.assert_called_once_with( + tool_config['embedding_key'], mock_embedding_function.return_value, 
tool_config['limit'] + ) + # Check VectorSearch call + mock_vs.from_vector_query.assert_called_once_with(mock_vector_query) + # Check SearchRequest creation + mock_sr.create.assert_called_once_with(mock_vector_search) + # Check SearchOptions creation + mock_so.assert_called_once_with(limit=tool_config['limit'], fields=["*"]) + + # Check that scope search was called correctly + couchbase_tool._scope.search.assert_called_once_with( + tool_config['index_name'], + mock_search_req, + mock_search_options + ) + + # Check cluster search was NOT called + couchbase_tool.cluster.search.assert_not_called() + + # Check result format (simple check for JSON structure) + assert '"id": "doc1"' in result + assert '"id": "doc2"' in result + assert result.startswith('[') # Should be valid JSON after concatenation + +def test_run_success_global_index(tool_config, mock_search_iter, mock_embedding_function): + """Test successful _run execution with a global (non-scoped) index.""" + tool_config['scoped_index'] = False + with patch('crewai_tools.tools.couchbase_tool.couchbase_tool.COUCHBASE_AVAILABLE', True): + couchbase_tool = CouchbaseFTSVectorSearchTool(**tool_config) + + query = "find global documents" + # expected_embedding = mock_embedding_function(query) + + # Mock the cluster search method + couchbase_tool.cluster.search = MagicMock(return_value=mock_search_iter) + # Mock the VectorQuery/VectorSearch/SearchRequest creation using runtime patching + with patch('crewai_tools.tools.couchbase_tool.couchbase_tool.VectorQuery') as mock_vq, \ + patch('crewai_tools.tools.couchbase_tool.couchbase_tool.VectorSearch') as mock_vs, \ + patch('crewai_tools.tools.couchbase_tool.couchbase_tool.search.SearchRequest') as mock_sr, \ + patch('crewai_tools.tools.couchbase_tool.couchbase_tool.SearchOptions') as mock_so: + + # Set up the mock objects and their return values + mock_vector_query = MagicMock() + mock_vector_search = MagicMock() + mock_search_req = MagicMock() + mock_search_options = MagicMock() + + mock_vq.return_value = mock_vector_query + mock_vs.from_vector_query.return_value = mock_vector_search + mock_sr.create.return_value = mock_search_req + mock_so.return_value = mock_search_options + + result = couchbase_tool._run(query=query) + + # Check embedding function call + tool_config['embedding_function'].assert_called_once_with(query) + + # Check VectorQuery/Search call + mock_vq.assert_called_once_with( + tool_config['embedding_key'], mock_embedding_function.return_value, tool_config['limit'] + ) + mock_sr.create.assert_called_once_with(mock_vector_search) + # Check SearchOptions creation + mock_so.assert_called_once_with(limit=tool_config['limit'], fields=["*"]) + + # Check that cluster search was called correctly + couchbase_tool.cluster.search.assert_called_once_with( + tool_config['index_name'], + mock_search_req, + mock_search_options + ) + + # Check scope search was NOT called + couchbase_tool._scope.search.assert_not_called() + + # Check result format + assert '"id": "doc1"' in result + assert '"id": "doc2"' in result + +def test_check_bucket_exists_fail(tool_config): + """Test check for bucket non-existence.""" + mock_cluster = tool_config['cluster'] + mock_cluster.buckets().get_bucket.side_effect = mock_couchbase.exceptions.BucketNotFoundException("Bucket not found") + + with patch('crewai_tools.tools.couchbase_tool.couchbase_tool.COUCHBASE_AVAILABLE', True): + with pytest.raises(ValueError, match="Bucket test_bucket does not exist."): + CouchbaseFTSVectorSearchTool(**tool_config) + + +def 
test_check_scope_exists_fail(tool_config): + """Test check for scope non-existence.""" + mock_cluster = tool_config['cluster'] + # Simulate scope not being in the list returned + mock_scope_spec = MagicMock() + mock_scope_spec.name = "wrong_scope" + mock_cluster.bucket().collections().get_all_scopes.return_value = [mock_scope_spec] + + with patch('crewai_tools.tools.couchbase_tool.couchbase_tool.COUCHBASE_AVAILABLE', True): + with pytest.raises(ValueError, match="Scope test_scope not found"): + CouchbaseFTSVectorSearchTool(**tool_config) + + +def test_check_collection_exists_fail(tool_config): + """Test check for collection non-existence.""" + mock_cluster = tool_config['cluster'] + # Simulate collection not being in the scope's list + mock_scope_spec = MagicMock() + mock_scope_spec.name = "test_scope" + mock_collection_spec = MagicMock() + mock_collection_spec.name = "wrong_collection" + mock_scope_spec.collections = [mock_collection_spec] # Only has wrong collection + mock_cluster.bucket().collections().get_all_scopes.return_value = [mock_scope_spec] + + with patch('crewai_tools.tools.couchbase_tool.couchbase_tool.COUCHBASE_AVAILABLE', True): + with pytest.raises(ValueError, match="Collection test_collection not found"): + CouchbaseFTSVectorSearchTool(**tool_config) + +def test_check_index_exists_fail_scoped(tool_config): + """Test check for scoped index non-existence.""" + mock_cluster = tool_config['cluster'] + # Simulate index not being in the list returned by scope manager + mock_cluster.bucket().scope().search_indexes().get_all_indexes.return_value = [] + + with patch('crewai_tools.tools.couchbase_tool.couchbase_tool.COUCHBASE_AVAILABLE', True): + with pytest.raises(ValueError, match="Index test_index does not exist"): + CouchbaseFTSVectorSearchTool(**tool_config) + + +def test_check_index_exists_fail_global(tool_config): + """Test check for global index non-existence.""" + tool_config['scoped_index'] = False + mock_cluster = tool_config['cluster'] + # Simulate index not being in the list returned by cluster manager + mock_cluster.search_indexes().get_all_indexes.return_value = [] + + with patch('crewai_tools.tools.couchbase_tool.couchbase_tool.COUCHBASE_AVAILABLE', True): + with pytest.raises(ValueError, match="Index test_index does not exist"): + CouchbaseFTSVectorSearchTool(**tool_config) \ No newline at end of file From e0de166592c721e4945549d808dec935d545a4c7 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 9 Jul 2025 10:44:23 -0500 Subject: [PATCH 355/391] Add MongoDB Vector Search Tool (#319) * INTPYTHON-580 Design and Implement MongoDBVectorSearchTool * add implementation * wip * wip * finish tests * add todo * refactor to wrap langchain-mongodb * cleanup * address review * Fix usage of EnvVar class * inline code * lint * lint * fix usage of SearchIndexModel * Refactor: Update EnvVar import path and remove unused tests.utils module - Changed import of EnvVar from tests.utils to crewai.tools in multiple files. - Updated README.md for MongoDB vector search tool with additional context. - Modified subprocess command in vector_search.py for package installation. - Cleaned up test_generate_tool_specs.py to improve mock patching syntax. - Deleted unused tests/utils.py file. * update the crewai dep and the lockfile * chore: update package versions and dependencies in uv.lock - Removed `auth0-python` package. - Updated `crewai` version to 0.140.0 and adjusted its dependencies. - Changed `json-repair` version to 0.25.2. - Updated `litellm` version to 1.72.6. 
- Modified dependency markers for several packages to improve compatibility with Python versions. * refactor: improve MongoDB vector search tool with enhanced error handling and new dimensions field - Added logging for error handling in the _run method and during client cleanup. - Introduced a new 'dimensions' field in the MongoDBVectorSearchConfig for embedding vector size. - Refactored the _run method to return JSON formatted results and handle exceptions gracefully. - Cleaned up import statements and improved code readability. * address review * update tests * debug * fix test * fix test * fix test * support azure openai --------- Co-authored-by: lorenzejay --- README.md | 2 +- src/crewai_tools/__init__.py | 6 +- src/crewai_tools/tools/__init__.py | 7 +- .../mongodb_vector_search_tool/README.md | 87 +++++ .../mongodb_vector_search_tool/__init__.py | 11 + .../tools/mongodb_vector_search_tool/utils.py | 120 +++++++ .../vector_search.py | 326 ++++++++++++++++++ tests/test_generate_tool_specs.py | 79 +++-- .../tools/test_mongodb_vector_search_tool.py | 75 ++++ 9 files changed, 685 insertions(+), 28 deletions(-) create mode 100644 src/crewai_tools/tools/mongodb_vector_search_tool/README.md create mode 100644 src/crewai_tools/tools/mongodb_vector_search_tool/__init__.py create mode 100644 src/crewai_tools/tools/mongodb_vector_search_tool/utils.py create mode 100644 src/crewai_tools/tools/mongodb_vector_search_tool/vector_search.py create mode 100644 tests/tools/test_mongodb_vector_search_tool.py diff --git a/README.md b/README.md index 4ce6d3807..3ee271370 100644 --- a/README.md +++ b/README.md @@ -25,6 +25,7 @@ CrewAI provides an extensive collection of powerful tools ready to enhance your - **File Management**: `FileReadTool`, `FileWriteTool` - **Web Scraping**: `ScrapeWebsiteTool`, `SeleniumScrapingTool` - **Database Integrations**: `PGSearchTool`, `MySQLSearchTool` +- **Vector Database Integrations**: `MongoDBVectorSearchTool`, `QdrantVectorSearchTool`, `WeaviateVectorSearchTool` - **API Integrations**: `SerperApiTool`, `EXASearchTool` - **AI-powered Tools**: `DallETool`, `VisionTool`, `StagehandTool` @@ -226,4 +227,3 @@ Join our rapidly growing community and receive real-time support: - [Open an Issue](https://github.com/crewAIInc/crewAI/issues) Build smarter, faster, and more powerful AI solutions—powered by CrewAI Tools. 
-
diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py
index 8df620788..7831b957d 100644
--- a/src/crewai_tools/__init__.py
+++ b/src/crewai_tools/__init__.py
@@ -1,5 +1,6 @@
 from .adapters.enterprise_adapter import EnterpriseActionTool
 from .adapters.mcp_adapter import MCPServerAdapter
+from .adapters.zapier_adapter import ZapierActionTool
 from .aws import (
     BedrockInvokeAgentTool,
     BedrockKBRetrieverTool,
@@ -23,9 +24,9 @@ from .tools import (
     DirectorySearchTool,
     DOCXSearchTool,
     EXASearchTool,
+    FileCompressorTool,
     FileReadTool,
     FileWriterTool,
-    FileCompressorTool,
     FirecrawlCrawlWebsiteTool,
     FirecrawlScrapeWebsiteTool,
     FirecrawlSearchTool,
@@ -35,6 +36,8 @@ from .tools import (
     LinkupSearchTool,
     LlamaIndexTool,
     MDXSearchTool,
+    MongoDBVectorSearchConfig,
+    MongoDBVectorSearchTool,
     MultiOnTool,
     MySQLSearchTool,
     NL2SQLTool,
@@ -76,4 +79,3 @@ from .tools import (
     YoutubeVideoSearchTool,
     ZapierActionTools,
 )
-from .adapters.zapier_adapter import ZapierActionTool
diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py
index 47f3f5f80..d4b54c5ff 100644
--- a/src/crewai_tools/tools/__init__.py
+++ b/src/crewai_tools/tools/__init__.py
@@ -16,10 +16,10 @@ from .docx_search_tool.docx_search_tool import DOCXSearchTool
 from .exa_tools.exa_search_tool import EXASearchTool
 from .file_read_tool.file_read_tool import FileReadTool
 from .file_writer_tool.file_writer_tool import FileWriterTool
+from .files_compressor_tool.files_compressor_tool import FileCompressorTool
 from .firecrawl_crawl_website_tool.firecrawl_crawl_website_tool import (
     FirecrawlCrawlWebsiteTool,
 )
-from .files_compressor_tool.files_compressor_tool import FileCompressorTool
 from .firecrawl_scrape_website_tool.firecrawl_scrape_website_tool import (
     FirecrawlScrapeWebsiteTool,
 )
@@ -30,6 +30,11 @@ from .json_search_tool.json_search_tool import JSONSearchTool
 from .linkup.linkup_search_tool import LinkupSearchTool
 from .llamaindex_tool.llamaindex_tool import LlamaIndexTool
 from .mdx_search_tool.mdx_search_tool import MDXSearchTool
+from .mongodb_vector_search_tool import (
+    MongoDBToolSchema,
+    MongoDBVectorSearchConfig,
+    MongoDBVectorSearchTool,
+)
 from .multion_tool.multion_tool import MultiOnTool
 from .mysql_search_tool.mysql_search_tool import MySQLSearchTool
 from .nl2sql.nl2sql_tool import NL2SQLTool
diff --git a/src/crewai_tools/tools/mongodb_vector_search_tool/README.md b/src/crewai_tools/tools/mongodb_vector_search_tool/README.md
new file mode 100644
index 000000000..c66dfcf43
--- /dev/null
+++ b/src/crewai_tools/tools/mongodb_vector_search_tool/README.md
@@ -0,0 +1,87 @@
+# MongoDBVectorSearchTool
+
+## Description
+This tool is specifically crafted for conducting vector searches over documents stored in a MongoDB database. Use this tool to find semantically similar docs to a given query.
+
+MongoDB can act as a vector database that is used to store and query vector embeddings. You can follow the docs here:
+https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-overview/
+
+## Installation
+Install the crewai_tools package with MongoDB support by executing the following command in your terminal:
+
+```shell
+pip install crewai-tools[mongodb]
+```
+
+or
+
+```
+uv add crewai-tools --extra mongodb
+```
+
+## Example
+To utilize the MongoDBVectorSearchTool for different use cases, follow these examples:
+
+```python
+from crewai_tools import MongoDBVectorSearchTool
+
+# Point the tool at an existing database and collection in your MongoDB cluster
+tool = MongoDBVectorSearchTool(
+    database_name="example_database",
+    collection_name="example_collections",
+    connection_string="",
+)
+```
+
+or
+
+```python
+from crewai_tools import MongoDBVectorSearchConfig, MongoDBVectorSearchTool
+
+# Customize the query parameters and the vector index name.
+query_config = MongoDBVectorSearchConfig(limit=10, oversampling_factor=2)
+tool = MongoDBVectorSearchTool(
+    database_name="example_database",
+    collection_name="example_collections",
+    connection_string="",
+    query_config=query_config,
+    vector_index_name="my_vector_index",
+)
+
+# Adding the tool to an agent
+rag_agent = Agent(
+    name="rag_agent",
+    role="You are a helpful assistant that can answer questions with the help of the MongoDBVectorSearchTool.",
+    goal="...",
+    backstory="...",
+    llm="gpt-4o-mini",
+    tools=[tool],
+)
+```
+
+Preloading the MongoDB database with documents:
+
+```python
+from crewai_tools import MongoDBVectorSearchTool
+
+# Create the tool.
+tool = MongoDBVectorSearchTool(
+    database_name="example_database",
+    collection_name="example_collections",
+    connection_string="",
+)
+
+# Add the text from a set of CrewAI knowledge documents.
+texts = []
+for d in os.listdir("knowledge"):
+    with open(os.path.join("knowledge", d), "r") as f:
+        texts.append(f.read())
+tool.add_texts(texts)
+
+# Create the vector search index (if it wasn't already created in Atlas).
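+# Note: the dimensions value must match the embedding model in use; 3072
+# corresponds to the tool's default model, OpenAI's text-embedding-3-large.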
+tool.create_vector_search_index(dimensions=3072) +``` diff --git a/src/crewai_tools/tools/mongodb_vector_search_tool/__init__.py b/src/crewai_tools/tools/mongodb_vector_search_tool/__init__.py new file mode 100644 index 000000000..c7e991472 --- /dev/null +++ b/src/crewai_tools/tools/mongodb_vector_search_tool/__init__.py @@ -0,0 +1,11 @@ +from .vector_search import ( + MongoDBToolSchema, + MongoDBVectorSearchConfig, + MongoDBVectorSearchTool, +) + +__all__ = [ + "MongoDBVectorSearchConfig", + "MongoDBVectorSearchTool", + "MongoDBToolSchema", +] diff --git a/src/crewai_tools/tools/mongodb_vector_search_tool/utils.py b/src/crewai_tools/tools/mongodb_vector_search_tool/utils.py new file mode 100644 index 000000000..a66586f6f --- /dev/null +++ b/src/crewai_tools/tools/mongodb_vector_search_tool/utils.py @@ -0,0 +1,120 @@ +from __future__ import annotations + +from time import monotonic, sleep +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional + +if TYPE_CHECKING: + from pymongo.collection import Collection + + +def _vector_search_index_definition( + dimensions: int, + path: str, + similarity: str, + filters: Optional[List[str]] = None, + **kwargs: Any, +) -> Dict[str, Any]: + # https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-type/ + fields = [ + { + "numDimensions": dimensions, + "path": path, + "similarity": similarity, + "type": "vector", + }, + ] + if filters: + for field in filters: + fields.append({"type": "filter", "path": field}) + definition = {"fields": fields} + definition.update(kwargs) + return definition + + +def create_vector_search_index( + collection: Collection, + index_name: str, + dimensions: int, + path: str, + similarity: str, + filters: Optional[List[str]] = None, + *, + wait_until_complete: Optional[float] = None, + **kwargs: Any, +) -> None: + """Experimental Utility function to create a vector search index + + Args: + collection (Collection): MongoDB Collection + index_name (str): Name of Index + dimensions (int): Number of dimensions in embedding + path (str): field with vector embedding + similarity (str): The similarity score used for the index + filters (List[str]): Fields/paths to index to allow filtering in $vectorSearch + wait_until_complete (Optional[float]): If provided, number of seconds to wait + until search index is ready. + kwargs: Keyword arguments supplying any additional options to SearchIndexModel. 
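+
+    Note: if the target collection does not exist it is created first, and when
+    wait_until_complete is set, the call blocks until the index reports status
+    READY, raising TimeoutError if it does not become ready in time.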
+ """ + from pymongo.operations import SearchIndexModel + + if collection.name not in collection.database.list_collection_names(): + collection.database.create_collection(collection.name) + + result = collection.create_search_index( + SearchIndexModel( + definition=_vector_search_index_definition( + dimensions=dimensions, + path=path, + similarity=similarity, + filters=filters, + **kwargs, + ), + name=index_name, + type="vectorSearch", + ) + ) + + if wait_until_complete: + _wait_for_predicate( + predicate=lambda: _is_index_ready(collection, index_name), + err=f"{index_name=} did not complete in {wait_until_complete}!", + timeout=wait_until_complete, + ) + + +def _is_index_ready(collection: Collection, index_name: str) -> bool: + """Check for the index name in the list of available search indexes to see if the + specified index is of status READY + + Args: + collection (Collection): MongoDB Collection to for the search indexes + index_name (str): Vector Search Index name + + Returns: + bool : True if the index is present and READY false otherwise + """ + for index in collection.list_search_indexes(index_name): + if index["status"] == "READY": + return True + return False + + +def _wait_for_predicate( + predicate: Callable, err: str, timeout: float = 120, interval: float = 0.5 +) -> None: + """Generic to block until the predicate returns true + + Args: + predicate (Callable[, bool]): A function that returns a boolean value + err (str): Error message to raise if nothing occurs + timeout (float, optional): Wait time for predicate. Defaults to TIMEOUT. + interval (float, optional): Interval to check predicate. Defaults to DELAY. + + Raises: + TimeoutError: _description_ + """ + start = monotonic() + while not predicate(): + if monotonic() - start > timeout: + raise TimeoutError(err) + sleep(interval) diff --git a/src/crewai_tools/tools/mongodb_vector_search_tool/vector_search.py b/src/crewai_tools/tools/mongodb_vector_search_tool/vector_search.py new file mode 100644 index 000000000..3f8af315d --- /dev/null +++ b/src/crewai_tools/tools/mongodb_vector_search_tool/vector_search.py @@ -0,0 +1,326 @@ +import json +import os +from importlib.metadata import version +from logging import getLogger +from typing import Any, Dict, Iterable, List, Optional, Type + +from crewai.tools import BaseTool, EnvVar +from openai import AzureOpenAI, Client +from pydantic import BaseModel, Field + +from crewai_tools.tools.mongodb_vector_search_tool.utils import ( + create_vector_search_index, +) + +try: + import pymongo # noqa: F403 + + MONGODB_AVAILABLE = True +except ImportError: + MONGODB_AVAILABLE = False + +logger = getLogger(__name__) + + +class MongoDBVectorSearchConfig(BaseModel): + """Configuration for MongoDB vector search queries.""" + + limit: Optional[int] = Field( + default=4, description="number of documents to return." 
+    )
+    pre_filter: Optional[dict[str, Any]] = Field(
+        default=None,
+        description="MQL match expression comparing an indexed field, applied before the vector search.",
+    )
+    post_filter_pipeline: Optional[list[dict]] = Field(
+        default=None,
+        description="Pipeline of MongoDB aggregation stages to filter/process results after $vectorSearch.",
+    )
+    oversampling_factor: int = Field(
+        default=10,
+        description="Multiple of limit used when generating the number of candidates at each step in the HNSW vector search.",
+    )
+    include_embeddings: bool = Field(
+        default=False,
+        description="Whether to include the embedding vector of each result in metadata.",
+    )
+
+
+class MongoDBToolSchema(MongoDBVectorSearchConfig):
+    """Input for MongoDBTool."""
+
+    query: str = Field(
+        ...,
+        description="The query used to retrieve relevant information from the MongoDB database. Pass only the query, not the question.",
+    )
+
+
+class MongoDBVectorSearchTool(BaseTool):
+    """Tool to perform a vector search on the MongoDB database."""
+
+    name: str = "MongoDBVectorSearchTool"
+    description: str = "A tool to perform a vector search on a MongoDB database for relevant information in internal documents."
+
+    args_schema: Type[BaseModel] = MongoDBToolSchema
+    query_config: Optional[MongoDBVectorSearchConfig] = Field(
+        default=None, description="MongoDB Vector Search query configuration"
+    )
+    embedding_model: str = Field(
+        default="text-embedding-3-large",
+        description="OpenAI text embedding model to use",
+    )
+    vector_index_name: str = Field(
+        default="vector_index", description="Name of the Atlas Search vector index"
+    )
+    text_key: str = Field(
+        default="text",
+        description="MongoDB field that will contain the text for each document",
+    )
+    embedding_key: str = Field(
+        default="embedding",
+        description="Field that will contain the embedding for each document",
+    )
+    database_name: str = Field(..., description="The name of the MongoDB database")
+    collection_name: str = Field(..., description="The name of the MongoDB collection")
+    connection_string: str = Field(
+        ...,
+        description="The connection string of the MongoDB cluster",
+    )
+    dimensions: int = Field(
+        default=1536,
+        description="Number of dimensions in the embedding vector",
+    )
+    env_vars: List[EnvVar] = [
+        EnvVar(
+            name="OPENAI_API_KEY",
+            description="OpenAI API key used to generate embeddings",
+            required=False,
+        ),
+        EnvVar(
+            name="AZURE_OPENAI_ENDPOINT",
+            description="Azure OpenAI endpoint, used instead of OPENAI_API_KEY when set",
+            required=False,
+        ),
+    ]
+    package_dependencies: List[str] = ["pymongo"]
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        if not MONGODB_AVAILABLE:
+            import click
+
+            if click.confirm(
+                "The MongoDBVectorSearchTool requires the 'pymongo' package. Would you like to install it?"
+            ):
+                import subprocess
+
+                subprocess.run(["uv", "add", "pymongo"], check=True)
+            else:
+                raise ImportError(
+                    "The 'pymongo' package is required to use the MongoDBVectorSearchTool."
+                )
+
+        if "AZURE_OPENAI_ENDPOINT" in os.environ:
+            self._openai_client = AzureOpenAI()
+        elif "OPENAI_API_KEY" in os.environ:
+            self._openai_client = Client()
+        else:
+            raise ValueError(
+                "The OPENAI_API_KEY environment variable (or AZURE_OPENAI_ENDPOINT for Azure) is required to use the MongoDBVectorSearchTool."
+            )
+
+        from pymongo import MongoClient
+        from pymongo.driver_info import DriverInfo
+
+        self._client = MongoClient(
+            self.connection_string,
+            driver=DriverInfo(name="CrewAI", version=version("crewai-tools")),
+        )
+        self._coll = self._client[self.database_name][self.collection_name]
+
+    def create_vector_search_index(
+        self,
+        *,
+        dimensions: int,
+        relevance_score_fn: str = "cosine",
+        auto_index_timeout: int = 15,
+    ) -> None:
+        """Convenience method to create a vector search index.
+
+        Args:
+            dimensions: Number of dimensions in the embedding. If set and the
+                index does not exist, an index will be created.
+            relevance_score_fn: The similarity score used for the index.
+                Currently supported: 'euclidean', 'cosine', and 'dotProduct'.
+            auto_index_timeout: Timeout in seconds to wait for an auto-created
+                index to be ready.
+        """
+
+        create_vector_search_index(
+            collection=self._coll,
+            index_name=self.vector_index_name,
+            dimensions=dimensions,
+            path=self.embedding_key,
+            similarity=relevance_score_fn,
+            wait_until_complete=auto_index_timeout,
+        )
+
+    def add_texts(
+        self,
+        texts: Iterable[str],
+        metadatas: Optional[List[Dict[str, Any]]] = None,
+        ids: Optional[List[str]] = None,
+        batch_size: int = 100,
+        **kwargs: Any,
+    ) -> List[str]:
+        """Add texts, create embeddings, and insert them into the collection and index.
+
+        Important notes on ids:
+            - If _id or id is a key in the metadatas dicts, you must pop it
+                and provide it in the separate ids list.
+            - They must be unique.
+            - If they are not provided, unique ones are created automatically
+                and stored internally as bson.ObjectIds (returned as strings).
+
+        Args:
+            texts: Iterable of strings to add to the collection.
+            metadatas: Optional list of metadata dicts associated with the texts.
+            ids: Optional list of unique ids used as the document _id values.
+                See the note on ids above.
+            batch_size: Number of documents to insert at a time.
+                Tuning this may help with performance and sidestep MongoDB limits.
+
+        Returns:
+            List of ids of the added documents.
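+
+        Example (an illustrative sketch; assumes ``tool`` is an already
+            configured MongoDBVectorSearchTool instance):
+
+            ids = tool.add_texts(
+                ["MongoDB Atlas supports vector search."],
+                metadatas=[{"source": "docs"}],
+            )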
+ """ + from bson import ObjectId + + _metadatas = metadatas or [{} for _ in texts] + ids = [str(ObjectId()) for _ in range(len(list(texts)))] + metadatas_batch = _metadatas + + result_ids = [] + texts_batch = [] + metadatas_batch = [] + size = 0 + i = 0 + for j, (text, metadata) in enumerate(zip(texts, _metadatas)): + size += len(text) + len(metadata) + texts_batch.append(text) + metadatas_batch.append(metadata) + if (j + 1) % batch_size == 0 or size >= 47_000_000: + batch_res = self._bulk_embed_and_insert_texts( + texts_batch, metadatas_batch, ids[i : j + 1] + ) + result_ids.extend(batch_res) + texts_batch = [] + metadatas_batch = [] + size = 0 + i = j + 1 + if texts_batch: + batch_res = self._bulk_embed_and_insert_texts( + texts_batch, metadatas_batch, ids[i : j + 1] + ) + result_ids.extend(batch_res) + return result_ids + + def _embed_texts(self, texts: List[str]) -> List[List[float]]: + return [ + i.embedding + for i in self._openai_client.embeddings.create( + input=texts, + model=self.embedding_model, + dimensions=self.dimensions, + ).data + ] + + def _bulk_embed_and_insert_texts( + self, + texts: List[str], + metadatas: List[dict], + ids: List[str], + ) -> List[str]: + """Bulk insert single batch of texts, embeddings, and ids.""" + from bson import ObjectId + from pymongo.operations import ReplaceOne + + if not texts: + return [] + # Compute embedding vectors + embeddings = self._embed_texts(texts) + docs = [ + { + "_id": ObjectId(i), + self.text_key: t, + self.embedding_key: embedding, + **m, + } + for i, t, m, embedding in zip(ids, texts, metadatas, embeddings) + ] + operations = [ReplaceOne({"_id": doc["_id"]}, doc, upsert=True) for doc in docs] + # insert the documents in MongoDB Atlas + result = self._coll.bulk_write(operations) + assert result.upserted_ids is not None + return [str(_id) for _id in result.upserted_ids.values()] + + def _run(self, query: str) -> str: + try: + query_config = self.query_config or MongoDBVectorSearchConfig() + limit = query_config.limit + oversampling_factor = query_config.oversampling_factor + pre_filter = query_config.pre_filter + include_embeddings = query_config.include_embeddings + post_filter_pipeline = query_config.post_filter_pipeline + + # Create the embedding for the query + query_vector = self._embed_texts([query])[0] + + # Atlas Vector Search, potentially with filter + stage = { + "index": self.vector_index_name, + "path": self.embedding_key, + "queryVector": query_vector, + "numCandidates": limit * oversampling_factor, + "limit": limit, + } + if pre_filter: + stage["filter"] = pre_filter + + pipeline = [ + {"$vectorSearch": stage}, + {"$set": {"score": {"$meta": "vectorSearchScore"}}}, + ] + + # Remove embeddings unless requested + if not include_embeddings: + pipeline.append({"$project": {self.embedding_key: 0}}) + + # Post-processing + if post_filter_pipeline is not None: + pipeline.extend(post_filter_pipeline) + + # Execution + cursor = self._coll.aggregate(pipeline) # type: ignore[arg-type] + docs = [] + + # Format + for doc in cursor: + docs.append(doc) + return json.dumps(docs) + except Exception as e: + logger.error(f"Error: {e}") + return "" + + def __del__(self): + """Cleanup clients on deletion.""" + try: + if hasattr(self, "_client") and self._client: + self._client.close() + except Exception as e: + logger.error(f"Error: {e}") + + try: + if hasattr(self, "_openai_client") and self._openai_client: + self._openai_client.close() + except Exception as e: + logger.error(f"Error: {e}") diff --git 
a/tests/test_generate_tool_specs.py b/tests/test_generate_tool_specs.py index 73034a174..eeb407be1 100644 --- a/tests/test_generate_tool_specs.py +++ b/tests/test_generate_tool_specs.py @@ -1,12 +1,13 @@ import json from typing import List, Optional, Type - -import pytest -from pydantic import BaseModel, Field from unittest import mock -from generate_tool_specs import ToolSpecExtractor +import pytest from crewai.tools.base_tool import BaseTool, EnvVar +from pydantic import BaseModel, Field + +from generate_tool_specs import ToolSpecExtractor + class MockToolSchema(BaseModel): query: str = Field(..., description="The query parameter") @@ -19,15 +20,30 @@ class MockTool(BaseTool): description: str = "A tool that mocks search functionality" args_schema: Type[BaseModel] = MockToolSchema - another_parameter: str = Field("Another way to define a default value", description="") + another_parameter: str = Field( + "Another way to define a default value", description="" + ) my_parameter: str = Field("This is default value", description="What a description") my_parameter_bool: bool = Field(False) - package_dependencies: List[str] = Field(["this-is-a-required-package", "another-required-package"], description="") + package_dependencies: List[str] = Field( + ["this-is-a-required-package", "another-required-package"], description="" + ) env_vars: List[EnvVar] = [ - EnvVar(name="SERPER_API_KEY", description="API key for Serper", required=True, default=None), - EnvVar(name="API_RATE_LIMIT", description="API rate limit", required=False, default="100") + EnvVar( + name="SERPER_API_KEY", + description="API key for Serper", + required=True, + default=None, + ), + EnvVar( + name="API_RATE_LIMIT", + description="API rate limit", + required=False, + default="100", + ), ] + @pytest.fixture def extractor(): ext = ToolSpecExtractor() @@ -37,7 +53,7 @@ def extractor(): def test_unwrap_schema(extractor): nested_schema = { "type": "function-after", - "schema": {"type": "default", "schema": {"type": "str", "value": "test"}} + "schema": {"type": "default", "schema": {"type": "str", "value": "test"}}, } result = extractor._unwrap_schema(nested_schema) assert result["type"] == "str" @@ -46,12 +62,15 @@ def test_unwrap_schema(extractor): @pytest.fixture def mock_tool_extractor(extractor): - with mock.patch("generate_tool_specs.dir", return_value=["MockTool"]), \ - mock.patch("generate_tool_specs.getattr", return_value=MockTool): + with ( + mock.patch("generate_tool_specs.dir", return_value=["MockTool"]), + mock.patch("generate_tool_specs.getattr", return_value=MockTool), + ): extractor.extract_all_tools() assert len(extractor.tools_spec) == 1 return extractor.tools_spec[0] + def test_extract_basic_tool_info(mock_tool_extractor): tool_info = mock_tool_extractor @@ -69,6 +88,7 @@ def test_extract_basic_tool_info(mock_tool_extractor): assert tool_info["humanized_name"] == "Mock Search Tool" assert tool_info["description"] == "A tool that mocks search functionality" + def test_extract_init_params_schema(mock_tool_extractor): tool_info = mock_tool_extractor init_params_schema = tool_info["init_params_schema"] @@ -80,20 +100,21 @@ def test_extract_init_params_schema(mock_tool_extractor): "type", } - another_parameter = init_params_schema['properties']['another_parameter'] + another_parameter = init_params_schema["properties"]["another_parameter"] assert another_parameter["description"] == "" assert another_parameter["default"] == "Another way to define a default value" assert another_parameter["type"] == "string" - my_parameter 
= init_params_schema['properties']['my_parameter'] + my_parameter = init_params_schema["properties"]["my_parameter"] assert my_parameter["description"] == "What a description" assert my_parameter["default"] == "This is default value" assert my_parameter["type"] == "string" - my_parameter_bool = init_params_schema['properties']['my_parameter_bool'] + my_parameter_bool = init_params_schema["properties"]["my_parameter_bool"] assert my_parameter_bool["default"] == False assert my_parameter_bool["type"] == "boolean" + def test_extract_env_vars(mock_tool_extractor): tool_info = mock_tool_extractor @@ -109,6 +130,7 @@ def test_extract_env_vars(mock_tool_extractor): assert rate_limit_var["required"] == False assert rate_limit_var["default"] == "100" + def test_extract_run_params_schema(mock_tool_extractor): tool_info = mock_tool_extractor @@ -131,22 +153,31 @@ def test_extract_run_params_schema(mock_tool_extractor): filters_param = run_params_schema["properties"]["filters"] assert filters_param["description"] == "Optional filters to apply" assert filters_param["default"] == None - assert filters_param['anyOf'] == [{'items': {'type': 'string'}, 'type': 'array'}, {'type': 'null'}] + assert filters_param["anyOf"] == [ + {"items": {"type": "string"}, "type": "array"}, + {"type": "null"}, + ] + def test_extract_package_dependencies(mock_tool_extractor): tool_info = mock_tool_extractor - assert tool_info["package_dependencies"] == ["this-is-a-required-package", "another-required-package"] + assert tool_info["package_dependencies"] == [ + "this-is-a-required-package", + "another-required-package", + ] def test_save_to_json(extractor, tmp_path): - extractor.tools_spec = [{ - "name": "TestTool", - "humanized_name": "Test Tool", - "description": "A test tool", - "run_params_schema": [ - {"name": "param1", "description": "Test parameter", "type": "str"} - ] - }] + extractor.tools_spec = [ + { + "name": "TestTool", + "humanized_name": "Test Tool", + "description": "A test tool", + "run_params_schema": [ + {"name": "param1", "description": "Test parameter", "type": "str"} + ], + } + ] file_path = tmp_path / "output.json" extractor.save_to_json(str(file_path)) diff --git a/tests/tools/test_mongodb_vector_search_tool.py b/tests/tools/test_mongodb_vector_search_tool.py new file mode 100644 index 000000000..b76debbde --- /dev/null +++ b/tests/tools/test_mongodb_vector_search_tool.py @@ -0,0 +1,75 @@ +import json +from unittest.mock import patch + +import pytest + +from crewai_tools import MongoDBVectorSearchConfig, MongoDBVectorSearchTool + + +# Unit Test Fixtures +@pytest.fixture +def mongodb_vector_search_tool(): + tool = MongoDBVectorSearchTool( + connection_string="foo", database_name="bar", collection_name="test" + ) + tool._embed_texts = lambda x: [[0.1]] + yield tool + + +# Unit Tests +def test_successful_query_execution(mongodb_vector_search_tool): + # Enable embedding + with patch.object(mongodb_vector_search_tool._coll, "aggregate") as mock_aggregate: + mock_aggregate.return_value = [dict(text="foo", score=0.1, _id=1)] + + results = json.loads(mongodb_vector_search_tool._run(query="sandwiches")) + + assert len(results) == 1 + assert results[0]["text"] == "foo" + assert results[0]["_id"] == 1 + + +def test_provide_config(): + query_config = MongoDBVectorSearchConfig(limit=10) + tool = MongoDBVectorSearchTool( + connection_string="foo", + database_name="bar", + collection_name="test", + query_config=query_config, + vector_index_name="foo", + embedding_model="bar", + ) + tool._embed_texts = lambda x: 
[[0.1]]
+    with patch.object(tool._coll, "aggregate") as mock_aggregate:
+        mock_aggregate.return_value = [dict(text="foo", score=0.1, _id=1)]
+
+        tool._run(query="sandwiches")
+        assert mock_aggregate.mock_calls[-1].args[0][0]["$vectorSearch"]["limit"] == 10
+
+
+def test_cleanup_on_deletion(mongodb_vector_search_tool):
+    with patch.object(mongodb_vector_search_tool, "_client") as mock_client:
+        # Trigger cleanup
+        mongodb_vector_search_tool.__del__()
+
+        mock_client.close.assert_called_once()
+
+
+def test_create_search_index(mongodb_vector_search_tool):
+    with patch(
+        "crewai_tools.tools.mongodb_vector_search_tool.vector_search.create_vector_search_index"
+    ) as mock_create_search_index:
+        mongodb_vector_search_tool.create_vector_search_index(dimensions=10)
+        kwargs = mock_create_search_index.mock_calls[0].kwargs
+        assert kwargs["dimensions"] == 10
+        assert kwargs["similarity"] == "cosine"
+
+
+def test_add_texts(mongodb_vector_search_tool):
+    with patch.object(mongodb_vector_search_tool._coll, "bulk_write") as bulk_write:
+        mongodb_vector_search_tool.add_texts(["foo"])
+        args = bulk_write.mock_calls[0].args
+        assert "ReplaceOne" in str(args[0][0])
+        assert "foo" in str(args[0][0])
From eb09f2718fdc23e911ca8500bada4a1161d03776 Mon Sep 17 00:00:00 2001
From: Vini Brasil
Date: Wed, 9 Jul 2025 15:03:16 -0300
Subject: [PATCH 356/391] Use environment variable to initialize
 `scrapegraph_py.Client` (#362)

This commit fixes a bug where `SCRAPEGRAPH_API_KEY` was never used to
initialize `scrapegraph_py.Client`.
---
 .../tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py
index 04a544fa6..34f42e52e 100644
--- a/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py
+++ b/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py
@@ -102,9 +102,8 @@ class ScrapegraphScrapeTool(BaseTool):
                 "`scrapegraph-py` package not found, please run `uv add scrapegraph-py`"
             )
 
-        self._client = Client(api_key=api_key)
-
         self.api_key = api_key or os.getenv("SCRAPEGRAPH_API_KEY")
+        self._client = Client(api_key=self.api_key)
 
         if not self.api_key:
             raise ValueError("Scrapegraph API key is required")
From 78f5144bde44fee525d2b68f39bd43c4912579f2 Mon Sep 17 00:00:00 2001
From: Lorenze Jay <63378463+lorenzejay@users.noreply.github.com>
Date: Fri, 11 Jul 2025 10:18:54 -0700
Subject: [PATCH 357/391] =?UTF-8?q?Enhance=20EnterpriseActionTool=20with?=
 =?UTF-8?q?=20improved=20schema=20processing=20and=20erro=E2=80=A6=20(#371?=
 =?UTF-8?q?)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Enhance EnterpriseActionTool with improved schema processing and error handling

- Added methods for sanitizing names and processing schema types, including support for nested models and nullable types.
- Improved error handling during schema creation and processing, with warnings for failures.
- Updated parameter handling in the `_run` method to clean up `kwargs` before sending requests.
- Introduced a detailed description generation for nested schema structures to enhance tool documentation.

* Add tests for EnterpriseActionTool schema conversion and validation

- Introduced a new test class for validating complex nested schemas in EnterpriseActionTool.
- Added tests for schema conversion, optional fields, enum validation, and required nested fields. - Implemented execution tests to ensure the tool can handle complex validated input correctly. - Verified model naming conventions and added tests for simpler schemas with basic enum validation. - Enhanced overall test coverage for the EnterpriseActionTool functionality. * Update chromadb dependency version in pyproject.toml and uv.lock - Changed chromadb version from >=0.4.22 to ==0.5.23 in both pyproject.toml and uv.lock to ensure compatibility and stability. * Update test workflow configuration - Changed EMBEDCHAIN_DB_URI to point to a temporary test database location. - Added CHROMA_PERSIST_PATH for specifying the path to the Chroma test database. - Cleaned up the test run command in the workflow file. * reverted --- .../adapters/enterprise_adapter.py | 258 ++++++++++++----- tests/tools/crewai_enterprise_tools_test.py | 268 ++++++++++++++++++ 2 files changed, 456 insertions(+), 70 deletions(-) diff --git a/src/crewai_tools/adapters/enterprise_adapter.py b/src/crewai_tools/adapters/enterprise_adapter.py index 96d64af8b..bd442d98f 100644 --- a/src/crewai_tools/adapters/enterprise_adapter.py +++ b/src/crewai_tools/adapters/enterprise_adapter.py @@ -1,9 +1,11 @@ import os import json import requests -from typing import List, Any, Dict, Optional +from typing import List, Any, Dict, Literal, Optional, Union, get_origin from pydantic import Field, create_model from crewai.tools import BaseTool +import re + # DEFAULTS ENTERPRISE_ACTION_KIT_PROJECT_ID = "dd525517-df22-49d2-a69e-6a0eed211166" @@ -37,6 +39,9 @@ class EnterpriseActionTool(BaseTool): enterprise_action_kit_project_url: str = ENTERPRISE_ACTION_KIT_PROJECT_URL, enterprise_action_kit_project_id: str = ENTERPRISE_ACTION_KIT_PROJECT_ID, ): + self._model_registry = {} + self._base_name = self._sanitize_name(name) + schema_props, required = self._extract_schema_info(action_schema) # Define field definitions for the model @@ -44,22 +49,36 @@ class EnterpriseActionTool(BaseTool): for param_name, param_details in schema_props.items(): param_desc = param_details.get("description", "") is_required = param_name in required - is_nullable, param_type = self._analyze_field_type(param_details) - # Create field definition based on nullable and required status + try: + field_type = self._process_schema_type( + param_details, self._sanitize_name(param_name).title() + ) + except Exception as e: + print(f"Warning: Could not process schema for {param_name}: {e}") + field_type = str + + # Create field definition based on requirement field_definitions[param_name] = self._create_field_definition( - param_type, is_required, is_nullable, param_desc + field_type, is_required, param_desc ) # Create the model if field_definitions: - args_schema = create_model( - f"{name.capitalize()}Schema", **field_definitions - ) + try: + args_schema = create_model( + f"{self._base_name}Schema", **field_definitions + ) + except Exception as e: + print(f"Warning: Could not create main schema model: {e}") + args_schema = create_model( + f"{self._base_name}Schema", + input_text=(str, Field(description="Input for the action")), + ) else: # Fallback for empty schema args_schema = create_model( - f"{name.capitalize()}Schema", + f"{self._base_name}Schema", input_text=(str, Field(description="Input for the action")), ) @@ -73,6 +92,12 @@ class EnterpriseActionTool(BaseTool): if enterprise_action_kit_project_url is not None: self.enterprise_action_kit_project_url = 
enterprise_action_kit_project_url + def _sanitize_name(self, name: str) -> str: + """Sanitize names to create proper Python class names.""" + sanitized = re.sub(r"[^a-zA-Z0-9_]", "", name) + parts = sanitized.split("_") + return "".join(word.capitalize() for word in parts if word) + def _extract_schema_info( self, action_schema: Dict[str, Any] ) -> tuple[Dict[str, Any], List[str]]: @@ -87,51 +112,97 @@ class EnterpriseActionTool(BaseTool): ) return schema_props, required - def _analyze_field_type(self, param_details: Dict[str, Any]) -> tuple[bool, type]: - """Analyze field type and nullability from parameter details.""" - is_nullable = False - param_type = str # Default type - - if "anyOf" in param_details: - any_of_types = param_details["anyOf"] + def _process_schema_type(self, schema: Dict[str, Any], type_name: str) -> type: + """Process a JSON schema and return appropriate Python type.""" + if "anyOf" in schema: + any_of_types = schema["anyOf"] is_nullable = any(t.get("type") == "null" for t in any_of_types) non_null_types = [t for t in any_of_types if t.get("type") != "null"] - if non_null_types: - first_type = non_null_types[0].get("type", "string") - param_type = self._map_json_type_to_python( - first_type, non_null_types[0] - ) - else: - json_type = param_details.get("type", "string") - param_type = self._map_json_type_to_python(json_type, param_details) - is_nullable = json_type == "null" - return is_nullable, param_type + if non_null_types: + base_type = self._process_schema_type(non_null_types[0], type_name) + return Optional[base_type] if is_nullable else base_type + return Optional[str] + + if "oneOf" in schema: + return self._process_schema_type(schema["oneOf"][0], type_name) + + if "allOf" in schema: + return self._process_schema_type(schema["allOf"][0], type_name) + + json_type = schema.get("type", "string") + + if "enum" in schema: + enum_values = schema["enum"] + if not enum_values: + return self._map_json_type_to_python(json_type) + return Literal[tuple(enum_values)] # type: ignore + + if json_type == "array": + items_schema = schema.get("items", {"type": "string"}) + item_type = self._process_schema_type(items_schema, f"{type_name}Item") + return List[item_type] + + if json_type == "object": + return self._create_nested_model(schema, type_name) + + return self._map_json_type_to_python(json_type) + + def _create_nested_model(self, schema: Dict[str, Any], model_name: str) -> type: + """Create a nested Pydantic model for complex objects.""" + full_model_name = f"{self._base_name}{model_name}" + + if full_model_name in self._model_registry: + return self._model_registry[full_model_name] + + properties = schema.get("properties", {}) + required_fields = schema.get("required", []) + + if not properties: + return dict + + field_definitions = {} + for prop_name, prop_schema in properties.items(): + prop_desc = prop_schema.get("description", "") + is_required = prop_name in required_fields + + try: + prop_type = self._process_schema_type( + prop_schema, f"{model_name}{self._sanitize_name(prop_name).title()}" + ) + except Exception as e: + print(f"Warning: Could not process schema for {prop_name}: {e}") + prop_type = str + + field_definitions[prop_name] = self._create_field_definition( + prop_type, is_required, prop_desc + ) + + try: + nested_model = create_model(full_model_name, **field_definitions) + self._model_registry[full_model_name] = nested_model + return nested_model + except Exception as e: + print(f"Warning: Could not create nested model {full_model_name}: {e}") + return 
dict def _create_field_definition( - self, param_type: type, is_required: bool, is_nullable: bool, param_desc: str + self, field_type: type, is_required: bool, description: str ) -> tuple: - """Create Pydantic field definition based on type, requirement, and nullability.""" - if is_nullable: - return ( - Optional[param_type], - Field(default=None, description=param_desc), - ) - elif is_required: - return ( - param_type, - Field(description=param_desc), - ) + """Create Pydantic field definition based on type and requirement.""" + if is_required: + return (field_type, Field(description=description)) else: - return ( - Optional[param_type], - Field(default=None, description=param_desc), - ) + if get_origin(field_type) is Union: + return (field_type, Field(default=None, description=description)) + else: + return ( + Optional[field_type], + Field(default=None, description=description), + ) - def _map_json_type_to_python( - self, json_type: str, param_details: Dict[str, Any] - ) -> type: - """Map JSON schema types to Python types.""" + def _map_json_type_to_python(self, json_type: str) -> type: + """Map basic JSON schema types to Python types.""" type_mapping = { "string": str, "integer": int, @@ -139,6 +210,7 @@ class EnterpriseActionTool(BaseTool): "boolean": bool, "array": list, "object": dict, + "null": type(None), } return type_mapping.get(json_type, str) @@ -149,29 +221,37 @@ class EnterpriseActionTool(BaseTool): required_nullable_fields = [] for param_name in required: param_details = schema_props.get(param_name, {}) - is_nullable, _ = self._analyze_field_type(param_details) - if is_nullable: + if self._is_nullable_type(param_details): required_nullable_fields.append(param_name) return required_nullable_fields + def _is_nullable_type(self, schema: Dict[str, Any]) -> bool: + """Check if a schema represents a nullable type.""" + if "anyOf" in schema: + return any(t.get("type") == "null" for t in schema["anyOf"]) + return schema.get("type") == "null" + def _run(self, **kwargs) -> str: """Execute the specific enterprise action with validated parameters.""" try: + cleaned_kwargs = {} + for key, value in kwargs.items(): + if value is not None: + cleaned_kwargs[key] = value + required_nullable_fields = self._get_required_nullable_fields() for field_name in required_nullable_fields: - if field_name not in kwargs: - kwargs[field_name] = None - - params = {k: v for k, v in kwargs.items() if v is not None} + if field_name not in cleaned_kwargs: + cleaned_kwargs[field_name] = None api_url = f"{self.enterprise_action_kit_project_url}/{self.enterprise_action_kit_project_id}/actions" headers = { "Authorization": f"Bearer {self.enterprise_action_token}", "Content-Type": "application/json", } - payload = {"action": self.action_name, "parameters": params} + payload = {"action": self.action_name, "parameters": cleaned_kwargs} response = requests.post( url=api_url, headers=headers, json=payload, timeout=60 @@ -198,7 +278,6 @@ class EnterpriseActionKitToolAdapter: enterprise_action_kit_project_id: str = ENTERPRISE_ACTION_KIT_PROJECT_ID, ): """Initialize the adapter with an enterprise action token.""" - self.enterprise_action_token = enterprise_action_token self._actions_schema = {} self._tools = None @@ -206,11 +285,7 @@ class EnterpriseActionKitToolAdapter: self.enterprise_action_kit_project_url = enterprise_action_kit_project_url def tools(self) -> List[BaseTool]: - """Get the list of tools created from enterprise actions. - - Returns: - List of BaseTool instances, one for each enterprise action. 
- """ + """Get the list of tools created from enterprise actions.""" if self._tools is None: self._fetch_actions() self._create_tools() @@ -261,6 +336,53 @@ class EnterpriseActionKitToolAdapter: traceback.print_exc() + def _generate_detailed_description( + self, schema: Dict[str, Any], indent: int = 0 + ) -> List[str]: + """Generate detailed description for nested schema structures.""" + descriptions = [] + indent_str = " " * indent + + schema_type = schema.get("type", "string") + + if schema_type == "object": + properties = schema.get("properties", {}) + required_fields = schema.get("required", []) + + if properties: + descriptions.append(f"{indent_str}Object with properties:") + for prop_name, prop_schema in properties.items(): + prop_desc = prop_schema.get("description", "") + is_required = prop_name in required_fields + req_str = " (required)" if is_required else " (optional)" + descriptions.append( + f"{indent_str} - {prop_name}: {prop_desc}{req_str}" + ) + + if prop_schema.get("type") == "object": + descriptions.extend( + self._generate_detailed_description(prop_schema, indent + 2) + ) + elif prop_schema.get("type") == "array": + items_schema = prop_schema.get("items", {}) + if items_schema.get("type") == "object": + descriptions.append(f"{indent_str} Array of objects:") + descriptions.extend( + self._generate_detailed_description( + items_schema, indent + 3 + ) + ) + elif "enum" in items_schema: + descriptions.append( + f"{indent_str} Array of enum values: {items_schema['enum']}" + ) + elif "enum" in prop_schema: + descriptions.append( + f"{indent_str} Enum values: {prop_schema['enum']}" + ) + + return descriptions + def _create_tools(self): """Create BaseTool instances for each action.""" tools = [] @@ -269,19 +391,16 @@ class EnterpriseActionKitToolAdapter: function_details = action_schema.get("function", {}) description = function_details.get("description", f"Execute {action_name}") - # Get parameter info for a better description - parameters = function_details.get("parameters", {}).get("properties", {}) - param_info = [] - for param_name, param_details in parameters.items(): - param_desc = param_details.get("description", "") - required = param_name in function_details.get("parameters", {}).get( - "required", [] - ) - param_info.append( - f"- {param_name}: {param_desc} {'(required)' if required else '(optional)'}" + parameters = function_details.get("parameters", {}) + param_descriptions = [] + + if parameters.get("properties"): + param_descriptions.append("\nDetailed Parameter Structure:") + param_descriptions.extend( + self._generate_detailed_description(parameters) ) - full_description = f"{description}\n\nParameters:\n" + "\n".join(param_info) + full_description = description + "\n".join(param_descriptions) tool = EnterpriseActionTool( name=action_name.lower().replace(" ", "_"), @@ -297,7 +416,6 @@ class EnterpriseActionKitToolAdapter: self._tools = tools - # Adding context manager support for convenience, but direct usage is also supported def __enter__(self): return self.tools() diff --git a/tests/tools/crewai_enterprise_tools_test.py b/tests/tools/crewai_enterprise_tools_test.py index d7a868472..b043289dc 100644 --- a/tests/tools/crewai_enterprise_tools_test.py +++ b/tests/tools/crewai_enterprise_tools_test.py @@ -2,9 +2,11 @@ import os import unittest from unittest.mock import patch, MagicMock + from crewai.tools import BaseTool from crewai_tools.tools import CrewaiEnterpriseTools from crewai_tools.adapters.tool_collection import ToolCollection +from 
crewai_tools.adapters.enterprise_adapter import EnterpriseActionTool class TestCrewaiEnterpriseTools(unittest.TestCase): @@ -86,3 +88,269 @@ class TestCrewaiEnterpriseTools(unittest.TestCase): self.assertEqual(len(tools), 2) self.assertEqual(tools[0].name, "tool1") self.assertEqual(tools[1].name, "tool3") + + +class TestEnterpriseActionToolSchemaConversion(unittest.TestCase): + """Test the enterprise action tool schema conversion and validation.""" + + def setUp(self): + self.test_schema = { + "type": "function", + "function": { + "name": "TEST_COMPLEX_ACTION", + "description": "Test action with complex nested structure", + "parameters": { + "type": "object", + "properties": { + "filterCriteria": { + "type": "object", + "description": "Filter criteria object", + "properties": { + "operation": {"type": "string", "enum": ["AND", "OR"]}, + "rules": { + "type": "array", + "items": { + "type": "object", + "properties": { + "field": { + "type": "string", + "enum": ["name", "email", "status"], + }, + "operator": { + "type": "string", + "enum": ["equals", "contains"], + }, + "value": {"type": "string"}, + }, + "required": ["field", "operator", "value"], + }, + }, + }, + "required": ["operation", "rules"], + }, + "options": { + "type": "object", + "properties": { + "limit": {"type": "integer"}, + "offset": {"type": "integer"}, + }, + "required": [], + }, + }, + "required": [], + }, + }, + } + + def test_complex_schema_conversion(self): + """Test that complex nested schemas are properly converted to Pydantic models.""" + tool = EnterpriseActionTool( + name="gmail_search_for_email", + description="Test tool", + enterprise_action_token="test_token", + action_name="GMAIL_SEARCH_FOR_EMAIL", + action_schema=self.test_schema, + ) + + self.assertEqual(tool.name, "gmail_search_for_email") + self.assertEqual(tool.action_name, "GMAIL_SEARCH_FOR_EMAIL") + + schema_class = tool.args_schema + self.assertIsNotNone(schema_class) + + schema_fields = schema_class.model_fields + self.assertIn("filterCriteria", schema_fields) + self.assertIn("options", schema_fields) + + # Test valid input structure + valid_input = { + "filterCriteria": { + "operation": "AND", + "rules": [ + {"field": "name", "operator": "contains", "value": "test"}, + {"field": "status", "operator": "equals", "value": "active"}, + ], + }, + "options": {"limit": 10}, + } + + # This should not raise an exception + validated_input = schema_class(**valid_input) + self.assertIsNotNone(validated_input.filterCriteria) + self.assertIsNotNone(validated_input.options) + + def test_optional_fields_validation(self): + """Test that optional fields work correctly.""" + tool = EnterpriseActionTool( + name="gmail_search_for_email", + description="Test tool", + enterprise_action_token="test_token", + action_name="GMAIL_SEARCH_FOR_EMAIL", + action_schema=self.test_schema, + ) + + schema_class = tool.args_schema + + minimal_input = {} + validated_input = schema_class(**minimal_input) + self.assertIsNone(validated_input.filterCriteria) + self.assertIsNone(validated_input.options) + + partial_input = {"options": {"limit": 10}} + validated_input = schema_class(**partial_input) + self.assertIsNone(validated_input.filterCriteria) + self.assertIsNotNone(validated_input.options) + + def test_enum_validation(self): + """Test that enum values are properly validated.""" + tool = EnterpriseActionTool( + name="gmail_search_for_email", + description="Test tool", + enterprise_action_token="test_token", + action_name="GMAIL_SEARCH_FOR_EMAIL", + action_schema=self.test_schema, + ) + + 
schema_class = tool.args_schema + + invalid_input = { + "filterCriteria": { + "operation": "INVALID_OPERATOR", + "rules": [], + } + } + + with self.assertRaises(Exception): + schema_class(**invalid_input) + + def test_required_nested_fields(self): + """Test that required fields in nested objects are validated.""" + tool = EnterpriseActionTool( + name="gmail_search_for_email", + description="Test tool", + enterprise_action_token="test_token", + action_name="GMAIL_SEARCH_FOR_EMAIL", + action_schema=self.test_schema, + ) + + schema_class = tool.args_schema + + incomplete_input = { + "filterCriteria": { + "operation": "OR", + "rules": [ + { + "field": "name", + "operator": "contains", + } + ], + } + } + + with self.assertRaises(Exception): + schema_class(**incomplete_input) + + @patch("requests.post") + def test_tool_execution_with_complex_input(self, mock_post): + """Test that the tool can execute with complex validated input.""" + mock_response = MagicMock() + mock_response.ok = True + mock_response.json.return_value = {"success": True, "results": []} + mock_post.return_value = mock_response + + tool = EnterpriseActionTool( + name="gmail_search_for_email", + description="Test tool", + enterprise_action_token="test_token", + action_name="GMAIL_SEARCH_FOR_EMAIL", + action_schema=self.test_schema, + ) + + tool._run( + filterCriteria={ + "operation": "OR", + "rules": [ + {"field": "name", "operator": "contains", "value": "test"}, + {"field": "status", "operator": "equals", "value": "active"}, + ], + }, + options={"limit": 10}, + ) + + mock_post.assert_called_once() + call_args = mock_post.call_args + payload = call_args[1]["json"] + + self.assertEqual(payload["action"], "GMAIL_SEARCH_FOR_EMAIL") + self.assertIn("filterCriteria", payload["parameters"]) + self.assertIn("options", payload["parameters"]) + self.assertEqual(payload["parameters"]["filterCriteria"]["operation"], "OR") + + def test_model_naming_convention(self): + """Test that generated model names follow proper conventions.""" + tool = EnterpriseActionTool( + name="gmail_search_for_email", + description="Test tool", + enterprise_action_token="test_token", + action_name="GMAIL_SEARCH_FOR_EMAIL", + action_schema=self.test_schema, + ) + + schema_class = tool.args_schema + self.assertIsNotNone(schema_class) + + self.assertTrue(schema_class.__name__.endswith("Schema")) + self.assertTrue(schema_class.__name__[0].isupper()) + + complex_input = { + "filterCriteria": { + "operation": "OR", + "rules": [ + {"field": "name", "operator": "contains", "value": "test"}, + {"field": "status", "operator": "equals", "value": "active"}, + ], + }, + "options": {"limit": 10}, + } + + validated = schema_class(**complex_input) + self.assertIsNotNone(validated.filterCriteria) + + def test_simple_schema_with_enums(self): + """Test a simpler schema with basic enum validation.""" + simple_schema = { + "type": "function", + "function": { + "name": "SIMPLE_TEST", + "description": "Simple test function", + "parameters": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": ["active", "inactive", "pending"], + }, + "priority": {"type": "integer", "enum": [1, 2, 3]}, + }, + "required": ["status"], + }, + }, + } + + tool = EnterpriseActionTool( + name="simple_test", + description="Simple test tool", + enterprise_action_token="test_token", + action_name="SIMPLE_TEST", + action_schema=simple_schema, + ) + + schema_class = tool.args_schema + + valid_input = {"status": "active", "priority": 2} + validated = schema_class(**valid_input) + 
self.assertEqual(validated.status, "active") + self.assertEqual(validated.priority, 2) + + with self.assertRaises(Exception): + schema_class(status="invalid_status") From 9f6002a9dd6083b99ac0e2c24f118b4e139f8334 Mon Sep 17 00:00:00 2001 From: Thiago Moretto <168731+thiagomoretto@users.noreply.github.com> Date: Mon, 14 Jul 2025 10:48:36 -0300 Subject: [PATCH 358/391] Declaring and make some tool configurable using env vars (#376) --- src/crewai_tools/tools/ai_mind_tool/ai_mind_tool.py | 7 +++++-- src/crewai_tools/tools/linkup/linkup_search_tool.py | 10 +++++++--- src/crewai_tools/tools/multion_tool/multion_tool.py | 8 ++++++-- .../oxylabs_amazon_product_scraper_tool.py | 6 +++++- .../oxylabs_amazon_search_scraper_tool.py | 6 +++++- .../oxylabs_google_search_scraper_tool.py | 6 +++++- .../oxylabs_universal_scraper_tool.py | 6 +++++- .../scrapfly_scrape_website_tool.py | 8 ++++++-- src/crewai_tools/tools/spider_tool/spider_tool.py | 5 ++++- 9 files changed, 48 insertions(+), 14 deletions(-) diff --git a/src/crewai_tools/tools/ai_mind_tool/ai_mind_tool.py b/src/crewai_tools/tools/ai_mind_tool/ai_mind_tool.py index 1a96f62ff..ea6b19281 100644 --- a/src/crewai_tools/tools/ai_mind_tool/ai_mind_tool.py +++ b/src/crewai_tools/tools/ai_mind_tool/ai_mind_tool.py @@ -2,7 +2,7 @@ import os import secrets from typing import Any, Dict, List, Optional, Type -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from openai import OpenAI from pydantic import BaseModel, Field @@ -33,6 +33,9 @@ class AIMindTool(BaseTool): datasources: Optional[List[Dict[str, Any]]] = None mind_name: Optional[str] = None package_dependencies: List[str] = ["minds-sdk"] + env_vars: List[EnvVar] = [ + EnvVar(name="MINDS_API_KEY", description="API key for AI-Minds", required=True), + ] def __init__(self, api_key: Optional[str] = None, **kwargs): super().__init__(**kwargs) @@ -85,4 +88,4 @@ class AIMindTool(BaseTool): stream=False, ) - return completion.choices[0].message.content \ No newline at end of file + return completion.choices[0].message.content diff --git a/src/crewai_tools/tools/linkup/linkup_search_tool.py b/src/crewai_tools/tools/linkup/linkup_search_tool.py index c35c7fac3..634ba2863 100644 --- a/src/crewai_tools/tools/linkup/linkup_search_tool.py +++ b/src/crewai_tools/tools/linkup/linkup_search_tool.py @@ -1,6 +1,7 @@ +import os from typing import Any, List -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar try: from linkup import LinkupClient @@ -24,8 +25,11 @@ class LinkupSearchTool(BaseTool): ) _client: LinkupClient = PrivateAttr() # type: ignore package_dependencies: List[str] = ["linkup-sdk"] + env_vars: List[EnvVar] = [ + EnvVar(name="LINKUP_API_KEY", description="API key for Linkup", required=True), + ] - def __init__(self, api_key: str): + def __init__(self, api_key: str | None = None): """ Initialize the tool with an API key. """ @@ -48,7 +52,7 @@ class LinkupSearchTool(BaseTool): "The 'linkup-sdk' package is required to use the LinkupSearchTool. 
" "Please install it with: uv add linkup-sdk" ) - self._client = LinkupClient(api_key=api_key) + self._client = LinkupClient(api_key=api_key or os.getenv("LINKUP_API_KEY")) def _run( self, query: str, depth: str = "standard", output_type: str = "searchResults" diff --git a/src/crewai_tools/tools/multion_tool/multion_tool.py b/src/crewai_tools/tools/multion_tool/multion_tool.py index 3c8d17819..cf652c324 100644 --- a/src/crewai_tools/tools/multion_tool/multion_tool.py +++ b/src/crewai_tools/tools/multion_tool/multion_tool.py @@ -1,8 +1,9 @@ """Multion tool spec.""" +import os from typing import Any, Optional, List -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar class MultiOnTool(BaseTool): @@ -17,6 +18,9 @@ class MultiOnTool(BaseTool): local: bool = False max_steps: int = 3 package_dependencies: List[str] = ["multion"] + env_vars: List[EnvVar] = [ + EnvVar(name="MULTION_API_KEY", description="API key for Multion", required=True), + ] def __init__( self, @@ -44,7 +48,7 @@ class MultiOnTool(BaseTool): ) self.session_id = None self.local = local - self.multion = MultiOn(api_key=api_key) + self.multion = MultiOn(api_key=api_key or os.getenv("MULTION_API_KEY")) self.max_steps = max_steps def _run( diff --git a/src/crewai_tools/tools/oxylabs_amazon_product_scraper_tool/oxylabs_amazon_product_scraper_tool.py b/src/crewai_tools/tools/oxylabs_amazon_product_scraper_tool/oxylabs_amazon_product_scraper_tool.py index d763fa86f..1d4146fcb 100644 --- a/src/crewai_tools/tools/oxylabs_amazon_product_scraper_tool/oxylabs_amazon_product_scraper_tool.py +++ b/src/crewai_tools/tools/oxylabs_amazon_product_scraper_tool/oxylabs_amazon_product_scraper_tool.py @@ -4,7 +4,7 @@ from importlib.metadata import version from platform import architecture, python_version from typing import Any, List, Type -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, ConfigDict, Field try: @@ -73,6 +73,10 @@ class OxylabsAmazonProductScraperTool(BaseTool): oxylabs_api: RealtimeClient config: OxylabsAmazonProductScraperConfig package_dependencies: List[str] = ["oxylabs"] + env_vars: List[EnvVar] = [ + EnvVar(name="OXYLABS_USERNAME", description="Username for Oxylabs", required=True), + EnvVar(name="OXYLABS_PASSWORD", description="Password for Oxylabs", required=True), + ] def __init__( self, diff --git a/src/crewai_tools/tools/oxylabs_amazon_search_scraper_tool/oxylabs_amazon_search_scraper_tool.py b/src/crewai_tools/tools/oxylabs_amazon_search_scraper_tool/oxylabs_amazon_search_scraper_tool.py index 9a113e93a..e659d244f 100644 --- a/src/crewai_tools/tools/oxylabs_amazon_search_scraper_tool/oxylabs_amazon_search_scraper_tool.py +++ b/src/crewai_tools/tools/oxylabs_amazon_search_scraper_tool/oxylabs_amazon_search_scraper_tool.py @@ -4,7 +4,7 @@ from importlib.metadata import version from platform import architecture, python_version from typing import Any, List, Type -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, ConfigDict, Field try: @@ -75,6 +75,10 @@ class OxylabsAmazonSearchScraperTool(BaseTool): oxylabs_api: RealtimeClient config: OxylabsAmazonSearchScraperConfig package_dependencies: List[str] = ["oxylabs"] + env_vars: List[EnvVar] = [ + EnvVar(name="OXYLABS_USERNAME", description="Username for Oxylabs", required=True), + EnvVar(name="OXYLABS_PASSWORD", description="Password for Oxylabs", required=True), + ] def __init__( self, diff --git 
a/src/crewai_tools/tools/oxylabs_google_search_scraper_tool/oxylabs_google_search_scraper_tool.py b/src/crewai_tools/tools/oxylabs_google_search_scraper_tool/oxylabs_google_search_scraper_tool.py index 7de1aaa2d..1096df098 100644 --- a/src/crewai_tools/tools/oxylabs_google_search_scraper_tool/oxylabs_google_search_scraper_tool.py +++ b/src/crewai_tools/tools/oxylabs_google_search_scraper_tool/oxylabs_google_search_scraper_tool.py @@ -4,7 +4,7 @@ from importlib.metadata import version from platform import architecture, python_version from typing import Any, List, Type -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, ConfigDict, Field try: @@ -78,6 +78,10 @@ class OxylabsGoogleSearchScraperTool(BaseTool): oxylabs_api: RealtimeClient config: OxylabsGoogleSearchScraperConfig package_dependencies: List[str] = ["oxylabs"] + env_vars: List[EnvVar] = [ + EnvVar(name="OXYLABS_USERNAME", description="Username for Oxylabs", required=True), + EnvVar(name="OXYLABS_PASSWORD", description="Password for Oxylabs", required=True), + ] def __init__( self, diff --git a/src/crewai_tools/tools/oxylabs_universal_scraper_tool/oxylabs_universal_scraper_tool.py b/src/crewai_tools/tools/oxylabs_universal_scraper_tool/oxylabs_universal_scraper_tool.py index 22d02f91f..05b174500 100644 --- a/src/crewai_tools/tools/oxylabs_universal_scraper_tool/oxylabs_universal_scraper_tool.py +++ b/src/crewai_tools/tools/oxylabs_universal_scraper_tool/oxylabs_universal_scraper_tool.py @@ -4,7 +4,7 @@ from importlib.metadata import version from platform import architecture, python_version from typing import Any, List, Type -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, ConfigDict, Field try: @@ -69,6 +69,10 @@ class OxylabsUniversalScraperTool(BaseTool): oxylabs_api: RealtimeClient config: OxylabsUniversalScraperConfig package_dependencies: List[str] = ["oxylabs"] + env_vars: List[EnvVar] = [ + EnvVar(name="OXYLABS_USERNAME", description="Username for Oxylabs", required=True), + EnvVar(name="OXYLABS_PASSWORD", description="Password for Oxylabs", required=True), + ] def __init__( self, diff --git a/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py b/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py index 60fc75e16..38bdab2a0 100644 --- a/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py +++ b/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py @@ -1,7 +1,8 @@ +import os import logging from typing import Any, Dict, Literal, Optional, Type, List -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, Field logger = logging.getLogger(__file__) @@ -29,6 +30,9 @@ class ScrapflyScrapeWebsiteTool(BaseTool): api_key: str = None scrapfly: Optional[Any] = None package_dependencies: List[str] = ["scrapfly-sdk"] + env_vars: List[EnvVar] = [ + EnvVar(name="SCRAPFLY_API_KEY", description="API key for Scrapfly", required=True), + ] def __init__(self, api_key: str): super().__init__() @@ -47,7 +51,7 @@ class ScrapflyScrapeWebsiteTool(BaseTool): raise ImportError( "`scrapfly-sdk` package not found, please run `uv add scrapfly-sdk`" ) - self.scrapfly = ScrapflyClient(key=api_key) + self.scrapfly = ScrapflyClient(key=api_key or os.getenv("SCRAPFLY_API_KEY")) def _run( self, diff --git a/src/crewai_tools/tools/spider_tool/spider_tool.py 
b/src/crewai_tools/tools/spider_tool/spider_tool.py index 853833261..3aee6ef88 100644 --- a/src/crewai_tools/tools/spider_tool/spider_tool.py +++ b/src/crewai_tools/tools/spider_tool/spider_tool.py @@ -2,7 +2,7 @@ import logging from typing import Any, Dict, Literal, Optional, Type, List from urllib.parse import unquote, urlparse -from crewai.tools import BaseTool +from crewai.tools import BaseTool, EnvVar from pydantic import BaseModel, Field logger = logging.getLogger(__file__) @@ -54,6 +54,9 @@ class SpiderTool(BaseTool): log_failures: bool = True config: SpiderToolConfig = SpiderToolConfig() package_dependencies: List[str] = ["spider-client"] + env_vars: List[EnvVar] = [ + EnvVar(name="SPIDER_API_KEY", description="API key for Spider.cloud", required=True), + ] def __init__( self, From 2c38d1d4487c0254fe066b983396d84dc294119e Mon Sep 17 00:00:00 2001 From: Piyush Jain Date: Fri, 18 Jul 2025 14:33:39 -0700 Subject: [PATCH 359/391] Bedrock AgentCore browser and code interpreter toolkits (#385) * Added browser and code tools * Added dependencies, moved imports inside class * Added instructions in README * Updated imports * Updated imports * Updated dependencies * Fix 'get_current_page' utilities for Browser tools * Support browser session cleanup from synchronous code * Update browser tool examples for new changes * Manually override _run->_arun and set nested loop when in crew event loop * Browser async example * update examples with uv * Fix toolkit fields for code interpreter * Update code interpreter examples * update uv.lock * Move nest_asyncio import --------- Co-authored-by: Michael Chin --- src/crewai_tools/aws/__init__.py | 19 +- src/crewai_tools/aws/bedrock/__init__.py | 9 +- .../aws/bedrock/browser/README.md | 158 +++++ .../aws/bedrock/browser/__init__.py | 3 + .../browser/browser_session_manager.py | 260 ++++++++ .../aws/bedrock/browser/browser_toolkit.py | 587 ++++++++++++++++++ src/crewai_tools/aws/bedrock/browser/utils.py | 43 ++ .../aws/bedrock/code_interpreter/README.md | 217 +++++++ .../aws/bedrock/code_interpreter/__init__.py | 3 + .../code_interpreter_toolkit.py | 543 ++++++++++++++++ 10 files changed, 1835 insertions(+), 7 deletions(-) create mode 100644 src/crewai_tools/aws/bedrock/browser/README.md create mode 100644 src/crewai_tools/aws/bedrock/browser/__init__.py create mode 100644 src/crewai_tools/aws/bedrock/browser/browser_session_manager.py create mode 100644 src/crewai_tools/aws/bedrock/browser/browser_toolkit.py create mode 100644 src/crewai_tools/aws/bedrock/browser/utils.py create mode 100644 src/crewai_tools/aws/bedrock/code_interpreter/README.md create mode 100644 src/crewai_tools/aws/bedrock/code_interpreter/__init__.py create mode 100644 src/crewai_tools/aws/bedrock/code_interpreter/code_interpreter_toolkit.py diff --git a/src/crewai_tools/aws/__init__.py b/src/crewai_tools/aws/__init__.py index dd01fd8fe..b2d279078 100644 --- a/src/crewai_tools/aws/__init__.py +++ b/src/crewai_tools/aws/__init__.py @@ -1,9 +1,16 @@ from .s3 import S3ReaderTool, S3WriterTool -from .bedrock import BedrockKBRetrieverTool, BedrockInvokeAgentTool +from .bedrock import ( + BedrockKBRetrieverTool, + BedrockInvokeAgentTool, + create_browser_toolkit, + create_code_interpreter_toolkit, +) __all__ = [ - 'S3ReaderTool', - 'S3WriterTool', - 'BedrockKBRetrieverTool', - 'BedrockInvokeAgentTool' -] \ No newline at end of file + "S3ReaderTool", + "S3WriterTool", + "BedrockKBRetrieverTool", + "BedrockInvokeAgentTool", + "create_browser_toolkit", + "create_code_interpreter_toolkit" +] 
diff --git a/src/crewai_tools/aws/bedrock/__init__.py b/src/crewai_tools/aws/bedrock/__init__.py index ded472062..58fc5bca9 100644 --- a/src/crewai_tools/aws/bedrock/__init__.py +++ b/src/crewai_tools/aws/bedrock/__init__.py @@ -1,4 +1,11 @@ from .knowledge_base.retriever_tool import BedrockKBRetrieverTool from .agents.invoke_agent_tool import BedrockInvokeAgentTool +from .browser import create_browser_toolkit +from .code_interpreter import create_code_interpreter_toolkit -__all__ = ["BedrockKBRetrieverTool", "BedrockInvokeAgentTool"] +__all__ = [ + "BedrockKBRetrieverTool", + "BedrockInvokeAgentTool", + "create_browser_toolkit", + "create_code_interpreter_toolkit" +] diff --git a/src/crewai_tools/aws/bedrock/browser/README.md b/src/crewai_tools/aws/bedrock/browser/README.md new file mode 100644 index 000000000..7f0188bbb --- /dev/null +++ b/src/crewai_tools/aws/bedrock/browser/README.md @@ -0,0 +1,158 @@ +# AWS Bedrock Browser Tools + +This toolkit provides a set of tools for interacting with web browsers through AWS Bedrock Browser. It enables your CrewAI agents to navigate websites, extract content, click elements, and more. + +## Features + +- Navigate to URLs and browse the web +- Extract text and hyperlinks from pages +- Click on elements using CSS selectors +- Navigate back through browser history +- Get information about the current webpage +- Multiple browser sessions with thread-based isolation + +## Installation + +Ensure you have the necessary dependencies: + +```bash +uv add crewai-tools bedrock-agentcore beautifulsoup4 playwright nest-asyncio +``` + +## Usage + +### Basic Usage + +```python +from crewai import Agent, Task, Crew, LLM +from crewai_tools.aws.bedrock.browser import create_browser_toolkit + +# Create the browser toolkit +toolkit, browser_tools = create_browser_toolkit(region="us-west-2") + +# Create the Bedrock LLM +llm = LLM( + model="bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0", + region_name="us-west-2", +) + +# Create a CrewAI agent that uses the browser tools +research_agent = Agent( + role="Web Researcher", + goal="Research and summarize web content", + backstory="You're an expert at finding information online.", + tools=browser_tools, + llm=llm +) + +# Create a task for the agent +research_task = Task( + description="Navigate to https://example.com and extract all text content. Summarize the main points.", + expected_output="A list of bullet points containing the most important information on https://example.com. Plus, a description of the tool calls used, and actions performed to get to the page.", + agent=research_agent +) + +# Create and run the crew +crew = Crew( + agents=[research_agent], + tasks=[research_task] +) +result = crew.kickoff() + +print(f"\n***Final result:***\n\n{result}") + +# Clean up browser resources when done +toolkit.sync_cleanup() +``` + +### Available Tools + +The toolkit provides the following tools: + +1. `navigate_browser` - Navigate to a URL +2. `click_element` - Click on an element using CSS selectors +3. `extract_text` - Extract all text from the current webpage +4. `extract_hyperlinks` - Extract all hyperlinks from the current webpage +5. `get_elements` - Get elements matching a CSS selector +6. `navigate_back` - Navigate to the previous page +7. 
`current_webpage` - Get information about the current webpage
+
+### Advanced Usage (with async)
+
+```python
+import asyncio
+from crewai import Agent, Task, Crew, LLM
+from crewai_tools.aws.bedrock.browser import create_browser_toolkit
+
+async def main():
+
+    # Create the browser toolkit with specific AWS region
+    toolkit, browser_tools = create_browser_toolkit(region="us-west-2")
+    tools_by_name = toolkit.get_tools_by_name()
+
+    # Create the Bedrock LLM
+    llm = LLM(
+        model="bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0",
+        region_name="us-west-2",
+    )
+
+    # Create agents with specific tools
+    navigator_agent = Agent(
+        role="Navigator",
+        goal="Find specific information across websites",
+        backstory="You navigate through websites to locate information.",
+        tools=[
+            tools_by_name["navigate_browser"],
+            tools_by_name["click_element"],
+            tools_by_name["navigate_back"]
+        ],
+        llm=llm
+    )
+
+    content_agent = Agent(
+        role="Content Extractor",
+        goal="Extract and analyze webpage content",
+        backstory="You extract and analyze content from webpages.",
+        tools=[
+            tools_by_name["extract_text"],
+            tools_by_name["extract_hyperlinks"],
+            tools_by_name["get_elements"]
+        ],
+        llm=llm
+    )
+
+    # Create tasks for the agents
+    navigation_task = Task(
+        description="Navigate to https://example.com, then click on the 'More information...' link.",
+        expected_output="The status of the tool calls for this task.",
+        agent=navigator_agent,
+    )
+
+    extraction_task = Task(
+        description="Extract all text from the current page and summarize it.",
+        expected_output="The summary of the page, and a description of the tool calls used, and actions performed to get to the page.",
+        agent=content_agent,
+    )
+
+    # Create and run the crew
+    crew = Crew(
+        agents=[navigator_agent, content_agent],
+        tasks=[navigation_task, extraction_task]
+    )
+
+    result = await crew.kickoff_async()
+
+    # Clean up browser resources when done
+    toolkit.sync_cleanup()
+
+    return result
+
+if __name__ == "__main__":
+    result = asyncio.run(main())
+    print(f"\n***Final result:***\n\n{result}")
+```
+
+## Requirements
+
+- AWS account with access to Bedrock AgentCore API
+- Properly configured AWS credentials
\ No newline at end of file
diff --git a/src/crewai_tools/aws/bedrock/browser/__init__.py b/src/crewai_tools/aws/bedrock/browser/__init__.py
new file mode 100644
index 000000000..e82666ebc
--- /dev/null
+++ b/src/crewai_tools/aws/bedrock/browser/__init__.py
@@ -0,0 +1,3 @@
+from .browser_toolkit import BrowserToolkit, create_browser_toolkit
+
+__all__ = ["BrowserToolkit", "create_browser_toolkit"]
\ No newline at end of file
diff --git a/src/crewai_tools/aws/bedrock/browser/browser_session_manager.py b/src/crewai_tools/aws/bedrock/browser/browser_session_manager.py
new file mode 100644
index 000000000..d4652c320
--- /dev/null
+++ b/src/crewai_tools/aws/bedrock/browser/browser_session_manager.py
@@ -0,0 +1,260 @@
+from __future__ import annotations
+
+import logging
+from typing import TYPE_CHECKING, Dict, Tuple
+
+if TYPE_CHECKING:
+    from playwright.async_api import Browser as AsyncBrowser
+    from playwright.sync_api import Browser as SyncBrowser
+    from bedrock_agentcore.tools.browser_client import BrowserClient
+
+logger = logging.getLogger(__name__)
+
+
+class BrowserSessionManager:
+    """
+    Manages browser sessions for different threads.
+
+    This class maintains separate browser sessions for different threads,
+    enabling concurrent usage of browsers in multi-threaded environments.
+ Browsers are created lazily only when needed by tools. + """ + + def __init__(self, region: str = "us-west-2"): + """ + Initialize the browser session manager. + + Args: + region: AWS region for browser client + """ + self.region = region + self._async_sessions: Dict[str, Tuple[BrowserClient, AsyncBrowser]] = {} + self._sync_sessions: Dict[str, Tuple[BrowserClient, SyncBrowser]] = {} + + async def get_async_browser(self, thread_id: str) -> AsyncBrowser: + """ + Get or create an async browser for the specified thread. + + Args: + thread_id: Unique identifier for the thread requesting the browser + + Returns: + An async browser instance specific to the thread + """ + if thread_id in self._async_sessions: + return self._async_sessions[thread_id][1] + + return await self._create_async_browser_session(thread_id) + + def get_sync_browser(self, thread_id: str) -> SyncBrowser: + """ + Get or create a sync browser for the specified thread. + + Args: + thread_id: Unique identifier for the thread requesting the browser + + Returns: + A sync browser instance specific to the thread + """ + if thread_id in self._sync_sessions: + return self._sync_sessions[thread_id][1] + + return self._create_sync_browser_session(thread_id) + + async def _create_async_browser_session(self, thread_id: str) -> AsyncBrowser: + """ + Create a new async browser session for the specified thread. + + Args: + thread_id: Unique identifier for the thread + + Returns: + The newly created async browser instance + + Raises: + Exception: If browser session creation fails + """ + from bedrock_agentcore.tools.browser_client import BrowserClient + browser_client = BrowserClient(region=self.region) + + try: + # Start browser session + browser_client.start() + + # Get WebSocket connection info + ws_url, headers = browser_client.generate_ws_headers() + + logger.info( + f"Connecting to async WebSocket endpoint for thread {thread_id}: {ws_url}" + ) + + from playwright.async_api import async_playwright + + # Connect to browser using Playwright + playwright = await async_playwright().start() + browser = await playwright.chromium.connect_over_cdp( + endpoint_url=ws_url, headers=headers, timeout=30000 + ) + logger.info( + f"Successfully connected to async browser for thread {thread_id}" + ) + + # Store session resources + self._async_sessions[thread_id] = (browser_client, browser) + + return browser + + except Exception as e: + logger.error( + f"Failed to create async browser session for thread {thread_id}: {e}" + ) + + # Clean up resources if session creation fails + if browser_client: + try: + browser_client.stop() + except Exception as cleanup_error: + logger.warning(f"Error cleaning up browser client: {cleanup_error}") + + raise + + def _create_sync_browser_session(self, thread_id: str) -> SyncBrowser: + """ + Create a new sync browser session for the specified thread. 
+ + Args: + thread_id: Unique identifier for the thread + + Returns: + The newly created sync browser instance + + Raises: + Exception: If browser session creation fails + """ + from bedrock_agentcore.tools.browser_client import BrowserClient + browser_client = BrowserClient(region=self.region) + + try: + # Start browser session + browser_client.start() + + # Get WebSocket connection info + ws_url, headers = browser_client.generate_ws_headers() + + logger.info( + f"Connecting to sync WebSocket endpoint for thread {thread_id}: {ws_url}" + ) + + from playwright.sync_api import sync_playwright + + # Connect to browser using Playwright + playwright = sync_playwright().start() + browser = playwright.chromium.connect_over_cdp( + endpoint_url=ws_url, headers=headers, timeout=30000 + ) + logger.info( + f"Successfully connected to sync browser for thread {thread_id}" + ) + + # Store session resources + self._sync_sessions[thread_id] = (browser_client, browser) + + return browser + + except Exception as e: + logger.error( + f"Failed to create sync browser session for thread {thread_id}: {e}" + ) + + # Clean up resources if session creation fails + if browser_client: + try: + browser_client.stop() + except Exception as cleanup_error: + logger.warning(f"Error cleaning up browser client: {cleanup_error}") + + raise + + async def close_async_browser(self, thread_id: str) -> None: + """ + Close the async browser session for the specified thread. + + Args: + thread_id: Unique identifier for the thread + """ + if thread_id not in self._async_sessions: + logger.warning(f"No async browser session found for thread {thread_id}") + return + + browser_client, browser = self._async_sessions[thread_id] + + # Close browser + if browser: + try: + await browser.close() + except Exception as e: + logger.warning( + f"Error closing async browser for thread {thread_id}: {e}" + ) + + # Stop browser client + if browser_client: + try: + browser_client.stop() + except Exception as e: + logger.warning( + f"Error stopping browser client for thread {thread_id}: {e}" + ) + + # Remove session from dictionary + del self._async_sessions[thread_id] + logger.info(f"Async browser session cleaned up for thread {thread_id}") + + def close_sync_browser(self, thread_id: str) -> None: + """ + Close the sync browser session for the specified thread. 
+ + Args: + thread_id: Unique identifier for the thread + """ + if thread_id not in self._sync_sessions: + logger.warning(f"No sync browser session found for thread {thread_id}") + return + + browser_client, browser = self._sync_sessions[thread_id] + + # Close browser + if browser: + try: + browser.close() + except Exception as e: + logger.warning( + f"Error closing sync browser for thread {thread_id}: {e}" + ) + + # Stop browser client + if browser_client: + try: + browser_client.stop() + except Exception as e: + logger.warning( + f"Error stopping browser client for thread {thread_id}: {e}" + ) + + # Remove session from dictionary + del self._sync_sessions[thread_id] + logger.info(f"Sync browser session cleaned up for thread {thread_id}") + + async def close_all_browsers(self) -> None: + """Close all browser sessions.""" + # Close all async browsers + async_thread_ids = list(self._async_sessions.keys()) + for thread_id in async_thread_ids: + await self.close_async_browser(thread_id) + + # Close all sync browsers + sync_thread_ids = list(self._sync_sessions.keys()) + for thread_id in sync_thread_ids: + self.close_sync_browser(thread_id) + + logger.info("All browser sessions closed") \ No newline at end of file diff --git a/src/crewai_tools/aws/bedrock/browser/browser_toolkit.py b/src/crewai_tools/aws/bedrock/browser/browser_toolkit.py new file mode 100644 index 000000000..2939bbb00 --- /dev/null +++ b/src/crewai_tools/aws/bedrock/browser/browser_toolkit.py @@ -0,0 +1,587 @@ +"""Toolkit for navigating web with AWS browser.""" + +import json +import logging +import asyncio +from typing import Dict, List, Tuple, Any, Type +from urllib.parse import urlparse + +from crewai.tools import BaseTool +from pydantic import BaseModel, Field + +from .browser_session_manager import BrowserSessionManager +from .utils import aget_current_page, get_current_page + +logger = logging.getLogger(__name__) + + +# Input schemas +class NavigateToolInput(BaseModel): + """Input for NavigateTool.""" + url: str = Field(description="URL to navigate to") + thread_id: str = Field(default="default", description="Thread ID for the browser session") + + +class ClickToolInput(BaseModel): + """Input for ClickTool.""" + selector: str = Field(description="CSS selector for the element to click on") + thread_id: str = Field(default="default", description="Thread ID for the browser session") + + +class GetElementsToolInput(BaseModel): + """Input for GetElementsTool.""" + selector: str = Field(description="CSS selector for elements to get") + thread_id: str = Field(default="default", description="Thread ID for the browser session") + + +class ExtractTextToolInput(BaseModel): + """Input for ExtractTextTool.""" + thread_id: str = Field(default="default", description="Thread ID for the browser session") + + +class ExtractHyperlinksToolInput(BaseModel): + """Input for ExtractHyperlinksTool.""" + thread_id: str = Field(default="default", description="Thread ID for the browser session") + + +class NavigateBackToolInput(BaseModel): + """Input for NavigateBackTool.""" + thread_id: str = Field(default="default", description="Thread ID for the browser session") + + +class CurrentWebPageToolInput(BaseModel): + """Input for CurrentWebPageTool.""" + thread_id: str = Field(default="default", description="Thread ID for the browser session") + + +# Base tool class +class BrowserBaseTool(BaseTool): + """Base class for browser tools.""" + + def __init__(self, session_manager: BrowserSessionManager): + """Initialize with a session manager.""" + 
super().__init__() + self._session_manager = session_manager + + if self._is_in_asyncio_loop() and hasattr(self, '_arun'): + self._original_run = self._run + # Override _run to use _arun when in an asyncio loop + def patched_run(*args, **kwargs): + try: + import nest_asyncio + loop = asyncio.get_event_loop() + nest_asyncio.apply(loop) + return asyncio.get_event_loop().run_until_complete( + self._arun(*args, **kwargs) + ) + except Exception as e: + return f"Error in patched _run: {str(e)}" + self._run = patched_run + + async def get_async_page(self, thread_id: str) -> Any: + """Get or create a page for the specified thread.""" + browser = await self._session_manager.get_async_browser(thread_id) + page = await aget_current_page(browser) + return page + + def get_sync_page(self, thread_id: str) -> Any: + """Get or create a page for the specified thread.""" + browser = self._session_manager.get_sync_browser(thread_id) + page = get_current_page(browser) + return page + + def _is_in_asyncio_loop(self) -> bool: + """Check if we're currently in an asyncio event loop.""" + try: + loop = asyncio.get_event_loop() + return loop.is_running() + except RuntimeError: + return False + + +# Tool classes +class NavigateTool(BrowserBaseTool): + """Tool for navigating a browser to a URL.""" + + name: str = "navigate_browser" + description: str = "Navigate a browser to the specified URL" + args_schema: Type[BaseModel] = NavigateToolInput + + def _run(self, url: str, thread_id: str = "default", **kwargs) -> str: + """Use the sync tool.""" + try: + # Get page for this thread + page = self.get_sync_page(thread_id) + + # Validate URL scheme + parsed_url = urlparse(url) + if parsed_url.scheme not in ("http", "https"): + raise ValueError("URL scheme must be 'http' or 'https'") + + # Navigate to URL + response = page.goto(url) + status = response.status if response else "unknown" + return f"Navigating to {url} returned status code {status}" + except Exception as e: + return f"Error navigating to {url}: {str(e)}" + + async def _arun(self, url: str, thread_id: str = "default", **kwargs) -> str: + """Use the async tool.""" + try: + # Get page for this thread + page = await self.get_async_page(thread_id) + + # Validate URL scheme + parsed_url = urlparse(url) + if parsed_url.scheme not in ("http", "https"): + raise ValueError("URL scheme must be 'http' or 'https'") + + # Navigate to URL + response = await page.goto(url) + status = response.status if response else "unknown" + return f"Navigating to {url} returned status code {status}" + except Exception as e: + return f"Error navigating to {url}: {str(e)}" + + +class ClickTool(BrowserBaseTool): + """Tool for clicking on an element with the given CSS selector.""" + + name: str = "click_element" + description: str = "Click on an element with the given CSS selector" + args_schema: Type[BaseModel] = ClickToolInput + + visible_only: bool = True + """Whether to consider only visible elements.""" + playwright_strict: bool = False + """Whether to employ Playwright's strict mode when clicking on elements.""" + playwright_timeout: float = 1_000 + """Timeout (in ms) for Playwright to wait for element to be ready.""" + + def _selector_effective(self, selector: str) -> str: + if not self.visible_only: + return selector + return f"{selector} >> visible=1" + + def _run(self, selector: str, thread_id: str = "default", **kwargs) -> str: + """Use the sync tool.""" + try: + # Get the current page + page = self.get_sync_page(thread_id) + + # Click on the element + selector_effective = 
self._selector_effective(selector=selector) + from playwright.sync_api import TimeoutError as PlaywrightTimeoutError + + try: + page.click( + selector_effective, + strict=self.playwright_strict, + timeout=self.playwright_timeout, + ) + except PlaywrightTimeoutError: + return f"Unable to click on element '{selector}'" + except Exception as click_error: + return f"Unable to click on element '{selector}': {str(click_error)}" + + return f"Clicked element '{selector}'" + except Exception as e: + return f"Error clicking on element: {str(e)}" + + async def _arun(self, selector: str, thread_id: str = "default", **kwargs) -> str: + """Use the async tool.""" + try: + # Get the current page + page = await self.get_async_page(thread_id) + + # Click on the element + selector_effective = self._selector_effective(selector=selector) + from playwright.async_api import TimeoutError as PlaywrightTimeoutError + + try: + await page.click( + selector_effective, + strict=self.playwright_strict, + timeout=self.playwright_timeout, + ) + except PlaywrightTimeoutError: + return f"Unable to click on element '{selector}'" + except Exception as click_error: + return f"Unable to click on element '{selector}': {str(click_error)}" + + return f"Clicked element '{selector}'" + except Exception as e: + return f"Error clicking on element: {str(e)}" + + +class NavigateBackTool(BrowserBaseTool): + """Tool for navigating back in browser history.""" + name: str = "navigate_back" + description: str = "Navigate back to the previous page" + args_schema: Type[BaseModel] = NavigateBackToolInput + + def _run(self, thread_id: str = "default", **kwargs) -> str: + """Use the sync tool.""" + try: + # Get the current page + page = self.get_sync_page(thread_id) + + # Navigate back + try: + page.go_back() + return "Navigated back to the previous page" + except Exception as nav_error: + return f"Unable to navigate back: {str(nav_error)}" + except Exception as e: + return f"Error navigating back: {str(e)}" + + async def _arun(self, thread_id: str = "default", **kwargs) -> str: + """Use the async tool.""" + try: + # Get the current page + page = await self.get_async_page(thread_id) + + # Navigate back + try: + await page.go_back() + return "Navigated back to the previous page" + except Exception as nav_error: + return f"Unable to navigate back: {str(nav_error)}" + except Exception as e: + return f"Error navigating back: {str(e)}" + + +class ExtractTextTool(BrowserBaseTool): + """Tool for extracting text from a webpage.""" + name: str = "extract_text" + description: str = "Extract all the text on the current webpage" + args_schema: Type[BaseModel] = ExtractTextToolInput + + def _run(self, thread_id: str = "default", **kwargs) -> str: + """Use the sync tool.""" + try: + # Import BeautifulSoup + try: + from bs4 import BeautifulSoup + except ImportError: + return ( + "The 'beautifulsoup4' package is required to use this tool." + " Please install it with 'pip install beautifulsoup4'." + ) + + # Get the current page + page = self.get_sync_page(thread_id) + + # Extract text + content = page.content() + soup = BeautifulSoup(content, "html.parser") + return soup.get_text(separator="\n").strip() + except Exception as e: + return f"Error extracting text: {str(e)}" + + async def _arun(self, thread_id: str = "default", **kwargs) -> str: + """Use the async tool.""" + try: + # Import BeautifulSoup + try: + from bs4 import BeautifulSoup + except ImportError: + return ( + "The 'beautifulsoup4' package is required to use this tool." 
+ " Please install it with 'pip install beautifulsoup4'." + ) + + # Get the current page + page = await self.get_async_page(thread_id) + + # Extract text + content = await page.content() + soup = BeautifulSoup(content, "html.parser") + return soup.get_text(separator="\n").strip() + except Exception as e: + return f"Error extracting text: {str(e)}" + + +class ExtractHyperlinksTool(BrowserBaseTool): + """Tool for extracting hyperlinks from a webpage.""" + name: str = "extract_hyperlinks" + description: str = "Extract all hyperlinks on the current webpage" + args_schema: Type[BaseModel] = ExtractHyperlinksToolInput + + def _run(self, thread_id: str = "default", **kwargs) -> str: + """Use the sync tool.""" + try: + # Import BeautifulSoup + try: + from bs4 import BeautifulSoup + except ImportError: + return ( + "The 'beautifulsoup4' package is required to use this tool." + " Please install it with 'pip install beautifulsoup4'." + ) + + # Get the current page + page = self.get_sync_page(thread_id) + + # Extract hyperlinks + content = page.content() + soup = BeautifulSoup(content, "html.parser") + links = [] + for link in soup.find_all("a", href=True): + text = link.get_text().strip() + href = link["href"] + if href.startswith("http") or href.startswith("https"): + links.append({"text": text, "url": href}) + + if not links: + return "No hyperlinks found on the current page." + + return json.dumps(links, indent=2) + except Exception as e: + return f"Error extracting hyperlinks: {str(e)}" + + async def _arun(self, thread_id: str = "default", **kwargs) -> str: + """Use the async tool.""" + try: + # Import BeautifulSoup + try: + from bs4 import BeautifulSoup + except ImportError: + return ( + "The 'beautifulsoup4' package is required to use this tool." + " Please install it with 'pip install beautifulsoup4'." + ) + + # Get the current page + page = await self.get_async_page(thread_id) + + # Extract hyperlinks + content = await page.content() + soup = BeautifulSoup(content, "html.parser") + links = [] + for link in soup.find_all("a", href=True): + text = link.get_text().strip() + href = link["href"] + if href.startswith("http") or href.startswith("https"): + links.append({"text": text, "url": href}) + + if not links: + return "No hyperlinks found on the current page." 
+ + return json.dumps(links, indent=2) + except Exception as e: + return f"Error extracting hyperlinks: {str(e)}" + + +class GetElementsTool(BrowserBaseTool): + """Tool for getting elements from a webpage.""" + name: str = "get_elements" + description: str = "Get elements from the webpage using a CSS selector" + args_schema: Type[BaseModel] = GetElementsToolInput + + def _run(self, selector: str, thread_id: str = "default", **kwargs) -> str: + """Use the sync tool.""" + try: + # Get the current page + page = self.get_sync_page(thread_id) + + # Get elements + elements = page.query_selector_all(selector) + if not elements: + return f"No elements found with selector '{selector}'" + + elements_text = [] + for i, element in enumerate(elements): + text = element.text_content() + elements_text.append(f"Element {i+1}: {text.strip()}") + + return "\n".join(elements_text) + except Exception as e: + return f"Error getting elements: {str(e)}" + + async def _arun(self, selector: str, thread_id: str = "default", **kwargs) -> str: + """Use the async tool.""" + try: + # Get the current page + page = await self.get_async_page(thread_id) + + # Get elements + elements = await page.query_selector_all(selector) + if not elements: + return f"No elements found with selector '{selector}'" + + elements_text = [] + for i, element in enumerate(elements): + text = await element.text_content() + elements_text.append(f"Element {i+1}: {text.strip()}") + + return "\n".join(elements_text) + except Exception as e: + return f"Error getting elements: {str(e)}" + + +class CurrentWebPageTool(BrowserBaseTool): + """Tool for getting information about the current webpage.""" + name: str = "current_webpage" + description: str = "Get information about the current webpage" + args_schema: Type[BaseModel] = CurrentWebPageToolInput + + def _run(self, thread_id: str = "default", **kwargs) -> str: + """Use the sync tool.""" + try: + # Get the current page + page = self.get_sync_page(thread_id) + + # Get information + url = page.url + title = page.title() + return f"URL: {url}\nTitle: {title}" + except Exception as e: + return f"Error getting current webpage info: {str(e)}" + + async def _arun(self, thread_id: str = "default", **kwargs) -> str: + """Use the async tool.""" + try: + # Get the current page + page = await self.get_async_page(thread_id) + + # Get information + url = page.url + title = await page.title() + return f"URL: {url}\nTitle: {title}" + except Exception as e: + return f"Error getting current webpage info: {str(e)}" + + +class BrowserToolkit: + """Toolkit for navigating web with AWS Bedrock browser. + + This toolkit provides a set of tools for working with a remote browser + and supports multiple threads by maintaining separate browser sessions + for each thread ID. Browsers are created lazily only when needed. + + Example: + ```python + from crewai import Agent, Task, Crew + from crewai_tools.aws.bedrock.browser import create_browser_toolkit + + # Create the browser toolkit + toolkit, browser_tools = create_browser_toolkit(region="us-west-2") + + # Create a CrewAI agent that uses the browser tools + research_agent = Agent( + role="Web Researcher", + goal="Research and summarize web content", + backstory="You're an expert at finding information online.", + tools=browser_tools + ) + + # Create a task for the agent + research_task = Task( + description="Navigate to https://example.com and extract all text content. 
Summarize the main points.", + agent=research_agent + ) + + # Create and run the crew + crew = Crew( + agents=[research_agent], + tasks=[research_task] + ) + result = crew.kickoff() + + # Clean up browser resources when done + import asyncio + asyncio.run(toolkit.cleanup()) + ``` + """ + + def __init__(self, region: str = "us-west-2"): + """ + Initialize the toolkit + + Args: + region: AWS region for the browser client + """ + self.region = region + self.session_manager = BrowserSessionManager(region=region) + self.tools: List[BaseTool] = [] + self._nest_current_loop() + self._setup_tools() + + def _nest_current_loop(self): + """Apply nest_asyncio if we're in an asyncio loop.""" + try: + loop = asyncio.get_event_loop() + if loop.is_running(): + try: + import nest_asyncio + nest_asyncio.apply(loop) + except Exception as e: + logger.warning(f"Failed to apply nest_asyncio: {str(e)}") + except RuntimeError: + pass + + def _setup_tools(self) -> None: + """Initialize tools without creating any browsers.""" + self.tools = [ + NavigateTool(session_manager=self.session_manager), + ClickTool(session_manager=self.session_manager), + NavigateBackTool(session_manager=self.session_manager), + ExtractTextTool(session_manager=self.session_manager), + ExtractHyperlinksTool(session_manager=self.session_manager), + GetElementsTool(session_manager=self.session_manager), + CurrentWebPageTool(session_manager=self.session_manager) + ] + + def get_tools(self) -> List[BaseTool]: + """ + Get the list of browser tools + + Returns: + List of CrewAI tools + """ + return self.tools + + def get_tools_by_name(self) -> Dict[str, BaseTool]: + """ + Get a dictionary of tools mapped by their names + + Returns: + Dictionary of {tool_name: tool} + """ + return {tool.name: tool for tool in self.tools} + + async def cleanup(self) -> None: + """Clean up all browser sessions asynchronously""" + await self.session_manager.close_all_browsers() + logger.info("All browser sessions cleaned up") + + def sync_cleanup(self) -> None: + """Clean up all browser sessions from synchronous code""" + import asyncio + + try: + loop = asyncio.get_event_loop() + if loop.is_running(): + asyncio.create_task(self.cleanup()) + else: + loop.run_until_complete(self.cleanup()) + except RuntimeError: + asyncio.run(self.cleanup()) + + +def create_browser_toolkit( + region: str = "us-west-2", +) -> Tuple[BrowserToolkit, List[BaseTool]]: + """ + Create a BrowserToolkit + + Args: + region: AWS region for browser client + + Returns: + Tuple of (toolkit, tools) + """ + toolkit = BrowserToolkit(region=region) + tools = toolkit.get_tools() + return toolkit, tools diff --git a/src/crewai_tools/aws/bedrock/browser/utils.py b/src/crewai_tools/aws/bedrock/browser/utils.py new file mode 100644 index 000000000..6e8b48e3a --- /dev/null +++ b/src/crewai_tools/aws/bedrock/browser/utils.py @@ -0,0 +1,43 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Union + +if TYPE_CHECKING: + from playwright.async_api import Browser as AsyncBrowser + from playwright.async_api import Page as AsyncPage + from playwright.sync_api import Browser as SyncBrowser + from playwright.sync_api import Page as SyncPage + + +async def aget_current_page(browser: Union[AsyncBrowser, Any]) -> AsyncPage: + """ + Asynchronously get the current page of the browser. + Args: + browser: The browser (AsyncBrowser) to get the current page from. + Returns: + AsyncPage: The current page. 
+ """ + if not browser.contexts: + context = await browser.new_context() + return await context.new_page() + context = browser.contexts[0] + if not context.pages: + return await context.new_page() + return context.pages[-1] + + +def get_current_page(browser: Union[SyncBrowser, Any]) -> SyncPage: + """ + Get the current page of the browser. + Args: + browser: The browser to get the current page from. + Returns: + SyncPage: The current page. + """ + if not browser.contexts: + context = browser.new_context() + return context.new_page() + context = browser.contexts[0] + if not context.pages: + return context.new_page() + return context.pages[-1] \ No newline at end of file diff --git a/src/crewai_tools/aws/bedrock/code_interpreter/README.md b/src/crewai_tools/aws/bedrock/code_interpreter/README.md new file mode 100644 index 000000000..92e8ec5b2 --- /dev/null +++ b/src/crewai_tools/aws/bedrock/code_interpreter/README.md @@ -0,0 +1,217 @@ +# AWS Bedrock Code Interpreter Tools + +This toolkit provides a set of tools for interacting with the AWS Bedrock Code Interpreter environment. It enables your CrewAI agents to execute code, run shell commands, manage files, and perform computational tasks in a secure, isolated environment. + +## Features + +- Execute code in various languages (primarily Python) +- Run shell commands in the environment +- Read, write, list, and delete files +- Manage long-running tasks asynchronously +- Multiple code interpreter sessions with thread-based isolation + +## Installation + +Ensure you have the necessary dependencies: + +```bash +uv add crewai-tools bedrock-agentcore +``` + +## Usage + +### Basic Usage + +```python +from crewai import Agent, Task, Crew, LLM +from crewai_tools.aws import create_code_interpreter_toolkit + +# Create the code interpreter toolkit +toolkit, code_tools = create_code_interpreter_toolkit(region="us-west-2") + +# Create the Bedrock LLM +llm = LLM( + model="bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0", + region_name="us-west-2", +) + +# Create a CrewAI agent that uses the code interpreter tools +developer_agent = Agent( + role="Python Developer", + goal="Create and execute Python code to solve problems.", + backstory="You're a skilled Python developer with expertise in data analysis.", + tools=code_tools, + llm=llm +) + +# Create a task for the agent +coding_task = Task( + description="Write a Python function that calculates the factorial of a number and test it. Do not use any imports from outside the Python standard library.", + expected_output="The Python function created, and the test results.", + agent=developer_agent +) + +# Create and run the crew +crew = Crew( + agents=[developer_agent], + tasks=[coding_task] +) +result = crew.kickoff() + +print(f"\n***Final result:***\n\n{result}") + +# Clean up resources when done +import asyncio +asyncio.run(toolkit.cleanup()) +``` + +### Available Tools + +The toolkit provides the following tools: + +1. `execute_code` - Run code in various languages (primarily Python) +2. `execute_command` - Run shell commands in the environment +3. `read_files` - Read content of files in the environment +4. `list_files` - List files in directories +5. `delete_files` - Remove files from the environment +6. `write_files` - Create or update files +7. `start_command_execution` - Start long-running commands asynchronously +8. `get_task` - Check status of async tasks +9. 
`stop_task` - Stop running tasks + +### Advanced Usage + +```python +from crewai import Agent, Task, Crew, LLM +from crewai_tools.aws import create_code_interpreter_toolkit + +# Create the code interpreter toolkit +toolkit, code_tools = create_code_interpreter_toolkit(region="us-west-2") +tools_by_name = toolkit.get_tools_by_name() + +# Create the Bedrock LLM +llm = LLM( + model="bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0", + region_name="us-west-2", +) + +# Create agents with specific tools +code_agent = Agent( + role="Code Developer", + goal="Write and execute code", + backstory="You write and test code to solve complex problems.", + tools=[ + # Use specific tools by name + tools_by_name["execute_code"], + tools_by_name["execute_command"], + tools_by_name["read_files"], + tools_by_name["write_files"] + ], + llm=llm +) + +file_agent = Agent( + role="File Manager", + goal="Manage files in the environment", + backstory="You help organize and manage files in the code environment.", + tools=[ + # Use specific tools by name + tools_by_name["list_files"], + tools_by_name["read_files"], + tools_by_name["write_files"], + tools_by_name["delete_files"] + ], + llm=llm +) + +# Create tasks for the agents +coding_task = Task( + description="Write a Python script to analyze data from a CSV file. Do not use any imports from outside the Python standard library.", + expected_output="The Python function created.", + agent=code_agent +) + +file_task = Task( + description="Organize the created files into separate directories.", + agent=file_agent +) + +# Create and run the crew +crew = Crew( + agents=[code_agent, file_agent], + tasks=[coding_task, file_task] +) +result = crew.kickoff() + +print(f"\n***Final result:***\n\n{result}") + +# Clean up code interpreter resources when done +import asyncio +asyncio.run(toolkit.cleanup()) +``` + +### Example: Data Analysis with Python + +```python +from crewai import Agent, Task, Crew, LLM +from crewai_tools.aws import create_code_interpreter_toolkit + +# Create toolkit and tools +toolkit, code_tools = create_code_interpreter_toolkit(region="us-west-2") + +# Create the Bedrock LLM +llm = LLM( + model="bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0", + region_name="us-west-2", +) + +# Create a data analyst agent +analyst_agent = Agent( + role="Data Analyst", + goal="Analyze data using Python", + backstory="You're an expert data analyst who uses Python for data processing.", + tools=code_tools, + llm=llm +) + +# Create a task for the agent +analysis_task = Task( + description=""" + For all of the below, do not use any imports from outside the Python standard library. + 1. Create a sample dataset with random data + 2. Perform statistical analysis on the dataset + 3. Generate visualizations of the results + 4. 
Save the results and visualizations to files + """, + agent=analyst_agent +) + +# Create and run the crew +crew = Crew( + agents=[analyst_agent], + tasks=[analysis_task] +) +result = crew.kickoff() + +print(f"\n***Final result:***\n\n{result}") + +# Clean up resources +import asyncio +asyncio.run(toolkit.cleanup()) +``` + +## Resource Cleanup + +Always clean up code interpreter resources when done to prevent resource leaks: + +```python +import asyncio + +# Clean up all code interpreter sessions +asyncio.run(toolkit.cleanup()) +``` + +## Requirements + +- AWS account with access to Bedrock AgentCore API +- Properly configured AWS credentials \ No newline at end of file diff --git a/src/crewai_tools/aws/bedrock/code_interpreter/__init__.py b/src/crewai_tools/aws/bedrock/code_interpreter/__init__.py new file mode 100644 index 000000000..903c84e24 --- /dev/null +++ b/src/crewai_tools/aws/bedrock/code_interpreter/__init__.py @@ -0,0 +1,3 @@ +from .code_interpreter_toolkit import CodeInterpreterToolkit, create_code_interpreter_toolkit + +__all__ = ["CodeInterpreterToolkit", "create_code_interpreter_toolkit"] \ No newline at end of file diff --git a/src/crewai_tools/aws/bedrock/code_interpreter/code_interpreter_toolkit.py b/src/crewai_tools/aws/bedrock/code_interpreter/code_interpreter_toolkit.py new file mode 100644 index 000000000..4e697cafe --- /dev/null +++ b/src/crewai_tools/aws/bedrock/code_interpreter/code_interpreter_toolkit.py @@ -0,0 +1,543 @@ +"""Toolkit for working with AWS Bedrock Code Interpreter.""" +from __future__ import annotations + +import json +import logging +from typing import TYPE_CHECKING, Dict, List, Tuple, Optional, Type, Any + +from crewai.tools import BaseTool +from pydantic import BaseModel, Field + +if TYPE_CHECKING: + from bedrock_agentcore.tools.code_interpreter_client import CodeInterpreter + +logger = logging.getLogger(__name__) + + +def extract_output_from_stream(response): + """ + Extract output from code interpreter response stream + + Args: + response: Response from code interpreter execution + + Returns: + Extracted output as string + """ + output = [] + for event in response["stream"]: + if "result" in event: + result = event["result"] + for content_item in result["content"]: + if content_item["type"] == "text": + output.append(content_item["text"]) + if content_item["type"] == "resource": + resource = content_item["resource"] + if "text" in resource: + file_path = resource["uri"].replace("file://", "") + file_content = resource["text"] + output.append(f"==== File: {file_path} ====\n{file_content}\n") + else: + output.append(json.dumps(resource)) + + return "\n".join(output) + + +# Input schemas +class ExecuteCodeInput(BaseModel): + """Input for ExecuteCode.""" + code: str = Field(description="The code to execute") + language: str = Field(default="python", description="The programming language of the code") + clear_context: bool = Field(default=False, description="Whether to clear execution context") + thread_id: str = Field(default="default", description="Thread ID for the code interpreter session") + + +class ExecuteCommandInput(BaseModel): + """Input for ExecuteCommand.""" + command: str = Field(description="The command to execute") + thread_id: str = Field(default="default", description="Thread ID for the code interpreter session") + + +class ReadFilesInput(BaseModel): + """Input for ReadFiles.""" + paths: List[str] = Field(description="List of file paths to read") + thread_id: str = Field(default="default", description="Thread ID for the code 
interpreter session") + + +class ListFilesInput(BaseModel): + """Input for ListFiles.""" + directory_path: str = Field(default="", description="Path to the directory to list") + thread_id: str = Field(default="default", description="Thread ID for the code interpreter session") + + +class DeleteFilesInput(BaseModel): + """Input for DeleteFiles.""" + paths: List[str] = Field(description="List of file paths to delete") + thread_id: str = Field(default="default", description="Thread ID for the code interpreter session") + + +class WriteFilesInput(BaseModel): + """Input for WriteFiles.""" + files: List[Dict[str, str]] = Field(description="List of dictionaries with path and text fields") + thread_id: str = Field(default="default", description="Thread ID for the code interpreter session") + + +class StartCommandInput(BaseModel): + """Input for StartCommand.""" + command: str = Field(description="The command to execute asynchronously") + thread_id: str = Field(default="default", description="Thread ID for the code interpreter session") + + +class GetTaskInput(BaseModel): + """Input for GetTask.""" + task_id: str = Field(description="The ID of the task to check") + thread_id: str = Field(default="default", description="Thread ID for the code interpreter session") + + +class StopTaskInput(BaseModel): + """Input for StopTask.""" + task_id: str = Field(description="The ID of the task to stop") + thread_id: str = Field(default="default", description="Thread ID for the code interpreter session") + + +# Tool classes +class ExecuteCodeTool(BaseTool): + """Tool for executing code in various languages.""" + name: str = "execute_code" + description: str = "Execute code in various languages (primarily Python)" + args_schema: Type[BaseModel] = ExecuteCodeInput + toolkit: Any = Field(default=None, exclude=True) + + def __init__(self, toolkit): + super().__init__() + self.toolkit = toolkit + + def _run(self, code: str, language: str = "python", clear_context: bool = False, thread_id: str = "default") -> str: + try: + # Get or create code interpreter + code_interpreter = self.toolkit._get_or_create_interpreter(thread_id=thread_id) + + # Execute code + response = code_interpreter.invoke( + method="executeCode", + params={"code": code, "language": language, "clearContext": clear_context}, + ) + + return extract_output_from_stream(response) + except Exception as e: + return f"Error executing code: {str(e)}" + + async def _arun(self, code: str, language: str = "python", clear_context: bool = False, thread_id: str = "default") -> str: + # Use _run as we're working with a synchronous API that's thread-safe + return self._run(code=code, language=language, clear_context=clear_context, thread_id=thread_id) + + +class ExecuteCommandTool(BaseTool): + """Tool for running shell commands in the code interpreter environment.""" + name: str = "execute_command" + description: str = "Run shell commands in the code interpreter environment" + args_schema: Type[BaseModel] = ExecuteCommandInput + toolkit: Any = Field(default=None, exclude=True) + + def __init__(self, toolkit): + super().__init__() + self.toolkit = toolkit + + def _run(self, command: str, thread_id: str = "default") -> str: + try: + # Get or create code interpreter + code_interpreter = self.toolkit._get_or_create_interpreter(thread_id=thread_id) + + # Execute command + response = code_interpreter.invoke( + method="executeCommand", params={"command": command} + ) + + return extract_output_from_stream(response) + except Exception as e: + return f"Error executing command: 
{str(e)}" + + async def _arun(self, command: str, thread_id: str = "default") -> str: + # Use _run as we're working with a synchronous API that's thread-safe + return self._run(command=command, thread_id=thread_id) + + +class ReadFilesTool(BaseTool): + """Tool for reading content of files in the environment.""" + name: str = "read_files" + description: str = "Read content of files in the environment" + args_schema: Type[BaseModel] = ReadFilesInput + toolkit: Any = Field(default=None, exclude=True) + + def __init__(self, toolkit): + super().__init__() + self.toolkit = toolkit + + def _run(self, paths: List[str], thread_id: str = "default") -> str: + try: + # Get or create code interpreter + code_interpreter = self.toolkit._get_or_create_interpreter(thread_id=thread_id) + + # Read files + response = code_interpreter.invoke(method="readFiles", params={"paths": paths}) + + return extract_output_from_stream(response) + except Exception as e: + return f"Error reading files: {str(e)}" + + async def _arun(self, paths: List[str], thread_id: str = "default") -> str: + # Use _run as we're working with a synchronous API that's thread-safe + return self._run(paths=paths, thread_id=thread_id) + + +class ListFilesTool(BaseTool): + """Tool for listing files in directories in the environment.""" + name: str = "list_files" + description: str = "List files in directories in the environment" + args_schema: Type[BaseModel] = ListFilesInput + toolkit: Any = Field(default=None, exclude=True) + + def __init__(self, toolkit): + super().__init__() + self.toolkit = toolkit + + def _run(self, directory_path: str = "", thread_id: str = "default") -> str: + try: + # Get or create code interpreter + code_interpreter = self.toolkit._get_or_create_interpreter(thread_id=thread_id) + + # List files + response = code_interpreter.invoke( + method="listFiles", params={"directoryPath": directory_path} + ) + + return extract_output_from_stream(response) + except Exception as e: + return f"Error listing files: {str(e)}" + + async def _arun(self, directory_path: str = "", thread_id: str = "default") -> str: + # Use _run as we're working with a synchronous API that's thread-safe + return self._run(directory_path=directory_path, thread_id=thread_id) + + +class DeleteFilesTool(BaseTool): + """Tool for removing files from the environment.""" + name: str = "delete_files" + description: str = "Remove files from the environment" + args_schema: Type[BaseModel] = DeleteFilesInput + toolkit: Any = Field(default=None, exclude=True) + + def __init__(self, toolkit): + super().__init__() + self.toolkit = toolkit + + def _run(self, paths: List[str], thread_id: str = "default") -> str: + try: + # Get or create code interpreter + code_interpreter = self.toolkit._get_or_create_interpreter(thread_id=thread_id) + + # Remove files + response = code_interpreter.invoke( + method="removeFiles", params={"paths": paths} + ) + + return extract_output_from_stream(response) + except Exception as e: + return f"Error deleting files: {str(e)}" + + async def _arun(self, paths: List[str], thread_id: str = "default") -> str: + # Use _run as we're working with a synchronous API that's thread-safe + return self._run(paths=paths, thread_id=thread_id) + + +class WriteFilesTool(BaseTool): + """Tool for creating or updating files in the environment.""" + name: str = "write_files" + description: str = "Create or update files in the environment" + args_schema: Type[BaseModel] = WriteFilesInput + toolkit: Any = Field(default=None, exclude=True) + + def __init__(self, 
toolkit): + super().__init__() + self.toolkit = toolkit + + def _run(self, files: List[Dict[str, str]], thread_id: str = "default") -> str: + try: + # Get or create code interpreter + code_interpreter = self.toolkit._get_or_create_interpreter(thread_id=thread_id) + + # Write files + response = code_interpreter.invoke( + method="writeFiles", params={"content": files} + ) + + return extract_output_from_stream(response) + except Exception as e: + return f"Error writing files: {str(e)}" + + async def _arun(self, files: List[Dict[str, str]], thread_id: str = "default") -> str: + # Use _run as we're working with a synchronous API that's thread-safe + return self._run(files=files, thread_id=thread_id) + + +class StartCommandTool(BaseTool): + """Tool for starting long-running commands asynchronously.""" + name: str = "start_command_execution" + description: str = "Start long-running commands asynchronously" + args_schema: Type[BaseModel] = StartCommandInput + toolkit: Any = Field(default=None, exclude=True) + + def __init__(self, toolkit): + super().__init__() + self.toolkit = toolkit + + def _run(self, command: str, thread_id: str = "default") -> str: + try: + # Get or create code interpreter + code_interpreter = self.toolkit._get_or_create_interpreter(thread_id=thread_id) + + # Start command execution + response = code_interpreter.invoke( + method="startCommandExecution", params={"command": command} + ) + + return extract_output_from_stream(response) + except Exception as e: + return f"Error starting command: {str(e)}" + + async def _arun(self, command: str, thread_id: str = "default") -> str: + # Use _run as we're working with a synchronous API that's thread-safe + return self._run(command=command, thread_id=thread_id) + + +class GetTaskTool(BaseTool): + """Tool for checking status of async tasks.""" + name: str = "get_task" + description: str = "Check status of async tasks" + args_schema: Type[BaseModel] = GetTaskInput + toolkit: Any = Field(default=None, exclude=True) + + def __init__(self, toolkit): + super().__init__() + self.toolkit = toolkit + + def _run(self, task_id: str, thread_id: str = "default") -> str: + try: + # Get or create code interpreter + code_interpreter = self.toolkit._get_or_create_interpreter(thread_id=thread_id) + + # Get task status + response = code_interpreter.invoke(method="getTask", params={"taskId": task_id}) + + return extract_output_from_stream(response) + except Exception as e: + return f"Error getting task status: {str(e)}" + + async def _arun(self, task_id: str, thread_id: str = "default") -> str: + # Use _run as we're working with a synchronous API that's thread-safe + return self._run(task_id=task_id, thread_id=thread_id) + + +class StopTaskTool(BaseTool): + """Tool for stopping running tasks.""" + name: str = "stop_task" + description: str = "Stop running tasks" + args_schema: Type[BaseModel] = StopTaskInput + toolkit: Any = Field(default=None, exclude=True) + + def __init__(self, toolkit): + super().__init__() + self.toolkit = toolkit + + def _run(self, task_id: str, thread_id: str = "default") -> str: + try: + # Get or create code interpreter + code_interpreter = self.toolkit._get_or_create_interpreter(thread_id=thread_id) + + # Stop task + response = code_interpreter.invoke( + method="stopTask", params={"taskId": task_id} + ) + + return extract_output_from_stream(response) + except Exception as e: + return f"Error stopping task: {str(e)}" + + async def _arun(self, task_id: str, thread_id: str = "default") -> str: + # Use _run as we're working with a 
synchronous API that's thread-safe + return self._run(task_id=task_id, thread_id=thread_id) + + +class CodeInterpreterToolkit: + """Toolkit for working with AWS Bedrock code interpreter environment. + + This toolkit provides a set of tools for working with a remote code interpreter environment: + + * execute_code - Run code in various languages (primarily Python) + * execute_command - Run shell commands + * read_files - Read content of files in the environment + * list_files - List files in directories + * delete_files - Remove files from the environment + * write_files - Create or update files + * start_command_execution - Start long-running commands asynchronously + * get_task - Check status of async tasks + * stop_task - Stop running tasks + + The toolkit lazily initializes the code interpreter session on first use. + It supports multiple threads by maintaining separate code interpreter sessions for each thread ID. + + Example: + ```python + from crewai import Agent, Task, Crew + from crewai_tools.aws.bedrock.code_interpreter import create_code_interpreter_toolkit + + # Create the code interpreter toolkit + toolkit, code_tools = create_code_interpreter_toolkit(region="us-west-2") + + # Create a CrewAI agent that uses the code interpreter tools + developer_agent = Agent( + role="Python Developer", + goal="Create and execute Python code to solve problems", + backstory="You're a skilled Python developer with expertise in data analysis.", + tools=code_tools + ) + + # Create a task for the agent + coding_task = Task( + description="Write a Python function that calculates the factorial of a number and test it.", + agent=developer_agent + ) + + # Create and run the crew + crew = Crew( + agents=[developer_agent], + tasks=[coding_task] + ) + result = crew.kickoff() + + # Clean up resources when done + import asyncio + asyncio.run(toolkit.cleanup()) + ``` + """ + + def __init__(self, region: str = "us-west-2"): + """ + Initialize the toolkit + + Args: + region: AWS region for the code interpreter + """ + self.region = region + self._code_interpreters: Dict[str, CodeInterpreter] = {} + self.tools: List[BaseTool] = [] + self._setup_tools() + + def _setup_tools(self) -> None: + """Initialize tools without creating any code interpreter sessions.""" + self.tools = [ + ExecuteCodeTool(self), + ExecuteCommandTool(self), + ReadFilesTool(self), + ListFilesTool(self), + DeleteFilesTool(self), + WriteFilesTool(self), + StartCommandTool(self), + GetTaskTool(self), + StopTaskTool(self) + ] + + def _get_or_create_interpreter( + self, thread_id: str = "default" + ) -> CodeInterpreter: + """Get or create a code interpreter for the specified thread. 
+ + Args: + thread_id: Thread ID for the code interpreter session + + Returns: + CodeInterpreter instance + """ + if thread_id in self._code_interpreters: + return self._code_interpreters[thread_id] + + # Create a new code interpreter for this thread + from bedrock_agentcore.tools.code_interpreter_client import CodeInterpreter + code_interpreter = CodeInterpreter(region=self.region) + code_interpreter.start() + logger.info( + f"Started code interpreter with session_id:{code_interpreter.session_id} for thread:{thread_id}" + ) + + # Store the interpreter + self._code_interpreters[thread_id] = code_interpreter + return code_interpreter + + + def get_tools(self) -> List[BaseTool]: + """ + Get the list of code interpreter tools + + Returns: + List of CrewAI tools + """ + return self.tools + + def get_tools_by_name(self) -> Dict[str, BaseTool]: + """ + Get a dictionary of tools mapped by their names + + Returns: + Dictionary of {tool_name: tool} + """ + return {tool.name: tool for tool in self.tools} + + async def cleanup(self, thread_id: Optional[str] = None) -> None: + """Clean up resources + + Args: + thread_id: Optional thread ID to clean up. If None, cleans up all sessions. + """ + if thread_id: + # Clean up a specific thread's session + if thread_id in self._code_interpreters: + try: + self._code_interpreters[thread_id].stop() + del self._code_interpreters[thread_id] + logger.info( + f"Code interpreter session for thread {thread_id} cleaned up" + ) + except Exception as e: + logger.warning( + f"Error stopping code interpreter for thread {thread_id}: {e}" + ) + else: + # Clean up all sessions + thread_ids = list(self._code_interpreters.keys()) + for tid in thread_ids: + try: + self._code_interpreters[tid].stop() + except Exception as e: + logger.warning( + f"Error stopping code interpreter for thread {tid}: {e}" + ) + + self._code_interpreters = {} + logger.info("All code interpreter sessions cleaned up") + + +def create_code_interpreter_toolkit( + region: str = "us-west-2", +) -> Tuple[CodeInterpreterToolkit, List[BaseTool]]: + """ + Create a CodeInterpreterToolkit + + Args: + region: AWS region for code interpreter + + Returns: + Tuple of (toolkit, tools) + """ + toolkit = CodeInterpreterToolkit(region=region) + tools = toolkit.get_tools() + return toolkit, tools \ No newline at end of file From c3e87fc31fbb69070666ea8bcadafbb744503c5b Mon Sep 17 00:00:00 2001 From: Filip Michalsky <31483888+filip-michalsky@users.noreply.github.com> Date: Tue, 22 Jul 2025 10:05:29 -0400 Subject: [PATCH 360/391] Fm/update stagehand (#387) * Update tool specifications for * Update stagehand dependency from stagehand-py to stagehand v0.4.1 * uv add --------- Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- src/crewai_tools/tools/stagehand_tool/stagehand_tool.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py b/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py index 557c6cb6f..108575c3c 100644 --- a/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py +++ b/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py @@ -16,7 +16,7 @@ try: ExtractOptions, ObserveOptions, ) - from stagehand.utils import configure_logging + from stagehand import configure_logging _HAS_STAGEHAND = True except ImportError: # Define type stubs for when stagehand is not installed @@ -249,7 +249,7 @@ class StagehandTool(BaseTool): # Check if stagehand is available, but only if we're not in testing mode if 
not self._testing and not _HAS_STAGEHAND: raise ImportError( - "`stagehand-py` package not found, please run `uv add stagehand-py`" + "`stagehand` package not found, please run `uv add stagehand`" ) if not self.api_key: From 104485d18bd664ae502a19b10e9e73fcda51fd95 Mon Sep 17 00:00:00 2001 From: Mike Plachta Date: Wed, 23 Jul 2025 10:22:47 -0700 Subject: [PATCH 361/391] feat: add SerperScrapeWebsiteTool for extracting clean content from URLs (#392) * feat: add SerperScrapeWebsiteTool for extracting clean content from URLs * feat: add required SERPER_API_KEY env var validation to SerperScrapeWebsiteTool --- src/crewai_tools/__init__.py | 1 + src/crewai_tools/tools/__init__.py | 1 + .../serper_scrape_website_tool.py | 80 +++++++++++++++++++ 3 files changed, 82 insertions(+) create mode 100644 src/crewai_tools/tools/serper_scrape_website_tool/serper_scrape_website_tool.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 7831b957d..9d9796165 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -61,6 +61,7 @@ from .tools import ( SerpApiGoogleSearchTool, SerpApiGoogleShoppingTool, SerperDevTool, + SerperScrapeWebsiteTool, SerplyJobSearchTool, SerplyNewsSearchTool, SerplyScholarSearchTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index d4b54c5ff..091fac62b 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -74,6 +74,7 @@ from .selenium_scraping_tool.selenium_scraping_tool import SeleniumScrapingTool from .serpapi_tool.serpapi_google_search_tool import SerpApiGoogleSearchTool from .serpapi_tool.serpapi_google_shopping_tool import SerpApiGoogleShoppingTool from .serper_dev_tool.serper_dev_tool import SerperDevTool +from .serper_scrape_website_tool.serper_scrape_website_tool import SerperScrapeWebsiteTool from .serply_api_tool.serply_job_search_tool import SerplyJobSearchTool from .serply_api_tool.serply_news_search_tool import SerplyNewsSearchTool from .serply_api_tool.serply_scholar_search_tool import SerplyScholarSearchTool diff --git a/src/crewai_tools/tools/serper_scrape_website_tool/serper_scrape_website_tool.py b/src/crewai_tools/tools/serper_scrape_website_tool/serper_scrape_website_tool.py new file mode 100644 index 000000000..cefb431f4 --- /dev/null +++ b/src/crewai_tools/tools/serper_scrape_website_tool/serper_scrape_website_tool.py @@ -0,0 +1,80 @@ +from crewai.tools import BaseTool, EnvVar +from typing import Type, List +from pydantic import BaseModel, Field +import requests +import json +import os + + +class SerperScrapeWebsiteInput(BaseModel): + """Input schema for SerperScrapeWebsite.""" + url: str = Field(..., description="The URL of the website to scrape") + include_markdown: bool = Field( + default=True, + description="Whether to include markdown formatting in the scraped content" + ) + + +class SerperScrapeWebsiteTool(BaseTool): + name: str = "serper_scrape_website" + description: str = ( + "Scrapes website content using Serper's scraping API. " + "This tool can extract clean, readable content from any website URL, " + "optionally including markdown formatting for better structure." + ) + args_schema: Type[BaseModel] = SerperScrapeWebsiteInput + env_vars: List[EnvVar] = [ + EnvVar(name="SERPER_API_KEY", description="API key for Serper", required=True), + ] + + def _run(self, url: str, include_markdown: bool = True) -> str: + """ + Scrape website content using Serper API. 
+
+        Args:
+            url: The URL to scrape
+            include_markdown: Whether to include markdown formatting
+
+        Returns:
+            Scraped website content as a string
+        """
+        try:
+            # Serper API endpoint
+            api_url = "https://scrape.serper.dev"
+
+            # Get API key from environment variable for security
+            api_key = os.getenv('SERPER_API_KEY')
+
+            # Prepare the payload
+            payload = json.dumps({
+                "url": url,
+                "includeMarkdown": include_markdown
+            })
+
+            # Set headers
+            headers = {
+                'X-API-KEY': api_key,
+                'Content-Type': 'application/json'
+            }
+
+            # Make the API request
+            response = requests.post(api_url, headers=headers, data=payload)
+
+            # Check if request was successful
+            if response.status_code == 200:
+                result = response.json()
+
+                # Extract the scraped content
+                if 'text' in result:
+                    return result['text']
+                else:
+                    return f"Successfully scraped {url}, but no text content found in response: {response.text}"
+            else:
+                return f"Error scraping {url}: HTTP {response.status_code} - {response.text}"
+
+        except requests.exceptions.RequestException as e:
+            return f"Network error while scraping {url}: {str(e)}"
+        except json.JSONDecodeError as e:
+            return f"Error parsing JSON response while scraping {url}: {str(e)}"
+        except Exception as e:
+            return f"Unexpected error while scraping {url}: {str(e)}"

From e6ec6cc332343838afe1a3663fbc801ad1979ec2 Mon Sep 17 00:00:00 2001
From: Steven Silvester
Date: Wed, 23 Jul 2025 12:28:07 -0500
Subject: [PATCH 362/391] Fix MongoDBVectorSearchTool serialization and schema
 (#389)

* Fix MongoDBVectorSearchTool serialization

* fix base class
---
 .../tools/mongodb_vector_search_tool/vector_search.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/crewai_tools/tools/mongodb_vector_search_tool/vector_search.py b/src/crewai_tools/tools/mongodb_vector_search_tool/vector_search.py
index 3f8af315d..4112aa500 100644
--- a/src/crewai_tools/tools/mongodb_vector_search_tool/vector_search.py
+++ b/src/crewai_tools/tools/mongodb_vector_search_tool/vector_search.py
@@ -1,4 +1,3 @@
-import json
 import os
 from importlib.metadata import version
 from logging import getLogger
@@ -46,7 +45,7 @@ class MongoDBVectorSearchConfig(BaseModel):
     )
 
 
-class MongoDBToolSchema(MongoDBVectorSearchConfig):
+class MongoDBToolSchema(BaseModel):
     """Input for MongoDBTool."""
 
     query: str = Field(
@@ -264,6 +263,8 @@ class MongoDBVectorSearchTool(BaseTool):
         return [str(_id) for _id in result.upserted_ids.values()]
 
     def _run(self, query: str) -> str:
+        from bson import json_util
+
         try:
             query_config = self.query_config or MongoDBVectorSearchConfig()
             limit = query_config.limit
@@ -306,7 +307,7 @@ class MongoDBVectorSearchTool(BaseTool):
             # Format
             for doc in cursor:
                 docs.append(doc)
-            return json.dumps(docs)
+            return json_util.dumps(docs)
         except Exception as e:
             logger.error(f"Error: {e}")
             return ""

From b8bd3000c6ec613782c173f362b2c9e5f89697ff Mon Sep 17 00:00:00 2001
From: Vini Brasil
Date: Mon, 28 Jul 2025 17:09:53 -0300
Subject: [PATCH 363/391] Make `RagTool` process-safe (#399)

This commit adds a file lock to `RagTool`. As it uses Embedchain and
Chroma internally, this tool was not process-safe.
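
For context, this is the locking pattern the diff below introduces, shown as a minimal standalone sketch (the lock file name and timeout come straight from the patch; the wrapper function is illustrative):

```python
import portalocker

def build_embedchain_app(config=None):
    # Multiple worker processes may initialize Embedchain/Chroma against the
    # same on-disk store at once; an inter-process file lock serializes the
    # construction so concurrent workers cannot corrupt shared state.
    from embedchain import App

    with portalocker.Lock("crewai-rag-tool.lock", timeout=10):
        return App.from_config(config=config) if config else App()
```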
--- src/crewai_tools/tools/rag/rag_tool.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/crewai_tools/tools/rag/rag_tool.py b/src/crewai_tools/tools/rag/rag_tool.py index 900a6ef36..f7e785bd7 100644 --- a/src/crewai_tools/tools/rag/rag_tool.py +++ b/src/crewai_tools/tools/rag/rag_tool.py @@ -1,8 +1,10 @@ +import portalocker + from abc import ABC, abstractmethod from typing import Any +from pydantic import BaseModel, ConfigDict, Field, model_validator from crewai.tools import BaseTool -from pydantic import BaseModel, ConfigDict, Field, model_validator class Adapter(BaseModel, ABC): @@ -39,10 +41,11 @@ class RagTool(BaseTool): def _set_default_adapter(self): if isinstance(self.adapter, RagTool._AdapterPlaceholder): from embedchain import App - from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter - app = App.from_config(config=self.config) if self.config else App() + with portalocker.Lock("crewai-rag-tool.lock", timeout=10): + app = App.from_config(config=self.config) if self.config else App() + self.adapter = EmbedchainAdapter( embedchain_app=app, summarize=self.summarize ) From 30df46445bcb51975dab3d63bf3686ec5f57588b Mon Sep 17 00:00:00 2001 From: Harikrishnan K <128063333+HarikrishnanK9@users.noreply.github.com> Date: Tue, 29 Jul 2025 22:23:30 +0530 Subject: [PATCH 364/391] Adding Arxiv Paper tool (#310) * arxiv_paper_tool.py * Updating as per the review * Update __init__.py * Update __init__.py * Update arxiv_paper_tool.py * added test cases * Create README.md * Create Examples.md * Update Examples.md * Updated logger * Updated with package_dependencies,env_vars --- src/crewai_tools/__init__.py | 1 + src/crewai_tools/tools/__init__.py | 1 + .../tools/arxiv_paper_tool/Examples.md | 80 +++++++++ .../tools/arxiv_paper_tool/README.md | 142 ++++++++++++++++ .../arxiv_paper_tool/arxiv_paper_tool.py | 152 ++++++++++++++++++ .../arxiv_paper_tool/arxiv_paper_tool_test.py | 113 +++++++++++++ 6 files changed, 489 insertions(+) create mode 100644 src/crewai_tools/tools/arxiv_paper_tool/Examples.md create mode 100644 src/crewai_tools/tools/arxiv_paper_tool/README.md create mode 100644 src/crewai_tools/tools/arxiv_paper_tool/arxiv_paper_tool.py create mode 100644 src/crewai_tools/tools/arxiv_paper_tool/arxiv_paper_tool_test.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 9d9796165..a88179ee9 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -10,6 +10,7 @@ from .aws import ( from .tools import ( AIMindTool, ApifyActorsTool, + ArxivPaperTool, BraveSearchTool, BrowserbaseLoadTool, CodeDocsSearchTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 091fac62b..c7d51306f 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -1,5 +1,6 @@ from .ai_mind_tool.ai_mind_tool import AIMindTool from .apify_actors_tool.apify_actors_tool import ApifyActorsTool +from .arxiv_paper_tool.arxiv_paper_tool import ArxivPaperTool from .brave_search_tool.brave_search_tool import BraveSearchTool from .browserbase_load_tool.browserbase_load_tool import BrowserbaseLoadTool from .code_docs_search_tool.code_docs_search_tool import CodeDocsSearchTool diff --git a/src/crewai_tools/tools/arxiv_paper_tool/Examples.md b/src/crewai_tools/tools/arxiv_paper_tool/Examples.md new file mode 100644 index 000000000..676fa4106 --- /dev/null +++ b/src/crewai_tools/tools/arxiv_paper_tool/Examples.md @@ -0,0 +1,80 @@ +### Example 1: Fetching Research 
Papers from arXiv with CrewAI
+
+This example demonstrates how to build a simple CrewAI workflow that automatically searches for and downloads academic papers from [arXiv.org](https://arxiv.org). The setup uses:
+
+* A custom `ArxivPaperTool` to fetch metadata and download PDFs
+* A single `Agent` tasked with locating relevant papers based on a given research topic
+* A `Task` to define the data retrieval and download process
+* A sequential `Crew` to orchestrate execution
+
+The downloaded PDFs are saved to a local directory (`./DOWNLOADS`). Filenames are optionally based on sanitized paper titles, ensuring compatibility with your operating system.
+
+> The saved PDFs can be further used in **downstream tasks**, such as:
+>
+> * **RAG (Retrieval-Augmented Generation)**
+> * **Summarization**
+> * **Citation extraction**
+> * **Embedding-based search or analysis**
+
+---
+
+```python
+from crewai import Agent, Task, Crew, Process, LLM
+from crewai_tools import ArxivPaperTool
+
+llm = LLM(
+    model="ollama/llama3.1",
+    base_url="http://localhost:11434",
+    temperature=0.1
+)
+
+topic = "Crew AI"
+max_results = 3
+save_dir = "./DOWNLOADS"
+use_title_as_filename = True
+
+tool = ArxivPaperTool(
+    download_pdfs=True,
+    save_dir=save_dir,
+    use_title_as_filename=use_title_as_filename
+)
+tool.result_as_answer = True  # Required, otherwise the agent reprocesses the tool output instead of returning it as the task's final answer
+
+arxiv_paper_fetch = Agent(
+    role="Arxiv Data Fetcher",
+    goal=f"Retrieve relevant papers from arXiv on the research topic {topic}. Download at most {max_results} papers to {save_dir}, using the title as the filename: {use_title_as_filename}.",
+    backstory="An expert in scientific data retrieval, skilled in extracting academic content from arXiv.",
+    llm=llm,
+    verbose=True,
+    allow_delegation=False
+)
+fetch_task = Task(
+    description=(
+        f"Search arXiv for the topic '{topic}' and fetch up to {max_results} papers. "
+        f"Download PDFs for analysis and store them at {save_dir}."
+    ),
+    expected_output="PDFs saved to disk for downstream agents.",
+    agent=arxiv_paper_fetch,
+    tools=[tool],  # Use the actual tool instance here
+)
+
+pdf_qa_crew = Crew(
+    agents=[arxiv_paper_fetch],
+    tasks=[fetch_task],
+    process=Process.sequential,
+    verbose=True,
+)
+
+result = pdf_qa_crew.kickoff()
+
+print(f"\n🤖 Answer:\n\n{result.raw}\n")
+```
diff --git a/src/crewai_tools/tools/arxiv_paper_tool/README.md b/src/crewai_tools/tools/arxiv_paper_tool/README.md
new file mode 100644
index 000000000..f9ef56bdc
--- /dev/null
+++ b/src/crewai_tools/tools/arxiv_paper_tool/README.md
@@ -0,0 +1,142 @@
+# 📚 ArxivPaperTool
+
+The **ArxivPaperTool** is a utility for fetching metadata and optionally downloading PDFs of academic papers from the [arXiv](https://arxiv.org) platform using its public API. It supports configurable queries, batch retrieval, PDF downloading, and clean formatting for summaries and metadata. This tool is particularly useful for researchers, students, academic agents, and AI tools performing automated literature reviews.
+
+---
+
+## Description
+
+This tool:
+
+* Accepts a **search query** and retrieves a list of papers from arXiv.
+* Allows configuration of the **maximum number of results** to fetch.
+* Optionally downloads the **PDFs** of the matched papers.
+* Lets you specify whether to name PDF files using the **arXiv ID** or **paper title**.
+* Saves downloaded files into a **custom or default directory**.
+* Returns structured summaries of all fetched papers including metadata.
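+
+As a quick point of reference, here is a minimal sketch of the raw arXiv Atom API request the tool wraps (the endpoint and query parameters mirror `fetch_arxiv_data` in the implementation below; the search string itself is illustrative):
+
+```python
+import urllib.parse
+import urllib.request
+
+# Same endpoint and parameters the tool builds internally.
+query = urllib.parse.quote("transformer neural network")
+api_url = f"http://export.arxiv.org/api/query?search_query={query}&start=0&max_results=5"
+
+with urllib.request.urlopen(api_url, timeout=10) as response:
+    atom_feed = response.read().decode("utf-8")  # Atom XML, one <entry> per paper
+```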
+
+---
+
+## Arguments
+
+| Argument                | Type   | Required | Description                                                                        |
+| ----------------------- | ------ | -------- | ---------------------------------------------------------------------------------- |
+| `search_query`          | `str`  | ✅       | Search query string (e.g., `"transformer neural network"`).                        |
+| `max_results`           | `int`  | ✅       | Number of results to fetch (between 1 and 100).                                    |
+| `download_pdfs`         | `bool` | ❌       | Whether to download the corresponding PDFs. Defaults to `False`.                   |
+| `save_dir`              | `str`  | ❌       | Directory to save PDFs (created if it doesn't exist). Defaults to `./arxiv_pdfs`.  |
+| `use_title_as_filename` | `bool` | ❌       | Use the paper title as the filename (sanitized). Defaults to `False`.              |
+
+---
+
+## 📄 `ArxivPaperTool` Usage Examples
+
+This document shows how to use the `ArxivPaperTool` to fetch research paper metadata from arXiv and optionally download PDFs.
+
+### 🔧 Tool Initialization
+
+```python
+from crewai_tools import ArxivPaperTool
+```
+
+---
+
+### Example 1: Fetch Metadata Only (No Downloads)
+
+```python
+tool = ArxivPaperTool()
+result = tool._run(
+    search_query="deep learning",
+    max_results=1
+)
+print(result)
+```
+
+---
+
+### Example 2: Fetch and Download PDFs (arXiv ID as Filename)
+
+```python
+tool = ArxivPaperTool(download_pdfs=True)
+result = tool._run(
+    search_query="transformer models",
+    max_results=2
+)
+print(result)
+```
+
+---
+
+### Example 3: Download PDFs into a Custom Directory
+
+```python
+tool = ArxivPaperTool(
+    download_pdfs=True,
+    save_dir="./my_papers"
+)
+result = tool._run(
+    search_query="graph neural networks",
+    max_results=2
+)
+print(result)
+```
+
+---
+
+### Example 4: Use Paper Titles as Filenames
+
+```python
+tool = ArxivPaperTool(
+    download_pdfs=True,
+    use_title_as_filename=True
+)
+result = tool._run(
+    search_query="vision transformers",
+    max_results=1
+)
+print(result)
+```
+
+---
+
+### Example 5: All Options Combined
+
+```python
+tool = ArxivPaperTool(
+    download_pdfs=True,
+    save_dir="./downloads",
+    use_title_as_filename=True
+)
+result = tool._run(
+    search_query="stable diffusion",
+    max_results=3
+)
+print(result)
+```
+
+---
+
+### Run via `__main__`
+
+Your file can also include:
+
+```python
+if __name__ == "__main__":
+    tool = ArxivPaperTool(
+        download_pdfs=True,
+        save_dir="./downloads2",
+        use_title_as_filename=False
+    )
+    result = tool._run(
+        search_query="deep learning",
+        max_results=1
+    )
+    print(result)
+```
+
+---
+
diff --git a/src/crewai_tools/tools/arxiv_paper_tool/arxiv_paper_tool.py b/src/crewai_tools/tools/arxiv_paper_tool/arxiv_paper_tool.py
new file mode 100644
index 000000000..acd6bbe77
--- /dev/null
+++ b/src/crewai_tools/tools/arxiv_paper_tool/arxiv_paper_tool.py
@@ -0,0 +1,152 @@
+import re
+import time
+import urllib.request
+import urllib.parse
+import urllib.error
+import xml.etree.ElementTree as ET
+from typing import Type, List, Optional, ClassVar
+from pydantic import BaseModel, Field
+from crewai.tools import BaseTool, EnvVar
+import logging
+from pathlib import Path
+
+logger = logging.getLogger(__file__)
+
+class ArxivToolInput(BaseModel):
+    search_query: str = Field(..., description="Search query for Arxiv, e.g., 'transformer neural network'")
+    max_results: int = Field(5, ge=1, le=100, description="Max results to fetch; must be between 1 and 100")
+
+class ArxivPaperTool(BaseTool):
+    BASE_API_URL: ClassVar[str] = "http://export.arxiv.org/api/query"
+    SLEEP_DURATION: ClassVar[int] = 1
+    SUMMARY_TRUNCATE_LENGTH: ClassVar[int] = 300
+    ATOM_NAMESPACE: ClassVar[str] = 
"{http://www.w3.org/2005/Atom}" + REQUEST_TIMEOUT: ClassVar[int] = 10 + name: str = "Arxiv Paper Fetcher and Downloader" + description: str = "Fetches metadata from Arxiv based on a search query and optionally downloads PDFs." + args_schema: Type[BaseModel] = ArxivToolInput + model_config = {"extra": "allow"} + package_dependencies: List[str] = ["pydantic"] + env_vars: List[EnvVar] = [] + + def __init__(self, download_pdfs=False, save_dir="./arxiv_pdfs", use_title_as_filename=False): + super().__init__() + self.download_pdfs = download_pdfs + self.save_dir = save_dir + self.use_title_as_filename = use_title_as_filename + + def _run(self, search_query: str, max_results: int = 5) -> str: + try: + args = ArxivToolInput(search_query=search_query, max_results=max_results) + logger.info(f"Running Arxiv tool: query='{args.search_query}', max_results={args.max_results}, " + f"download_pdfs={self.download_pdfs}, save_dir='{self.save_dir}', " + f"use_title_as_filename={self.use_title_as_filename}") + + papers = self.fetch_arxiv_data(args.search_query, args.max_results) + + if self.download_pdfs: + save_dir = self._validate_save_path(self.save_dir) + for paper in papers: + if paper['pdf_url']: + if self.use_title_as_filename: + safe_title = re.sub(r'[\\/*?:"<>|]', "_", paper['title']).strip() + filename_base = safe_title or paper['arxiv_id'] + else: + filename_base = paper['arxiv_id'] + filename = f"{filename_base[:500]}.pdf" + save_path = Path(save_dir) / filename + + self.download_pdf(paper['pdf_url'], save_path) + time.sleep(self.SLEEP_DURATION) + + results = [self._format_paper_result(p) for p in papers] + return "\n\n" + "-" * 80 + "\n\n".join(results) + + except Exception as e: + logger.error(f"ArxivTool Error: {str(e)}") + return f"Failed to fetch or download Arxiv papers: {str(e)}" + + + def fetch_arxiv_data(self, search_query: str, max_results: int) -> List[dict]: + api_url = f"{self.BASE_API_URL}?search_query={urllib.parse.quote(search_query)}&start=0&max_results={max_results}" + logger.info(f"Fetching data from Arxiv API: {api_url}") + + try: + with urllib.request.urlopen(api_url, timeout=self.REQUEST_TIMEOUT) as response: + if response.status != 200: + raise Exception(f"HTTP {response.status}: {response.reason}") + data = response.read().decode('utf-8') + except urllib.error.URLError as e: + logger.error(f"Error fetching data from Arxiv: {e}") + raise + + root = ET.fromstring(data) + papers = [] + + for entry in root.findall(self.ATOM_NAMESPACE + "entry"): + raw_id = self._get_element_text(entry, "id") + arxiv_id = raw_id.split('/')[-1].replace('.', '_') if raw_id else "unknown" + + title = self._get_element_text(entry, "title") or "No Title" + summary = self._get_element_text(entry, "summary") or "No Summary" + published = self._get_element_text(entry, "published") or "No Publish Date" + authors = [ + self._get_element_text(author, "name") or "Unknown" + for author in entry.findall(self.ATOM_NAMESPACE + "author") + ] + + pdf_url = self._extract_pdf_url(entry) + + papers.append({ + "arxiv_id": arxiv_id, + "title": title, + "summary": summary, + "authors": authors, + "published_date": published, + "pdf_url": pdf_url + }) + + return papers + + @staticmethod + def _get_element_text(entry: ET.Element, element_name: str) -> Optional[str]: + elem = entry.find(f'{ArxivPaperTool.ATOM_NAMESPACE}{element_name}') + return elem.text.strip() if elem is not None and elem.text else None + + def _extract_pdf_url(self, entry: ET.Element) -> Optional[str]: + for link in entry.findall(self.ATOM_NAMESPACE + 
"link"): + if link.attrib.get('title', '').lower() == 'pdf': + return link.attrib.get('href') + for link in entry.findall(self.ATOM_NAMESPACE + "link"): + href = link.attrib.get('href') + if href and 'pdf' in href: + return href + return None + + def _format_paper_result(self, paper: dict) -> str: + summary = (paper['summary'][:self.SUMMARY_TRUNCATE_LENGTH] + '...') \ + if len(paper['summary']) > self.SUMMARY_TRUNCATE_LENGTH else paper['summary'] + authors_str = ', '.join(paper['authors']) + return (f"Title: {paper['title']}\n" + f"Authors: {authors_str}\n" + f"Published: {paper['published_date']}\n" + f"PDF: {paper['pdf_url'] or 'N/A'}\n" + f"Summary: {summary}") + + @staticmethod + def _validate_save_path(path: str) -> Path: + save_path = Path(path).resolve() + save_path.mkdir(parents=True, exist_ok=True) + return save_path + + def download_pdf(self, pdf_url: str, save_path: str): + try: + logger.info(f"Downloading PDF from {pdf_url} to {save_path}") + urllib.request.urlretrieve(pdf_url, str(save_path)) + logger.info(f"PDF saved: {save_path}") + except urllib.error.URLError as e: + logger.error(f"Network error occurred while downloading {pdf_url}: {e}") + raise + except OSError as e: + logger.error(f"File save error for {save_path}: {e}") + raise diff --git a/src/crewai_tools/tools/arxiv_paper_tool/arxiv_paper_tool_test.py b/src/crewai_tools/tools/arxiv_paper_tool/arxiv_paper_tool_test.py new file mode 100644 index 000000000..14f1e64d1 --- /dev/null +++ b/src/crewai_tools/tools/arxiv_paper_tool/arxiv_paper_tool_test.py @@ -0,0 +1,113 @@ +import pytest +import urllib.error +from unittest.mock import patch, MagicMock, mock_open +from pathlib import Path +import xml.etree.ElementTree as ET +from crewai_tools.tools.arxiv_paper_tool import ArxivPaperTool + +@pytest.fixture +def tool(): + return ArxivPaperTool(download_pdfs=False) + +def mock_arxiv_response(): + return ''' + + + http://arxiv.org/abs/1234.5678 + Sample Paper + This is a summary of the sample paper. 
+ 2022-01-01T00:00:00Z + John Doe + + + ''' + +@patch("urllib.request.urlopen") +def test_fetch_arxiv_data(mock_urlopen, tool): + mock_response = MagicMock() + mock_response.status = 200 + mock_response.read.return_value = mock_arxiv_response().encode("utf-8") + mock_urlopen.return_value.__enter__.return_value = mock_response + + results = tool.fetch_arxiv_data("transformer", 1) + assert isinstance(results, list) + assert results[0]['title'] == "Sample Paper" + +@patch("urllib.request.urlopen", side_effect=urllib.error.URLError("Timeout")) +def test_fetch_arxiv_data_network_error(mock_urlopen, tool): + with pytest.raises(urllib.error.URLError): + tool.fetch_arxiv_data("transformer", 1) + +@patch("urllib.request.urlretrieve") +def test_download_pdf_success(mock_urlretrieve): + tool = ArxivPaperTool() + tool.download_pdf("http://arxiv.org/pdf/1234.5678.pdf", Path("test.pdf")) + mock_urlretrieve.assert_called_once() + +@patch("urllib.request.urlretrieve", side_effect=OSError("Permission denied")) +def test_download_pdf_oserror(mock_urlretrieve): + tool = ArxivPaperTool() + with pytest.raises(OSError): + tool.download_pdf("http://arxiv.org/pdf/1234.5678.pdf", Path("/restricted/test.pdf")) + +@patch("urllib.request.urlopen") +@patch("urllib.request.urlretrieve") +def test_run_with_download(mock_urlretrieve, mock_urlopen): + mock_response = MagicMock() + mock_response.status = 200 + mock_response.read.return_value = mock_arxiv_response().encode("utf-8") + mock_urlopen.return_value.__enter__.return_value = mock_response + + tool = ArxivPaperTool(download_pdfs=True) + output = tool._run("transformer", 1) + assert "Title: Sample Paper" in output + mock_urlretrieve.assert_called_once() + +@patch("urllib.request.urlopen") +def test_run_no_download(mock_urlopen): + mock_response = MagicMock() + mock_response.status = 200 + mock_response.read.return_value = mock_arxiv_response().encode("utf-8") + mock_urlopen.return_value.__enter__.return_value = mock_response + + tool = ArxivPaperTool(download_pdfs=False) + result = tool._run("transformer", 1) + assert "Title: Sample Paper" in result + +@patch("pathlib.Path.mkdir") +def test_validate_save_path_creates_directory(mock_mkdir): + path = ArxivPaperTool._validate_save_path("new_folder") + mock_mkdir.assert_called_once_with(parents=True, exist_ok=True) + assert isinstance(path, Path) + +@patch("urllib.request.urlopen") +def test_run_handles_exception(mock_urlopen): + mock_urlopen.side_effect = Exception("API failure") + tool = ArxivPaperTool() + result = tool._run("transformer", 1) + assert "Failed to fetch or download Arxiv papers" in result + + +@patch("urllib.request.urlopen") +def test_invalid_xml_response(mock_urlopen, tool): + mock_response = MagicMock() + mock_response.read.return_value = b"" + mock_response.status = 200 + mock_urlopen.return_value.__enter__.return_value = mock_response + + with pytest.raises(ET.ParseError): + tool.fetch_arxiv_data("quantum", 1) + +@patch.object(ArxivPaperTool, "fetch_arxiv_data") +def test_run_with_max_results(mock_fetch, tool): + mock_fetch.return_value = [{ + "arxiv_id": f"test_{i}", + "title": f"Title {i}", + "summary": "Summary", + "authors": ["Author"], + "published_date": "2023-01-01", + "pdf_url": None + } for i in range(100)] + + result = tool._run(search_query="test", max_results=100) + assert result.count("Title:") == 100 From 707c8583f4ef0a41054d42cd2b30b863cdd462e9 Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Tue, 29 Jul 2025 15:56:20 -0300 Subject: [PATCH 365/391] chore: make tavily tools exportable 
(#400) --- src/crewai_tools/__init__.py | 2 ++ src/crewai_tools/tools/__init__.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index a88179ee9..4a782aeda 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -72,6 +72,8 @@ from .tools import ( SnowflakeSearchTool, SpiderTool, StagehandTool, + TavilyExtractorTool, + TavilySearchTool, TXTSearchTool, VisionTool, WeaviateVectorSearchTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index c7d51306f..5e292e1fa 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -89,6 +89,8 @@ from .snowflake_search_tool import ( from .spider_tool.spider_tool import SpiderTool from .stagehand_tool.stagehand_tool import StagehandTool from .txt_search_tool.txt_search_tool import TXTSearchTool +from .tavily_extractor_tool.tavily_extractor_tool import TavilyExtractorTool +from .tavily_search_tool.tavily_search_tool import TavilySearchTool from .vision_tool.vision_tool import VisionTool from .weaviate_tool.vector_search import WeaviateVectorSearchTool from .website_search.website_search_tool import WebsiteSearchTool From 9220cfba286834a666063ea724d82a8c7c2c8e34 Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Tue, 5 Aug 2025 10:46:11 -0300 Subject: [PATCH 366/391] fix: use proper ArxivPaperTool import (#408) --- .../tools/arxiv_paper_tool/arxiv_paper_tool_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/crewai_tools/tools/arxiv_paper_tool/arxiv_paper_tool_test.py b/src/crewai_tools/tools/arxiv_paper_tool/arxiv_paper_tool_test.py index 14f1e64d1..4f8747d2f 100644 --- a/src/crewai_tools/tools/arxiv_paper_tool/arxiv_paper_tool_test.py +++ b/src/crewai_tools/tools/arxiv_paper_tool/arxiv_paper_tool_test.py @@ -3,7 +3,7 @@ import urllib.error from unittest.mock import patch, MagicMock, mock_open from pathlib import Path import xml.etree.ElementTree as ET -from crewai_tools.tools.arxiv_paper_tool import ArxivPaperTool +from crewai_tools import ArxivPaperTool @pytest.fixture def tool(): @@ -97,7 +97,7 @@ def test_invalid_xml_response(mock_urlopen, tool): with pytest.raises(ET.ParseError): tool.fetch_arxiv_data("quantum", 1) - + @patch.object(ArxivPaperTool, "fetch_arxiv_data") def test_run_with_max_results(mock_fetch, tool): mock_fetch.return_value = [{ From 4daf18256d07c849269bac695027b88cfb2a0453 Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Tue, 5 Aug 2025 10:53:57 -0300 Subject: [PATCH 367/391] feat: allow custom client_timeout for MCPAdapter (#409) --- src/crewai_tools/adapters/mcp_adapter.py | 12 +++++-- tests/adapters/mcp_adapter_test.py | 41 ++++++++++++++++++++++++ 2 files changed, 50 insertions(+), 3 deletions(-) diff --git a/src/crewai_tools/adapters/mcp_adapter.py b/src/crewai_tools/adapters/mcp_adapter.py index db4c15a24..8e602f376 100644 --- a/src/crewai_tools/adapters/mcp_adapter.py +++ b/src/crewai_tools/adapters/mcp_adapter.py @@ -50,13 +50,17 @@ class MCPServerAdapter: with MCPServerAdapter(..., "tool1", "tool2") as filtered_tools: # only tool1 and tool2 are available + # context manager with custom connect timeout (60 seconds) + with MCPServerAdapter(..., connect_timeout=60) as tools: + # tools is now available with longer timeout + # manually stop mcp server try: mcp_server = MCPServerAdapter(...) 
tools = mcp_server.tools # all tools - # or with filtered tools - mcp_server = MCPServerAdapter(..., "tool1", "tool2") + # or with filtered tools and custom timeout + mcp_server = MCPServerAdapter(..., "tool1", "tool2", connect_timeout=45) filtered_tools = mcp_server.tools # only tool1 and tool2 ... finally: @@ -70,6 +74,7 @@ class MCPServerAdapter: self, serverparams: StdioServerParameters | dict[str, Any], *tool_names: str, + connect_timeout: int = 30, ): """Initialize the MCP Server @@ -78,6 +83,7 @@ class MCPServerAdapter: `StdioServerParameters` or a `dict` respectively for STDIO and SSE. *tool_names: Optional names of tools to filter. If provided, only tools with matching names will be available. + connect_timeout: Connection timeout in seconds to the MCP server (default is 30s). """ @@ -106,7 +112,7 @@ class MCPServerAdapter: try: self._serverparams = serverparams - self._adapter = MCPAdapt(self._serverparams, CrewAIAdapter()) + self._adapter = MCPAdapt(self._serverparams, CrewAIAdapter(), connect_timeout) self.start() except Exception as e: diff --git a/tests/adapters/mcp_adapter_test.py b/tests/adapters/mcp_adapter_test.py index d0dc88680..81c3c529b 100644 --- a/tests/adapters/mcp_adapter_test.py +++ b/tests/adapters/mcp_adapter_test.py @@ -1,4 +1,5 @@ from textwrap import dedent +from unittest.mock import MagicMock, patch import pytest from mcp import StdioServerParameters @@ -187,3 +188,43 @@ def test_filter_with_only_nonexistent_tools(echo_server_script): # Should return an empty tool collection assert isinstance(tools, ToolCollection) assert len(tools) == 0 + +def test_connect_timeout_parameter(echo_server_script): + serverparams = StdioServerParameters( + command="uv", args=["run", "python", "-c", echo_server_script] + ) + with MCPServerAdapter(serverparams, connect_timeout=60) as tools: + assert isinstance(tools, ToolCollection) + assert len(tools) == 2 + assert tools[0].name == "echo_tool" + assert tools[1].name == "calc_tool" + assert tools[0].run(text="hello") == "Echo: hello" + +def test_connect_timeout_with_filtered_tools(echo_server_script): + serverparams = StdioServerParameters( + command="uv", args=["run", "python", "-c", echo_server_script] + ) + with MCPServerAdapter(serverparams, "echo_tool", connect_timeout=45) as tools: + assert isinstance(tools, ToolCollection) + assert len(tools) == 1 + assert tools[0].name == "echo_tool" + assert tools[0].run(text="timeout test") == "Echo: timeout test" + +@patch('crewai_tools.adapters.mcp_adapter.MCPAdapt') +def test_connect_timeout_passed_to_mcpadapt(mock_mcpadapt): + mock_adapter_instance = MagicMock() + mock_mcpadapt.return_value = mock_adapter_instance + + serverparams = StdioServerParameters( + command="uv", args=["run", "echo", "test"] + ) + + MCPServerAdapter(serverparams) + mock_mcpadapt.assert_called_once() + assert mock_mcpadapt.call_args[0][2] == 30 + + mock_mcpadapt.reset_mock() + + MCPServerAdapter(serverparams, connect_timeout=5) + mock_mcpadapt.assert_called_once() + assert mock_mcpadapt.call_args[0][2] == 5 From d00c9764fc44563163515604410b0b322a52232b Mon Sep 17 00:00:00 2001 From: Volodymyr Tkachuk <57520563+volodymyr-memsql@users.noreply.github.com> Date: Thu, 7 Aug 2025 16:51:37 +0300 Subject: [PATCH 368/391] feat: Adding SingleStoreSearchTool (#349) * initial commit * Add actual SinglesStore Search Tool implementation * add the implementation * update readme * add tool's description * add tests * fix tests * review comments * remove schema from public exports * fix test failure * revert tools.specs.json 
* added dependencies and env vars descriptions --- src/crewai_tools/__init__.py | 5 +- src/crewai_tools/tools/__init__.py | 19 +- .../tools/singlestore_search_tool/README.md | 299 ++++++++++++ .../tools/singlestore_search_tool/__init__.py | 6 + .../singlestore_search_tool.py | 429 ++++++++++++++++++ tests/tools/singlestore_search_tool_test.py | 336 ++++++++++++++ 6 files changed, 1084 insertions(+), 10 deletions(-) create mode 100644 src/crewai_tools/tools/singlestore_search_tool/README.md create mode 100644 src/crewai_tools/tools/singlestore_search_tool/__init__.py create mode 100644 src/crewai_tools/tools/singlestore_search_tool/singlestore_search_tool.py create mode 100644 tests/tools/singlestore_search_tool_test.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 4a782aeda..7e1a7c584 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -42,10 +42,10 @@ from .tools import ( MultiOnTool, MySQLSearchTool, NL2SQLTool, - OxylabsUniversalScraperTool, - OxylabsGoogleSearchScraperTool, OxylabsAmazonProductScraperTool, OxylabsAmazonSearchScraperTool, + OxylabsGoogleSearchScraperTool, + OxylabsUniversalScraperTool, PatronusEvalTool, PatronusLocalEvaluatorTool, PatronusPredefinedCriteriaEvalTool, @@ -68,6 +68,7 @@ from .tools import ( SerplyScholarSearchTool, SerplyWebpageToMarkdownTool, SerplyWebSearchTool, + SingleStoreSearchTool, SnowflakeConfig, SnowflakeSearchTool, SpiderTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 5e292e1fa..44aa2500a 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -39,18 +39,18 @@ from .mongodb_vector_search_tool import ( from .multion_tool.multion_tool import MultiOnTool from .mysql_search_tool.mysql_search_tool import MySQLSearchTool from .nl2sql.nl2sql_tool import NL2SQLTool -from .oxylabs_universal_scraper_tool.oxylabs_universal_scraper_tool import ( - OxylabsUniversalScraperTool, -) -from .oxylabs_google_search_scraper_tool.oxylabs_google_search_scraper_tool import ( - OxylabsGoogleSearchScraperTool, -) from .oxylabs_amazon_product_scraper_tool.oxylabs_amazon_product_scraper_tool import ( OxylabsAmazonProductScraperTool, ) from .oxylabs_amazon_search_scraper_tool.oxylabs_amazon_search_scraper_tool import ( OxylabsAmazonSearchScraperTool, ) +from .oxylabs_google_search_scraper_tool.oxylabs_google_search_scraper_tool import ( + OxylabsGoogleSearchScraperTool, +) +from .oxylabs_universal_scraper_tool.oxylabs_universal_scraper_tool import ( + OxylabsUniversalScraperTool, +) from .patronus_eval_tool import ( PatronusEvalTool, PatronusLocalEvaluatorTool, @@ -75,12 +75,15 @@ from .selenium_scraping_tool.selenium_scraping_tool import SeleniumScrapingTool from .serpapi_tool.serpapi_google_search_tool import SerpApiGoogleSearchTool from .serpapi_tool.serpapi_google_shopping_tool import SerpApiGoogleShoppingTool from .serper_dev_tool.serper_dev_tool import SerperDevTool -from .serper_scrape_website_tool.serper_scrape_website_tool import SerperScrapeWebsiteTool +from .serper_scrape_website_tool.serper_scrape_website_tool import ( + SerperScrapeWebsiteTool, +) from .serply_api_tool.serply_job_search_tool import SerplyJobSearchTool from .serply_api_tool.serply_news_search_tool import SerplyNewsSearchTool from .serply_api_tool.serply_scholar_search_tool import SerplyScholarSearchTool from .serply_api_tool.serply_web_search_tool import SerplyWebSearchTool from .serply_api_tool.serply_webpage_to_markdown_tool import 
SerplyWebpageToMarkdownTool
+from .singlestore_search_tool import SingleStoreSearchTool
 from .snowflake_search_tool import (
     SnowflakeConfig,
     SnowflakeSearchTool,
@@ -88,9 +91,9 @@ from .snowflake_search_tool import (
 )
 from .spider_tool.spider_tool import SpiderTool
 from .stagehand_tool.stagehand_tool import StagehandTool
-from .txt_search_tool.txt_search_tool import TXTSearchTool
 from .tavily_extractor_tool.tavily_extractor_tool import TavilyExtractorTool
 from .tavily_search_tool.tavily_search_tool import TavilySearchTool
+from .txt_search_tool.txt_search_tool import TXTSearchTool
 from .vision_tool.vision_tool import VisionTool
 from .weaviate_tool.vector_search import WeaviateVectorSearchTool
 from .website_search.website_search_tool import WebsiteSearchTool
diff --git a/src/crewai_tools/tools/singlestore_search_tool/README.md b/src/crewai_tools/tools/singlestore_search_tool/README.md
new file mode 100644
index 000000000..954264683
--- /dev/null
+++ b/src/crewai_tools/tools/singlestore_search_tool/README.md
@@ -0,0 +1,299 @@
+# SingleStoreSearchTool
+
+## Description
+The SingleStoreSearchTool is designed to facilitate semantic searches and SQL queries within SingleStore database tables. This tool provides a secure interface for executing SELECT and SHOW queries against SingleStore databases, with built-in connection pooling for optimal performance. It supports various connection methods and allows you to work with specific table subsets within your database.
+
+## Installation
+To install the `crewai_tools` package with SingleStore support, execute the following command:
+
+```shell
+pip install 'crewai[tools]'
+```
+
+Or install with the SingleStore extra for the latest dependencies:
+
+```shell
+uv sync --extra singlestore
+```
+
+Or install the required dependencies manually:
+
+```shell
+pip install singlestoredb>=1.12.4 SQLAlchemy>=2.0.40
+```
+
+## Features
+
+- 🔒 **Secure Query Execution**: Only SELECT and SHOW queries are allowed for security
+- 🚀 **Connection Pooling**: Built-in connection pooling for optimal performance
+- 📊 **Table Subset Support**: Work with specific tables or all tables in the database
+- 🔧 **Flexible Configuration**: Multiple connection methods supported
+- 🛡️ **SSL/TLS Support**: Comprehensive SSL configuration options
+- ⚡ **Efficient Resource Management**: Automatic connection lifecycle management
+
+## Basic Usage
+
+### Simple Connection
+
+```python
+from crewai_tools import SingleStoreSearchTool
+
+# Basic connection using host/user/password
+tool = SingleStoreSearchTool(
+    host='localhost',
+    user='your_username',
+    password='your_password',
+    database='your_database',
+    port=3306
+)
+
+# Execute a search query
+result = tool._run("SELECT * FROM employees WHERE department = 'Engineering' LIMIT 10")
+print(result)
+```
+
+### Working with Specific Tables
+
+```python
+# Initialize tool for specific tables only
+tool = SingleStoreSearchTool(
+    tables=['employees', 'departments'],  # Only work with these tables
+    host='your_host',
+    user='your_username',
+    password='your_password',
+    database='your_database'
+)
+```
+
+## Complete CrewAI Integration Example
+
+Here's a complete example showing how to use the SingleStoreSearchTool with CrewAI agents and tasks:
+
+```python
+from crewai import Agent, Task, Crew
+from crewai_tools import SingleStoreSearchTool
+
+# Initialize the SingleStore search tool
+singlestore_tool = SingleStoreSearchTool(
+    tables=["products", "sales", "customers"],  # Specify the tables you want to search
+    host="localhost",
port=3306, + user="root", + password="pass", + database="crewai", +) + +# Create an agent that uses this tool +data_analyst = Agent( + role="Business Analyst", + goal="Analyze and answer business questions using SQL data", + backstory="Expert in interpreting business needs and transforming them into data queries.", + tools=[singlestore_tool], + verbose=True, + embedder={ + "provider": "ollama", + "config": { + "model": "nomic-embed-text", + }, + }, +) + +# Define a task +task = Task( + description="List the top 2 customers by total sales amount.", + agent=data_analyst, + expected_output="A ranked list of top 2 customers that have the highest total sales amount, including their names and total sales figures.", +) + +# Run the crew +crew = Crew(tasks=[task], verbose=True) +result = crew.kickoff() +``` + +### Advanced CrewAI Example with Multiple Agents + +```python +from crewai import Agent, Task, Crew +from crewai_tools import SingleStoreSearchTool + +# Initialize the tool with connection URL +singlestore_tool = SingleStoreSearchTool( + host="user:password@localhost:3306/ecommerce_db", + tables=["orders", "products", "customers", "order_items"] +) + +# Data Analyst Agent +data_analyst = Agent( + role="Senior Data Analyst", + goal="Extract insights from database queries and provide data-driven recommendations", + backstory="You are an experienced data analyst with expertise in SQL and business intelligence.", + tools=[singlestore_tool], + verbose=True +) + +# Business Intelligence Agent +bi_specialist = Agent( + role="Business Intelligence Specialist", + goal="Transform data insights into actionable business recommendations", + backstory="You specialize in translating complex data analysis into clear business strategies.", + verbose=True +) + +# Define multiple tasks +data_extraction_task = Task( + description=""" + Analyze the sales data to find: + 1. Top 5 best-selling products by quantity + 2. Monthly sales trends for the last 6 months + 3. Customer segments by purchase frequency + """, + agent=data_analyst, + expected_output="Detailed SQL query results with sales analysis including product rankings, trends, and customer segments." +) + +insights_task = Task( + description=""" + Based on the sales data analysis, provide business recommendations for: + 1. Inventory management for top products + 2. Marketing strategies for different customer segments + 3. Sales forecasting insights + """, + agent=bi_specialist, + expected_output="Strategic business recommendations with actionable insights based on the data analysis.", + context=[data_extraction_task] +) + +# Create and run the crew +analytics_crew = Crew( + agents=[data_analyst, bi_specialist], + tasks=[data_extraction_task, insights_task], + verbose=True +) + +result = analytics_crew.kickoff() +``` + +## Connection Methods + +SingleStore supports multiple connection methods. Choose the one that best fits your environment: + +### 1. Standard Connection + +```python +tool = SingleStoreSearchTool( + host='your_host', + user='your_username', + password='your_password', + database='your_database', + port=3306 +) +``` + +### 2. Connection URL (Recommended) + +You can use a complete connection URL in the `host` parameter for simplified configuration: + +```python +# Using connection URL in host parameter +tool = SingleStoreSearchTool( + host='user:password@localhost:3306/database_name' +) + +# Or for SingleStore Cloud +tool = SingleStoreSearchTool( + host='user:password@your_cloud_host:3333/database_name?ssl_disabled=false' +) +``` + +### 3. 
Environment Variable Configuration + +Set the `SINGLESTOREDB_URL` environment variable and initialize the tool without any connection arguments: + +```bash +# Set the environment variable +export SINGLESTOREDB_URL="singlestoredb://user:password@localhost:3306/database_name" + +# Or for cloud connections +export SINGLESTOREDB_URL="singlestoredb://user:password@your_cloud_host:3333/database_name?ssl_disabled=false" +``` + +```python +# No connection arguments needed when using environment variable +tool = SingleStoreSearchTool() + +# Or specify only table subset +tool = SingleStoreSearchTool(tables=['employees', 'departments']) +``` + +### 4. Connection with SSL + +```python +tool = SingleStoreSearchTool( + host='your_host', + user='your_username', + password='your_password', + database='your_database', + ssl_ca='/path/to/ca-cert.pem', + ssl_cert='/path/to/client-cert.pem', + ssl_key='/path/to/client-key.pem' +) +``` + +### 5. Advanced Configuration + +```python +tool = SingleStoreSearchTool( + host='your_host', + user='your_username', + password='your_password', + database='your_database', + # Connection pool settings + pool_size=10, + max_overflow=20, + timeout=60, + # Advanced options + charset='utf8mb4', + autocommit=True, + connect_timeout=30, + results_format='tuple', + # Custom connection attributes + conn_attrs={ + 'program_name': 'MyApp', + 'custom_attr': 'value' + } +) +``` + +## Configuration Parameters + +### Basic Connection Parameters +- `host`: Database host address or complete connection URL +- `user`: Database username +- `password`: Database password +- `port`: Database port (default: 3306) +- `database`: Database name +- `tables`: List of specific tables to work with (optional) + +### Connection Pool Parameters +- `pool_size`: Maximum number of connections in the pool (default: 5) +- `max_overflow`: Maximum overflow connections beyond pool_size (default: 10) +- `timeout`: Connection timeout in seconds (default: 30) + +### SSL/TLS Parameters +- `ssl_key`: Path to client private key file +- `ssl_cert`: Path to client certificate file +- `ssl_ca`: Path to certificate authority file +- `ssl_disabled`: Disable SSL (default: None) +- `ssl_verify_cert`: Verify server certificate +- `ssl_verify_identity`: Verify server identity + +### Advanced Parameters +- `charset`: Character set for the connection +- `autocommit`: Enable autocommit mode +- `connect_timeout`: Connection timeout in seconds +- `results_format`: Format for query results ('tuple', 'dict', etc.) +- `vector_data_format`: Format for vector data ('binary', 'json') +- `parse_json`: Parse JSON columns automatically + + +For more detailed connection options and advanced configurations, refer to the [SingleStore Python SDK documentation](https://singlestoredb-python.labs.singlestore.com/getting-started.html). 
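+
+To make the query guard above concrete, here is a short sketch of how the tool responds to allowed and disallowed statements (the return strings follow `_validate_query` and `_run` in the implementation below; the connection URL and table names are illustrative):
+
+```python
+tool = SingleStoreSearchTool(host="user:password@localhost:3306/my_db")
+
+# Read-only statements are executed and returned as formatted rows.
+print(tool._run("SELECT name, salary FROM employees LIMIT 3"))
+
+# Anything other than SELECT/SHOW is rejected before reaching the database.
+print(tool._run("DROP TABLE employees"))
+# -> Invalid search query: Only SELECT and SHOW queries are supported for security reasons.
+```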
diff --git a/src/crewai_tools/tools/singlestore_search_tool/__init__.py b/src/crewai_tools/tools/singlestore_search_tool/__init__.py new file mode 100644 index 000000000..4ec997152 --- /dev/null +++ b/src/crewai_tools/tools/singlestore_search_tool/__init__.py @@ -0,0 +1,6 @@ +from .singlestore_search_tool import SingleStoreSearchTool, SingleStoreSearchToolSchema + +__all__ = [ + "SingleStoreSearchTool", + "SingleStoreSearchToolSchema", +] diff --git a/src/crewai_tools/tools/singlestore_search_tool/singlestore_search_tool.py b/src/crewai_tools/tools/singlestore_search_tool/singlestore_search_tool.py new file mode 100644 index 000000000..4c8d768a3 --- /dev/null +++ b/src/crewai_tools/tools/singlestore_search_tool/singlestore_search_tool.py @@ -0,0 +1,429 @@ +from typing import Any, Callable, Dict, List, Optional, Type + +from crewai.tools import BaseTool, EnvVar +from pydantic import BaseModel, Field + +try: + from singlestoredb import connect + from sqlalchemy.pool import QueuePool + + SINGLSTORE_AVAILABLE = True + +except ImportError: + SINGLSTORE_AVAILABLE = False + + +class SingleStoreSearchToolSchema(BaseModel): + """Input schema for SingleStoreSearchTool. + + This schema defines the expected input format for the search tool, + ensuring that only valid SELECT and SHOW queries are accepted. + """ + + search_query: str = Field( + ..., + description=( + "Mandatory semantic search query you want to use to search the database's content. " + "Only SELECT and SHOW queries are supported." + ), + ) + + +class SingleStoreSearchTool(BaseTool): + """A tool for performing semantic searches on SingleStore database tables. + + This tool provides a safe interface for executing SELECT and SHOW queries + against a SingleStore database with connection pooling for optimal performance. + """ + + name: str = "Search a database's table(s) content" + description: str = ( + "A tool that can be used to semantic search a query from a database." + ) + args_schema: Type[BaseModel] = SingleStoreSearchToolSchema + + package_dependencies: List[str] = ["singlestoredb", "SQLAlchemy"] + env_vars: List[EnvVar] = [ + EnvVar( + name="SINGLESTOREDB_URL", + description="A comprehensive URL string that can encapsulate host, port," + " username, password, and database information, often used in environments" + " like SingleStore notebooks or specific frameworks." 
+ " For example: 'me:p455w0rd@s2-host.com/my_db'", + required=False, + default=None, + ), + EnvVar( + name="SINGLESTOREDB_HOST", + description="Specifies the hostname, IP address, or URL of" + " the SingleStoreDB workspace or cluster", + required=False, + default=None, + ), + EnvVar( + name="SINGLESTOREDB_PORT", + description="Defines the port number on which the" + " SingleStoreDB server is listening", + required=False, + default=None, + ), + EnvVar( + name="SINGLESTOREDB_USER", + description="Specifies the database user name", + required=False, + default=None, + ), + EnvVar( + name="SINGLESTOREDB_PASSWORD", + description="Specifies the database user password", + required=False, + default=None, + ), + EnvVar( + name="SINGLESTOREDB_DATABASE", + description="Name of the database to connect to", + required=False, + default=None, + ), + EnvVar( + name="SINGLESTOREDB_SSL_KEY", + description="File containing SSL key", + required=False, + default=None, + ), + EnvVar( + name="SINGLESTOREDB_SSL_CERT", + description="File containing SSL certificate", + required=False, + default=None, + ), + EnvVar( + name="SINGLESTOREDB_SSL_CA", + description="File containing SSL certificate authority", + required=False, + default=None, + ), + EnvVar( + name="SINGLESTOREDB_CONNECT_TIMEOUT", + description="The timeout for connecting to the database in seconds", + required=False, + default=None, + ), + ] + + connection_args: dict = {} + connection_pool: Optional[Any] = None + + def __init__( + self, + tables: List[str] = [], + # Basic connection parameters + host: Optional[str] = None, + user: Optional[str] = None, + password: Optional[str] = None, + port: Optional[int] = None, + database: Optional[str] = None, + driver: Optional[str] = None, + # Connection behavior options + pure_python: Optional[bool] = None, + local_infile: Optional[bool] = None, + charset: Optional[str] = None, + # SSL/TLS configuration + ssl_key: Optional[str] = None, + ssl_cert: Optional[str] = None, + ssl_ca: Optional[str] = None, + ssl_disabled: Optional[bool] = None, + ssl_cipher: Optional[str] = None, + ssl_verify_cert: Optional[bool] = None, + tls_sni_servername: Optional[str] = None, + ssl_verify_identity: Optional[bool] = None, + # Advanced connection options + conv: Optional[Dict[int, Callable[..., Any]]] = None, + credential_type: Optional[str] = None, + autocommit: Optional[bool] = None, + # Result formatting options + results_type: Optional[str] = None, + buffered: Optional[bool] = None, + results_format: Optional[str] = None, + program_name: Optional[str] = None, + conn_attrs: Optional[Dict[str, str]] = {}, + # Query execution options + multi_statements: Optional[bool] = None, + client_found_rows: Optional[bool] = None, + connect_timeout: Optional[int] = None, + # Data type handling + nan_as_null: Optional[bool] = None, + inf_as_null: Optional[bool] = None, + encoding_errors: Optional[str] = None, + track_env: Optional[bool] = None, + enable_extended_data_types: Optional[bool] = None, + vector_data_format: Optional[str] = None, + parse_json: Optional[bool] = None, + # Connection pool configuration + pool_size: Optional[int] = 5, + max_overflow: Optional[int] = 10, + timeout: Optional[float] = 30, + **kwargs, + ): + """Initialize the SingleStore search tool. + + Args: + tables: List of table names to work with. If empty, all tables will be used. 
+ host: Database host address + user: Database username + password: Database password + port: Database port number + database: Database name + pool_size: Maximum number of connections in the pool + max_overflow: Maximum overflow connections beyond pool_size + timeout: Connection timeout in seconds + **kwargs: Additional arguments passed to the parent class + """ + + if not SINGLSTORE_AVAILABLE: + import click + + if click.confirm( + "You are missing the 'singlestore' package. Would you like to install it?" + ): + import subprocess + + try: + subprocess.run( + ["uv", "add", "crewai-tools[singlestore]"], check=True + ) + + except subprocess.CalledProcessError: + raise ImportError("Failed to install singlestore package") + else: + raise ImportError( + "`singlestore` package not found, please run `uv add crewai-tools[singlestore]`" + ) + + # Set the data type for the parent class + kwargs["data_type"] = "singlestore" + super().__init__(**kwargs) + + # Build connection arguments dictionary with sensible defaults + self.connection_args = { + # Basic connection parameters + "host": host, + "user": user, + "password": password, + "port": port, + "database": database, + "driver": driver, + # Connection behavior + "pure_python": pure_python, + "local_infile": local_infile, + "charset": charset, + # SSL/TLS settings + "ssl_key": ssl_key, + "ssl_cert": ssl_cert, + "ssl_ca": ssl_ca, + "ssl_disabled": ssl_disabled, + "ssl_cipher": ssl_cipher, + "ssl_verify_cert": ssl_verify_cert, + "tls_sni_servername": tls_sni_servername, + "ssl_verify_identity": ssl_verify_identity, + # Advanced options + "conv": conv or {}, + "credential_type": credential_type, + "autocommit": autocommit, + # Result formatting + "results_type": results_type, + "buffered": buffered, + "results_format": results_format, + "program_name": program_name, + "conn_attrs": conn_attrs or {}, + # Query execution + "multi_statements": multi_statements, + "client_found_rows": client_found_rows, + "connect_timeout": connect_timeout or 10, # Default: 10 seconds + # Data type handling with defaults + "nan_as_null": nan_as_null or False, + "inf_as_null": inf_as_null or False, + "encoding_errors": encoding_errors or "strict", + "track_env": track_env or False, + "enable_extended_data_types": enable_extended_data_types or False, + "vector_data_format": vector_data_format or "binary", + "parse_json": parse_json or True, + } + + # Ensure connection attributes are properly initialized + if "conn_attrs" not in self.connection_args or not self.connection_args.get( + "conn_attrs" + ): + self.connection_args["conn_attrs"] = dict() + + # Add tool identification to connection attributes + self.connection_args["conn_attrs"][ + "_connector_name" + ] = "crewAI SingleStore Tool" + self.connection_args["conn_attrs"]["_connector_version"] = "1.0" + + # Initialize connection pool for efficient connection management + self.connection_pool = QueuePool( + creator=self._create_connection, + pool_size=pool_size, + max_overflow=max_overflow, + timeout=timeout, + ) + + # Validate database schema and initialize table information + self._initialize_tables(tables) + + def _initialize_tables(self, tables: List[str]) -> None: + """Initialize and validate the tables that this tool will work with. 
+ + Args: + tables: List of table names to validate and use + + Raises: + ValueError: If no tables exist or specified tables don't exist + """ + conn = self._get_connection() + try: + with conn.cursor() as cursor: + # Get all existing tables in the database + cursor.execute("SHOW TABLES") + existing_tables = {table[0] for table in cursor.fetchall()} + + # Validate that the database has tables + if not existing_tables or len(existing_tables) == 0: + raise ValueError( + "No tables found in the database. " + "Please ensure the database is initialized with the required tables." + ) + + # Use all tables if none specified + if not tables or len(tables) == 0: + tables = existing_tables + + # Build table definitions for description + table_definitions = [] + for table in tables: + if table not in existing_tables: + raise ValueError( + f"Table {table} does not exist in the database. " + f"Please ensure the table is created." + ) + + # Get column information for each table + cursor.execute(f"SHOW COLUMNS FROM {table}") + columns = cursor.fetchall() + column_info = ", ".join(f"{row[0]} {row[1]}" for row in columns) + table_definitions.append(f"{table}({column_info})") + finally: + # Ensure the connection is returned to the pool + conn.close() + + # Update the tool description with actual table information + self.description = ( + f"A tool that can be used to semantic search a query from a SingleStore " + f"database's {', '.join(table_definitions)} table(s) content." + ) + self._generate_description() + + def _get_connection(self) -> Optional[Any]: + """Get a connection from the connection pool. + + Returns: + Connection: A SingleStore database connection + + Raises: + Exception: If connection cannot be established + """ + try: + conn = self.connection_pool.connect() + return conn + except Exception: + # Re-raise the exception to be handled by the caller + raise + + def _create_connection(self) -> Optional[Any]: + """Create a new SingleStore connection. + + This method is used by the connection pool to create new connections + when needed. + + Returns: + Connection: A new SingleStore database connection + + Raises: + Exception: If connection cannot be created + """ + try: + conn = connect(**self.connection_args) + return conn + except Exception: + # Re-raise the exception to be handled by the caller + raise + + def _validate_query(self, search_query: str) -> tuple[bool, str]: + """Validate the search query to ensure it's safe to execute. + + Only SELECT and SHOW statements are allowed for security reasons. + + Args: + search_query: The SQL query to validate + + Returns: + tuple: (is_valid: bool, message: str) + """ + # Check if the input is a string + if not isinstance(search_query, str): + return False, "Search query must be a string." + + # Remove leading/trailing whitespace and convert to lowercase for checking + query_lower = search_query.strip().lower() + + # Allow only SELECT and SHOW statements + if not (query_lower.startswith("select") or query_lower.startswith("show")): + return ( + False, + "Only SELECT and SHOW queries are supported for security reasons.", + ) + + return True, "Valid query" + + def _run(self, search_query: str) -> Any: + """Execute the search query against the SingleStore database. 
+ + Args: + search_query: The SQL query to execute + **kwargs: Additional keyword arguments (unused) + + Returns: + str: Formatted search results or error message + """ + # Validate the query before execution + valid, message = self._validate_query(search_query) + if not valid: + return f"Invalid search query: {message}" + + # Execute the query using a connection from the pool + conn = self._get_connection() + try: + with conn.cursor() as cursor: + try: + # Execute the validated search query + cursor.execute(search_query) + results = cursor.fetchall() + + # Handle empty results + if not results: + return "No results found." + + # Format the results for readable output + formatted_results = "\n".join( + [", ".join([str(item) for item in row]) for row in results] + ) + return f"Search Results:\n{formatted_results}" + + except Exception as e: + return f"Error executing search query: {e}" + + finally: + # Ensure the connection is returned to the pool + conn.close() diff --git a/tests/tools/singlestore_search_tool_test.py b/tests/tools/singlestore_search_tool_test.py new file mode 100644 index 000000000..fb0f22c14 --- /dev/null +++ b/tests/tools/singlestore_search_tool_test.py @@ -0,0 +1,336 @@ +import os +from typing import Generator + +import pytest +from singlestoredb import connect +from singlestoredb.server import docker + +from crewai_tools import SingleStoreSearchTool +from crewai_tools.tools.singlestore_search_tool import SingleStoreSearchToolSchema + + +@pytest.fixture(scope="session") +def docker_server_url() -> Generator[str, None, None]: + """Start a SingleStore Docker server for tests.""" + try: + sdb = docker.start(license="") + conn = sdb.connect() + curr = conn.cursor() + curr.execute("CREATE DATABASE test_crewai") + curr.close() + conn.close() + yield sdb.connection_url + sdb.stop() + except Exception as e: + pytest.skip(f"Could not start SingleStore Docker container: {e}") + + +@pytest.fixture(scope="function") +def clean_db_url(docker_server_url) -> Generator[str, None, None]: + """Provide a clean database URL and clean up tables after test.""" + yield docker_server_url + try: + conn = connect(host=docker_server_url, database="test_crewai") + curr = conn.cursor() + curr.execute("SHOW TABLES") + results = curr.fetchall() + for result in results: + curr.execute(f"DROP TABLE {result[0]}") + curr.close() + conn.close() + except Exception: + # Ignore cleanup errors + pass + + +@pytest.fixture +def sample_table_setup(clean_db_url): + """Set up sample tables for testing.""" + conn = connect(host=clean_db_url, database="test_crewai") + curr = conn.cursor() + + # Create sample tables + curr.execute( + """ + CREATE TABLE employees ( + id INT PRIMARY KEY, + name VARCHAR(100), + department VARCHAR(50), + salary DECIMAL(10,2) + ) + """ + ) + + curr.execute( + """ + CREATE TABLE departments ( + id INT PRIMARY KEY, + name VARCHAR(100), + budget DECIMAL(12,2) + ) + """ + ) + + # Insert sample data + curr.execute( + """ + INSERT INTO employees VALUES + (1, 'Alice Smith', 'Engineering', 75000.00), + (2, 'Bob Johnson', 'Marketing', 65000.00), + (3, 'Carol Davis', 'Engineering', 80000.00) + """ + ) + + curr.execute( + """ + INSERT INTO departments VALUES + (1, 'Engineering', 500000.00), + (2, 'Marketing', 300000.00) + """ + ) + + curr.close() + conn.close() + return clean_db_url + + +class TestSingleStoreSearchTool: + """Test suite for SingleStoreSearchTool.""" + + def test_tool_creation_with_connection_params(self, sample_table_setup): + """Test tool creation with individual connection 
parameters.""" + # Parse URL components for individual parameters + url_parts = sample_table_setup.split("@")[1].split(":") + host = url_parts[0] + port = int(url_parts[1].split("/")[0]) + user = "root" + password = sample_table_setup.split("@")[0].split(":")[2] + tool = SingleStoreSearchTool( + tables=[], + host=host, + port=port, + user=user, + password=password, + database="test_crewai", + ) + + assert tool.name == "Search a database's table(s) content" + assert "SingleStore" in tool.description + assert ( + "employees(id int(11), name varchar(100), department varchar(50), salary decimal(10,2))" + in tool.description.lower() + ) + assert ( + "departments(id int(11), name varchar(100), budget decimal(12,2))" + in tool.description.lower() + ) + assert tool.args_schema == SingleStoreSearchToolSchema + assert tool.connection_pool is not None + + def test_tool_creation_with_connection_url(self, sample_table_setup): + """Test tool creation with connection URL.""" + tool = SingleStoreSearchTool(host=f"{sample_table_setup}/test_crewai") + + assert tool.name == "Search a database's table(s) content" + assert tool.connection_pool is not None + + def test_tool_creation_with_specific_tables(self, sample_table_setup): + """Test tool creation with specific table list.""" + tool = SingleStoreSearchTool( + tables=["employees"], + host=sample_table_setup, + database="test_crewai", + ) + + # Check that description includes specific tables + assert "employees" in tool.description + assert "departments" not in tool.description + + def test_tool_creation_with_nonexistent_table(self, sample_table_setup): + """Test tool creation fails with non-existent table.""" + + with pytest.raises(ValueError, match="Table nonexistent does not exist"): + SingleStoreSearchTool( + tables=["employees", "nonexistent"], + host=sample_table_setup, + database="test_crewai", + ) + + def test_tool_creation_with_empty_database(self, clean_db_url): + """Test tool creation fails with empty database.""" + + with pytest.raises(ValueError, match="No tables found in the database"): + SingleStoreSearchTool(host=clean_db_url, database="test_crewai") + + def test_description_generation(self, sample_table_setup): + """Test that tool description is properly generated with table info.""" + + tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai") + + # Check description contains table definitions + assert "employees(" in tool.description + assert "departments(" in tool.description + assert "id int" in tool.description.lower() + assert "name varchar" in tool.description.lower() + + def test_query_validation_select_allowed(self, sample_table_setup): + """Test that SELECT queries are allowed.""" + os.environ["SINGLESTOREDB_URL"] = sample_table_setup + tool = SingleStoreSearchTool(database="test_crewai") + + valid, message = tool._validate_query("SELECT * FROM employees") + assert valid is True + assert message == "Valid query" + + def test_query_validation_show_allowed(self, sample_table_setup): + """Test that SHOW queries are allowed.""" + tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai") + + valid, message = tool._validate_query("SHOW TABLES") + assert valid is True + assert message == "Valid query" + + def test_query_validation_case_insensitive(self, sample_table_setup): + """Test that query validation is case insensitive.""" + tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai") + + valid, _ = tool._validate_query("select * from employees") + assert valid is True + + 
valid, _ = tool._validate_query("SHOW tables") + assert valid is True + + def test_query_validation_insert_denied(self, sample_table_setup): + """Test that INSERT queries are denied.""" + tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai") + + valid, message = tool._validate_query( + "INSERT INTO employees VALUES (4, 'Test', 'Test', 1000)" + ) + assert valid is False + assert "Only SELECT and SHOW queries are supported" in message + + def test_query_validation_update_denied(self, sample_table_setup): + """Test that UPDATE queries are denied.""" + tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai") + + valid, message = tool._validate_query("UPDATE employees SET salary = 90000") + assert valid is False + assert "Only SELECT and SHOW queries are supported" in message + + def test_query_validation_delete_denied(self, sample_table_setup): + """Test that DELETE queries are denied.""" + tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai") + + valid, message = tool._validate_query("DELETE FROM employees WHERE id = 1") + assert valid is False + assert "Only SELECT and SHOW queries are supported" in message + + def test_query_validation_non_string(self, sample_table_setup): + """Test that non-string queries are rejected.""" + tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai") + + valid, message = tool._validate_query(123) + assert valid is False + assert "Search query must be a string" in message + + def test_run_select_query(self, sample_table_setup): + """Test executing a SELECT query.""" + tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai") + + result = tool._run("SELECT * FROM employees ORDER BY id") + + assert "Search Results:" in result + assert "Alice Smith" in result + assert "Bob Johnson" in result + assert "Carol Davis" in result + + def test_run_filtered_query(self, sample_table_setup): + """Test executing a filtered SELECT query.""" + tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai") + + result = tool._run( + "SELECT name FROM employees WHERE department = 'Engineering'" + ) + + assert "Search Results:" in result + assert "Alice Smith" in result + assert "Carol Davis" in result + assert "Bob Johnson" not in result + + def test_run_show_query(self, sample_table_setup): + """Test executing a SHOW query.""" + tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai") + + result = tool._run("SHOW TABLES") + + assert "Search Results:" in result + assert "employees" in result + assert "departments" in result + + def test_run_empty_result(self, sample_table_setup): + """Test executing a query that returns no results.""" + tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai") + + result = tool._run("SELECT * FROM employees WHERE department = 'NonExistent'") + + assert result == "No results found." 
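+
+    # For reference, a minimal usage sketch of the tool under test
+    # (illustrative; assumes a reachable SingleStore instance whose
+    # test_crewai database already contains an employees table):
+    #
+    #     tool = SingleStoreSearchTool(host="<connection-url>", database="test_crewai")
+    #     print(tool._run("SELECT name FROM employees LIMIT 1"))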
+ + def test_run_invalid_query_syntax(self, sample_table_setup): + """Test executing a query with invalid syntax.""" + tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai") + + result = tool._run("SELECT * FORM employees") # Intentional typo + + assert "Error executing search query:" in result + + def test_run_denied_query(self, sample_table_setup): + """Test that denied queries return appropriate error message.""" + tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai") + + result = tool._run("DELETE FROM employees") + + assert "Invalid search query:" in result + assert "Only SELECT and SHOW queries are supported" in result + + def test_connection_pool_usage(self, sample_table_setup): + """Test that connection pooling works correctly.""" + tool = SingleStoreSearchTool( + host=sample_table_setup, + database="test_crewai", + pool_size=2, + ) + + # Execute multiple queries to test pool usage + results = [] + for _ in range(5): + result = tool._run("SELECT COUNT(*) FROM employees") + results.append(result) + + # All queries should succeed + for result in results: + assert "Search Results:" in result + assert "3" in result # Count of employees + + def test_tool_schema_validation(self): + """Test that the tool schema validation works correctly.""" + # Valid input + valid_input = SingleStoreSearchToolSchema(search_query="SELECT * FROM test") + assert valid_input.search_query == "SELECT * FROM test" + + # Test that description is present + schema_dict = SingleStoreSearchToolSchema.model_json_schema() + assert "search_query" in schema_dict["properties"] + assert "description" in schema_dict["properties"]["search_query"] + + def test_connection_error_handling(self): + """Test handling of connection errors.""" + with pytest.raises(Exception): + # This should fail due to invalid connection parameters + SingleStoreSearchTool( + host="invalid_host", + port=9999, + user="invalid_user", + password="invalid_password", + database="invalid_db", + ) From 41ce4981ac08a05fcf6c4ae9e3d30d1db6a69918 Mon Sep 17 00:00:00 2001 From: meirk-brd Date: Thu, 7 Aug 2025 17:29:51 +0300 Subject: [PATCH 369/391] feat: Add Bright Data tools (#314) * Initial commit of BrightData tools * Renamed the BrightData test file names * Refactored and improved the overall BrightData tools * Add BrightData tools * Add tools to init * Added config class * Fix test failures and add missing __init__.py files - Remove problematic brightdata_dataset_tool_test.py that referenced non-existent classes - Fix brightdata_serp_tool_test.py to expect string responses instead of dict - Fix brightdata_webunlocker_tool_test.py to expect string responses instead of dict - Add missing tests/tools/__init__.py for proper test imports --------- Co-authored-by: Ranjan Dailata Co-authored-by: Tony Kipkemboi --- src/crewai_tools/__init__.py | 3 + src/crewai_tools/tools/__init__.py | 5 + .../tools/brightdata_tool/README.md | 79 +++ .../tools/brightdata_tool/__init__.py | 9 + .../brightdata_tool/brightdata_dataset.py | 566 ++++++++++++++++++ .../tools/brightdata_tool/brightdata_serp.py | 204 +++++++ .../brightdata_tool/brightdata_unlocker.py | 119 ++++ tests/tools/__init__.py | 0 tests/tools/brightdata_serp_tool_test.py | 54 ++ .../tools/brightdata_webunlocker_tool_test.py | 64 ++ 10 files changed, 1103 insertions(+) create mode 100644 src/crewai_tools/tools/brightdata_tool/README.md create mode 100644 src/crewai_tools/tools/brightdata_tool/__init__.py create mode 100644 
src/crewai_tools/tools/brightdata_tool/brightdata_dataset.py create mode 100644 src/crewai_tools/tools/brightdata_tool/brightdata_serp.py create mode 100644 src/crewai_tools/tools/brightdata_tool/brightdata_unlocker.py create mode 100644 tests/tools/__init__.py create mode 100644 tests/tools/brightdata_serp_tool_test.py create mode 100644 tests/tools/brightdata_webunlocker_tool_test.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 7e1a7c584..05482ae70 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -12,6 +12,9 @@ from .tools import ( ApifyActorsTool, ArxivPaperTool, BraveSearchTool, + BrightDataWebUnlockerTool, + BrightDataSearchTool, + BrightDataDatasetTool, BrowserbaseLoadTool, CodeDocsSearchTool, CodeInterpreterTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 44aa2500a..05219c4f7 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -102,4 +102,9 @@ from .youtube_channel_search_tool.youtube_channel_search_tool import ( YoutubeChannelSearchTool, ) from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool +from .brightdata_tool import ( + BrightDataDatasetTool, + BrightDataSearchTool, + BrightDataWebUnlockerTool +) from .zapier_action_tool.zapier_action_tool import ZapierActionTools diff --git a/src/crewai_tools/tools/brightdata_tool/README.md b/src/crewai_tools/tools/brightdata_tool/README.md new file mode 100644 index 000000000..f16b5ac73 --- /dev/null +++ b/src/crewai_tools/tools/brightdata_tool/README.md @@ -0,0 +1,79 @@ +# BrightData Tools Documentation + +## Description + +A comprehensive suite of CrewAI tools that leverage Bright Data's powerful infrastructure for web scraping, data extraction, and search operations. These tools provide three distinct capabilities: + +- **BrightDataDatasetTool**: Extract structured data from popular data feeds (Amazon, LinkedIn, Instagram, etc.) using pre-built datasets +- **BrightDataSearchTool**: Perform web searches across multiple search engines with geo-targeting and device simulation +- **BrightDataWebUnlockerTool**: Scrape any website content while bypassing bot protection mechanisms + +## Installation + +To incorporate these tools into your project, follow the installation instructions below: + +```shell +pip install crewai[tools] aiohttp requests +``` + +## Examples + +### Dataset Tool - Extract Amazon Product Data +```python +from crewai_tools import BrightDataDatasetTool + +# Initialize with specific dataset and URL +tool = BrightDataDatasetTool( + dataset_type="amazon_product", + url="https://www.amazon.com/dp/B08QB1QMJ5/" +) +result = tool.run() +``` + +### Search Tool - Perform Web Search +```python +from crewai_tools import BrightDataSearchTool + +# Initialize with search query +tool = BrightDataSearchTool( + query="latest AI trends 2025", + search_engine="google", + country="us" +) +result = tool.run() +``` + +### Web Unlocker Tool - Scrape Website Content +```python +from crewai_tools import BrightDataWebUnlockerTool + +# Initialize with target URL +tool = BrightDataWebUnlockerTool( + url="https://example.com", + data_format="markdown" +) +result = tool.run() +``` + +## Steps to Get Started + +To effectively use the BrightData Tools, follow these steps: + +1. **Package Installation**: Confirm that the `crewai[tools]` package is installed in your Python environment. + +2. 
**API Key Acquisition**: Register for a Bright Data account at `https://brightdata.com/` and obtain your API credentials from your account settings. + +3. **Environment Configuration**: Set up the required environment variables: + ```bash + export BRIGHT_DATA_API_KEY="your_api_key_here" + export BRIGHT_DATA_ZONE="your_zone_here" + ``` + +4. **Tool Selection**: Choose the appropriate tool based on your needs: + - Use **DatasetTool** for structured data from supported platforms + - Use **SearchTool** for web search operations + - Use **WebUnlockerTool** for general website scraping + +## Conclusion + +By integrating BrightData Tools into your CrewAI agents, you gain access to enterprise-grade web scraping and data extraction capabilities. These tools handle complex challenges like bot protection, geo-restrictions, and data parsing, allowing you to focus on building your applications rather than managing scraping infrastructure. \ No newline at end of file diff --git a/src/crewai_tools/tools/brightdata_tool/__init__.py b/src/crewai_tools/tools/brightdata_tool/__init__.py new file mode 100644 index 000000000..0842e97ea --- /dev/null +++ b/src/crewai_tools/tools/brightdata_tool/__init__.py @@ -0,0 +1,9 @@ +from .brightdata_dataset import BrightDataDatasetTool +from .brightdata_serp import BrightDataSearchTool +from .brightdata_unlocker import BrightDataWebUnlockerTool + +__all__ = [ + "BrightDataDatasetTool", + "BrightDataSearchTool", + "BrightDataWebUnlockerTool" +] \ No newline at end of file diff --git a/src/crewai_tools/tools/brightdata_tool/brightdata_dataset.py b/src/crewai_tools/tools/brightdata_tool/brightdata_dataset.py new file mode 100644 index 000000000..bd0dcc1c3 --- /dev/null +++ b/src/crewai_tools/tools/brightdata_tool/brightdata_dataset.py @@ -0,0 +1,566 @@ +import asyncio +import os +from typing import Any, Dict, Optional, Type + +import aiohttp +from crewai.tools import BaseTool +from pydantic import BaseModel, Field +from pydantic_settings import BaseSettings + +class BrightDataConfig(BaseSettings): + API_URL: str = "https://api.brightdata.com" + DEFAULT_TIMEOUT: int = 600 + DEFAULT_POLLING_INTERVAL: int = 1 + + class Config: + env_prefix = "BRIGHTDATA_" +class BrightDataDatasetToolException(Exception): + """Exception raised for custom error in the application.""" + + def __init__(self, message, error_code): + self.message = message + super().__init__(message) + self.error_code = error_code + + def __str__(self): + return f"{self.message} (Error Code: {self.error_code})" + + +class BrightDataDatasetToolSchema(BaseModel): + """ + Schema for validating input parameters for the BrightDataDatasetTool. + + Attributes: + dataset_type (str): Required Bright Data Dataset Type used to specify which dataset to access. + format (str): Response format (json by default). Multiple formats exist - json, ndjson, jsonl, csv + url (str): The URL from which structured data needs to be extracted. + zipcode (Optional[str]): An optional ZIP code to narrow down the data geographically. + additional_params (Optional[Dict]): Extra parameters for the Bright Data API call. 
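+
+    Example (illustrative values):
+        BrightDataDatasetToolSchema(
+            dataset_type="amazon_product",
+            url="https://www.amazon.com/dp/B08QB1QMJ5/",
+        )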
+ """ + + dataset_type: str = Field(..., description="The Bright Data Dataset Type") + format: Optional[str] = Field( + default="json", description="Response format (json by default)" + ) + url: str = Field(..., description="The URL to extract data from") + zipcode: Optional[str] = Field(default=None, description="Optional zipcode") + additional_params: Optional[Dict[str, Any]] = Field( + default=None, description="Additional params if any" + ) + +config = BrightDataConfig() + +BRIGHTDATA_API_URL = config.API_URL +timeout = config.DEFAULT_TIMEOUT + +datasets = [ + { + "id": "amazon_product", + "dataset_id": "gd_l7q7dkf244hwjntr0", + "description": "\n".join( + [ + "Quickly read structured amazon product data.", + "Requires a valid product URL with /dp/ in it.", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "amazon_product_reviews", + "dataset_id": "gd_le8e811kzy4ggddlq", + "description": "\n".join( + [ + "Quickly read structured amazon product review data.", + "Requires a valid product URL with /dp/ in it.", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "amazon_product_search", + "dataset_id": "gd_lwdb4vjm1ehb499uxs", + "description": "\n".join( + [ + "Quickly read structured amazon product search data.", + "Requires a valid search keyword and amazon domain URL.", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["keyword", "url", "pages_to_search"], + "defaults": {"pages_to_search": "1"}, + }, + { + "id": "walmart_product", + "dataset_id": "gd_l95fol7l1ru6rlo116", + "description": "\n".join( + [ + "Quickly read structured walmart product data.", + "Requires a valid product URL with /ip/ in it.", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "walmart_seller", + "dataset_id": "gd_m7ke48w81ocyu4hhz0", + "description": "\n".join( + [ + "Quickly read structured walmart seller data.", + "Requires a valid walmart seller URL.", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "ebay_product", + "dataset_id": "gd_ltr9mjt81n0zzdk1fb", + "description": "\n".join( + [ + "Quickly read structured ebay product data.", + "Requires a valid ebay product URL.", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "homedepot_products", + "dataset_id": "gd_lmusivh019i7g97q2n", + "description": "\n".join( + [ + "Quickly read structured homedepot product data.", + "Requires a valid homedepot product URL.", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "zara_products", + "dataset_id": "gd_lct4vafw1tgx27d4o0", + "description": "\n".join( + [ + "Quickly read structured zara product data.", + "Requires a valid zara product URL.", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "etsy_products", + "dataset_id": "gd_ltppk0jdv1jqz25mz", + "description": "\n".join( + [ + "Quickly read structured etsy product data.", + "Requires a valid etsy product URL.", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "bestbuy_products", + "dataset_id": "gd_ltre1jqe1jfr7cccf", + "description": "\n".join( + [ + 
"Quickly read structured bestbuy product data.", + "Requires a valid bestbuy product URL.", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "linkedin_person_profile", + "dataset_id": "gd_l1viktl72bvl7bjuj0", + "description": "\n".join( + [ + "Quickly read structured linkedin people profile data.", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "linkedin_company_profile", + "dataset_id": "gd_l1vikfnt1wgvvqz95w", + "description": "\n".join( + [ + "Quickly read structured linkedin company profile data", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "linkedin_job_listings", + "dataset_id": "gd_lpfll7v5hcqtkxl6l", + "description": "\n".join( + [ + "Quickly read structured linkedin job listings data", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "linkedin_posts", + "dataset_id": "gd_lyy3tktm25m4avu764", + "description": "\n".join( + [ + "Quickly read structured linkedin posts data", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "linkedin_people_search", + "dataset_id": "gd_m8d03he47z8nwb5xc", + "description": "\n".join( + [ + "Quickly read structured linkedin people search data", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url", "first_name", "last_name"], + }, + { + "id": "crunchbase_company", + "dataset_id": "gd_l1vijqt9jfj7olije", + "description": "\n".join( + [ + "Quickly read structured crunchbase company data", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "zoominfo_company_profile", + "dataset_id": "gd_m0ci4a4ivx3j5l6nx", + "description": "\n".join( + [ + "Quickly read structured ZoomInfo company profile data.", + "Requires a valid ZoomInfo company URL.", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "instagram_profiles", + "dataset_id": "gd_l1vikfch901nx3by4", + "description": "\n".join( + [ + "Quickly read structured Instagram profile data.", + "Requires a valid Instagram URL.", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "instagram_posts", + "dataset_id": "gd_lk5ns7kz21pck8jpis", + "description": "\n".join( + [ + "Quickly read structured Instagram post data.", + "Requires a valid Instagram URL.", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "instagram_reels", + "dataset_id": "gd_lyclm20il4r5helnj", + "description": "\n".join( + [ + "Quickly read structured Instagram reel data.", + "Requires a valid Instagram URL.", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "instagram_comments", + "dataset_id": "gd_ltppn085pokosxh13", + "description": "\n".join( + [ + "Quickly read structured Instagram comments data.", + "Requires a valid Instagram URL.", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "facebook_posts", + "dataset_id": "gd_lyclm1571iy3mv57zw", + "description": "\n".join( + [ + "Quickly read structured Facebook post data.", 
+ "Requires a valid Facebook post URL.", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "facebook_marketplace_listings", + "dataset_id": "gd_lvt9iwuh6fbcwmx1a", + "description": "\n".join( + [ + "Quickly read structured Facebook marketplace listing data.", + "Requires a valid Facebook marketplace listing URL.", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "facebook_company_reviews", + "dataset_id": "gd_m0dtqpiu1mbcyc2g86", + "description": "\n".join( + [ + "Quickly read structured Facebook company reviews data.", + "Requires a valid Facebook company URL and number of reviews.", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url", "num_of_reviews"], + }, + { + "id": "facebook_events", + "dataset_id": "gd_m14sd0to1jz48ppm51", + "description": "\n".join( + [ + "Quickly read structured Facebook events data.", + "Requires a valid Facebook event URL.", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "tiktok_profiles", + "dataset_id": "gd_l1villgoiiidt09ci", + "description": "\n".join( + [ + "Quickly read structured Tiktok profiles data.", + "Requires a valid Tiktok profile URL.", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "tiktok_posts", + "dataset_id": "gd_lu702nij2f790tmv9h", + "description": "\n".join( + [ + "Quickly read structured Tiktok post data.", + "Requires a valid Tiktok post URL.", + "This can be a cache lookup, so it can be more reliable than scraping", + ] + ), + "inputs": ["url"], + }, + { + "id": "tiktok_shop", + "dataset_id": "gd_m45m1u911dsa4274pi", + "description": "\n".join( + [ + "Quickly read structured Tiktok shop data.", + "Requires a valid Tiktok shop product URL.", + "This can be a cache lookup...", + ] + ), + "inputs": ["url"], + }, +] + + +class BrightDataDatasetTool(BaseTool): + """ + CrewAI-compatible tool for scraping structured data using Bright Data Datasets. + + Attributes: + name (str): Tool name displayed in the CrewAI environment. + description (str): Tool description shown to agents or users. + args_schema (Type[BaseModel]): Pydantic schema for validating input arguments. + """ + + name: str = "Bright Data Dataset Tool" + description: str = "Scrapes structured data using Bright Data Dataset API from a URL and optional input parameters" + args_schema: Type[BaseModel] = BrightDataDatasetToolSchema + dataset_type: Optional[str] = None + url: Optional[str] = None + format: str = "json" + zipcode: Optional[str] = None + additional_params: Optional[Dict[str, Any]] = None + + def __init__(self, dataset_type: str = None, url: str = None, format: str = "json", zipcode: str = None, additional_params: Dict[str, Any] = None): + super().__init__() + self.dataset_type = dataset_type + self.url = url + self.format = format + self.zipcode = zipcode + self.additional_params = additional_params + + def filter_dataset_by_id(self, target_id): + return [dataset for dataset in datasets if dataset["id"] == target_id] + + async def get_dataset_data_async( + self, + dataset_type: str, + output_format: str, + url: str, + zipcode: Optional[str] = None, + additional_params: Optional[Dict[str, Any]] = None, + polling_interval: int = 1, + ) -> Dict: + """ + Asynchronously trigger and poll Bright Data dataset scraping. 
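+
+        The flow: trigger a dataset collection job, poll its progress until the
+        snapshot is "ready" (or the timeout elapses), then download the snapshot
+        in the requested output format.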
+ + Args: + dataset_type (str): Bright Data Dataset Type. + url (str): Target URL to scrape. + zipcode (Optional[str]): Optional ZIP code for geo-specific data. + additional_params (Optional[Dict]): Extra API parameters. + polling_interval (int): Time interval in seconds between polling attempts. + + Returns: + Dict: Structured dataset result from Bright Data. + + Raises: + Exception: If any API step fails or the job fails. + TimeoutError: If polling times out before job completion. + """ + request_data = {"url": url} + if zipcode is not None: + request_data["zipcode"] = zipcode + + # Set additional parameters dynamically depending upon the dataset that is being requested + if additional_params: + request_data.update(additional_params) + + api_key = os.getenv("BRIGHT_DATA_API_KEY") + + headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + } + + dataset_id = "" + dataset = self.filter_dataset_by_id(dataset_type) + + if len(dataset) == 1: + dataset_id = dataset[0]["dataset_id"] + else: + raise ValueError( + f"Unable to find the dataset for {dataset_type}. Please make sure to pass a valid one" + ) + + async with aiohttp.ClientSession() as session: + # Step 1: Trigger job + async with session.post( + f"{BRIGHTDATA_API_URL}/datasets/v3/trigger", + params={"dataset_id": dataset_id, "include_errors": "true"}, + json=[request_data], + headers=headers, + ) as trigger_response: + if trigger_response.status != 200: + raise BrightDataDatasetToolException( + f"Trigger failed: {await trigger_response.text()}", + trigger_response.status, + ) + trigger_data = await trigger_response.json() + print(trigger_data) + snapshot_id = trigger_data.get("snapshot_id") + + # Step 2: Poll for completion + elapsed = 0 + while elapsed < timeout: + await asyncio.sleep(polling_interval) + elapsed += polling_interval + + async with session.get( + f"{BRIGHTDATA_API_URL}/datasets/v3/progress/{snapshot_id}", + headers=headers, + ) as status_response: + if status_response.status != 200: + raise BrightDataDatasetToolException( + f"Status check failed: {await status_response.text()}", + status_response.status, + ) + status_data = await status_response.json() + if status_data.get("status") == "ready": + print("Job is ready") + break + elif status_data.get("status") == "error": + raise BrightDataDatasetToolException( + f"Job failed: {status_data}", 0 + ) + else: + raise TimeoutError("Polling timed out before job completed.") + + # Step 3: Retrieve result + async with session.get( + f"{BRIGHTDATA_API_URL}/datasets/v3/snapshot/{snapshot_id}", + params={"format": output_format}, + headers=headers, + ) as snapshot_response: + if snapshot_response.status != 200: + raise BrightDataDatasetToolException( + f"Result fetch failed: {await snapshot_response.text()}", + snapshot_response.status, + ) + + return await snapshot_response.text() + + def _run(self, url: str = None, dataset_type: str = None, format: str = None, zipcode: str = None, additional_params: Dict[str, Any] = None, **kwargs: Any) -> Any: + dataset_type = dataset_type or self.dataset_type + output_format = format or self.format + url = url or self.url + zipcode = zipcode or self.zipcode + additional_params = additional_params or self.additional_params + + if not dataset_type: + raise ValueError("dataset_type is required either in constructor or method call") + if not url: + raise ValueError("url is required either in constructor or method call") + + valid_output_formats = {"json", "ndjson", "jsonl", "csv"} + if output_format not in 
valid_output_formats:
+            raise ValueError(
+                f"Unsupported output format: {output_format}. Must be one of {', '.join(valid_output_formats)}."
+            )
+
+        api_key = os.getenv("BRIGHT_DATA_API_KEY")
+        if not api_key:
+            raise ValueError("BRIGHT_DATA_API_KEY environment variable is required.")
+
+        try:
+            return asyncio.run(
+                self.get_dataset_data_async(
+                    dataset_type=dataset_type,
+                    output_format=output_format,
+                    url=url,
+                    zipcode=zipcode,
+                    additional_params=additional_params,
+                )
+            )
+        except TimeoutError as e:
+            return f"Timeout exception occurred in method get_dataset_data_async. Details - {str(e)}"
+        except BrightDataDatasetToolException as e:
+            return f"Exception occurred in method get_dataset_data_async. Details - {str(e)}"
+        except Exception as e:
+            return f"Bright Data API error: {str(e)}"
diff --git a/src/crewai_tools/tools/brightdata_tool/brightdata_serp.py b/src/crewai_tools/tools/brightdata_tool/brightdata_serp.py
new file mode 100644
index 000000000..3b1170713
--- /dev/null
+++ b/src/crewai_tools/tools/brightdata_tool/brightdata_serp.py
@@ -0,0 +1,204 @@
+import os
+import urllib.parse
+from typing import Any, Optional, Type
+
+import requests
+from crewai.tools import BaseTool
+from pydantic import BaseModel, Field
+from pydantic_settings import BaseSettings
+
+class BrightDataConfig(BaseSettings):
+    API_URL: str = "https://api.brightdata.com/request"
+    class Config:
+        env_prefix = "BRIGHTDATA_"
+
+class BrightDataSearchToolSchema(BaseModel):
+    """
+    Schema that defines the input arguments for the BrightDataSearchTool.
+
+    Attributes:
+        query (str): The search query to be executed (e.g., "latest AI news").
+        search_engine (Optional[str]): The search engine to use ("google", "bing", "yandex"). Default is "google".
+        country (Optional[str]): Two-letter country code for geo-targeting (e.g., "us", "in"). Default is "us".
+        language (Optional[str]): Language code for search results (e.g., "en", "es"). Default is "en".
+        search_type (Optional[str]): Type of search, such as "isch" (images), "nws" (news), "jobs", etc.
+        device_type (Optional[str]): Device type to simulate ("desktop", "mobile", "ios", "android"). Default is "desktop".
+        parse_results (Optional[bool]): If True, results will be returned in structured JSON. If False, raw HTML. Default is True.
+    """
+
+    query: str = Field(..., description="Search query to perform")
+    search_engine: Optional[str] = Field(
+        default="google",
+        description="Search engine domain (e.g., 'google', 'bing', 'yandex')",
+    )
+    country: Optional[str] = Field(
+        default="us",
+        description="Two-letter country code for geo-targeting (e.g., 'us', 'gb')",
+    )
+    language: Optional[str] = Field(
+        default="en",
+        description="Language code (e.g., 'en', 'es') used in the query URL",
+    )
+    search_type: Optional[str] = Field(
+        default=None,
+        description="Type of search (e.g., 'isch' for images, 'nws' for news)",
+    )
+    device_type: Optional[str] = Field(
+        default="desktop",
+        description="Device type to simulate (e.g., 'mobile', 'desktop', 'ios')",
+    )
+    parse_results: Optional[bool] = Field(
+        default=True,
+        description="Whether to parse and return JSON (True) or raw HTML/text (False)",
+    )
+
+
+class BrightDataSearchTool(BaseTool):
+    """
+    A web search tool that utilizes Bright Data's SERP API to perform queries and return either structured results
+    or raw page content from search engines like Google or Bing.
+
+    Attributes:
+        name (str): Tool name used by the agent.
+        description (str): A brief explanation of what the tool does.
+        args_schema (Type[BaseModel]): Schema class for validating tool arguments.
+        base_url (str): The Bright Data API endpoint used for making the POST request.
+        api_key (str): Bright Data API key loaded from the environment variable 'BRIGHT_DATA_API_KEY'.
+        zone (str): Zone identifier from Bright Data, loaded from the environment variable 'BRIGHT_DATA_ZONE'.
+
+    Raises:
+        ValueError: If API key or zone environment variables are not set.
+    """
+
+    name: str = "Bright Data SERP Search"
+    description: str = "Tool to perform web search using Bright Data SERP API."
+    args_schema: Type[BaseModel] = BrightDataSearchToolSchema
+    _config = BrightDataConfig()
+    base_url: str = ""
+    api_key: str = ""
+    zone: str = ""
+    query: Optional[str] = None
+    search_engine: str = "google"
+    country: str = "us"
+    language: str = "en"
+    search_type: Optional[str] = None
+    device_type: str = "desktop"
+    parse_results: bool = True
+
+    def __init__(self, query: str = None, search_engine: str = "google", country: str = "us", language: str = "en", search_type: str = None, device_type: str = "desktop", parse_results: bool = True):
+        super().__init__()
+        self.base_url = self._config.API_URL
+        self.query = query
+        self.search_engine = search_engine
+        self.country = country
+        self.language = language
+        self.search_type = search_type
+        self.device_type = device_type
+        self.parse_results = parse_results
+
+        self.api_key = os.getenv("BRIGHT_DATA_API_KEY")
+        self.zone = os.getenv("BRIGHT_DATA_ZONE")
+        if not self.api_key:
+            raise ValueError("BRIGHT_DATA_API_KEY environment variable is required.")
+        if not self.zone:
+            raise ValueError("BRIGHT_DATA_ZONE environment variable is required.")
+
+    def get_search_url(self, engine: str, query: str):
+        if engine == "yandex":
+            return f"https://yandex.com/search/?text={query}"
+        elif engine == "bing":
+            return f"https://www.bing.com/search?q={query}"
+        return f"https://www.google.com/search?q={query}"
+
+    def _run(self, query: str = None, search_engine: str = None, country: str = None, language: str = None, search_type: str = None, device_type: str = None, parse_results: bool = None, **kwargs) -> Any:
+        """
+        Executes a search query using Bright Data SERP API and returns results.
+
+        Args:
+            query (str): The search query string (URL encoded internally).
+            search_engine (str): The search engine to use (default: "google").
+            country (str): Country code for geotargeting (default: "us").
+            language (str): Language code for the query (default: "en").
+            search_type (str): Optional type of search such as "nws", "isch", "jobs".
+            device_type (str): Optional device type to simulate (e.g., "mobile", "ios", "desktop").
+            parse_results (bool): If True, returns structured data; else raw page (default: True).
+            results_count (str or int): Number of search results to fetch (default: "10").
+
+        Returns:
+            dict or str: Parsed JSON data from Bright Data if available, otherwise error message.
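+
+        Example (illustrative; assumes BRIGHT_DATA_API_KEY and BRIGHT_DATA_ZONE are set):
+            tool = BrightDataSearchTool()
+            tool._run(query="latest AI news", search_type="nws", country="us")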
+        """
+
+        query = query or self.query
+        search_engine = search_engine or self.search_engine
+        country = country or self.country
+        language = language or self.language
+        search_type = search_type or self.search_type
+        device_type = device_type or self.device_type
+        parse_results = parse_results if parse_results is not None else self.parse_results
+        results_count = kwargs.get("results_count", "10")
+
+        # Validate required parameters
+        if not query:
+            raise ValueError("query is required either in constructor or method call")
+
+        # Build the search URL
+        query = urllib.parse.quote(query)
+        url = self.get_search_url(search_engine, query)
+
+        # Add parameters to the URL
+        params = []
+
+        if country:
+            params.append(f"gl={country}")
+
+        if language:
+            params.append(f"hl={language}")
+
+        if results_count:
+            params.append(f"num={results_count}")
+
+        if parse_results:
+            params.append("brd_json=1")
+
+        if search_type:
+            if search_type == "jobs":
+                params.append("ibp=htl;jobs")
+            else:
+                params.append(f"tbm={search_type}")
+
+        if device_type:
+            if device_type == "mobile":
+                params.append("brd_mobile=1")
+            elif device_type == "ios":
+                params.append("brd_mobile=ios")
+            elif device_type == "android":
+                params.append("brd_mobile=android")
+
+        # Combine parameters with the URL
+        if params:
+            url += "&" + "&".join(params)
+
+        # Set up the API request parameters
+        request_params = {"zone": self.zone, "url": url, "format": "raw"}
+
+        request_params = {k: v for k, v in request_params.items() if v is not None}
+
+        headers = {
+            "Authorization": f"Bearer {self.api_key}",
+            "Content-Type": "application/json",
+        }
+
+        try:
+            response = requests.post(
+                self.base_url, json=request_params, headers=headers
+            )
+
+            print(f"Status code: {response.status_code}")
+            response.raise_for_status()
+
+            return response.text
+
+        except requests.RequestException as e:
+            return f"Error performing BrightData search: {str(e)}"
+        except Exception as e:
+            return f"Error fetching results: {str(e)}"
diff --git a/src/crewai_tools/tools/brightdata_tool/brightdata_unlocker.py b/src/crewai_tools/tools/brightdata_tool/brightdata_unlocker.py
new file mode 100644
index 000000000..fb8c2fb07
--- /dev/null
+++ b/src/crewai_tools/tools/brightdata_tool/brightdata_unlocker.py
@@ -0,0 +1,119 @@
+import os
+from typing import Any, Optional, Type
+
+import requests
+from crewai.tools import BaseTool
+from pydantic import BaseModel, Field
+from pydantic_settings import BaseSettings
+
+class BrightDataConfig(BaseSettings):
+    API_URL: str = "https://api.brightdata.com/request"
+    class Config:
+        env_prefix = "BRIGHTDATA_"
+
+class BrightDataUnlockerToolSchema(BaseModel):
+    """
+    Pydantic schema for input parameters used by the BrightDataWebUnlockerTool.
+
+    This schema defines the structure and validation for parameters passed when performing
+    a web scraping request using Bright Data's Web Unlocker.
+
+    Attributes:
+        url (str): The target URL to scrape.
+        format (Optional[str]): Format of the response returned by Bright Data. Defaults to 'raw'.
+        data_format (Optional[str]): Response data format ('markdown' by default; 'html' is the other option).
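+
+    Example (illustrative):
+        BrightDataUnlockerToolSchema(url="https://example.com", data_format="markdown")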
+    """
+
+    url: str = Field(..., description="URL to perform the web scraping")
+    format: Optional[str] = Field(
+        default="raw", description="Response format (raw is standard)"
+    )
+    data_format: Optional[str] = Field(
+        default="markdown", description="Response data format (markdown by default)"
+    )
+
+
+class BrightDataWebUnlockerTool(BaseTool):
+    """
+    A tool for performing web scraping using the Bright Data Web Unlocker API.
+
+    This tool allows automated and programmatic access to web pages by routing requests
+    through Bright Data's unlocking and proxy infrastructure, which can bypass bot
+    protection mechanisms like CAPTCHA, geo-restrictions, and anti-bot detection.
+
+    Attributes:
+        name (str): Name of the tool.
+        description (str): Description of what the tool does.
+        args_schema (Type[BaseModel]): Pydantic model schema for expected input arguments.
+        base_url (str): Base URL of the Bright Data Web Unlocker API.
+        api_key (str): Bright Data API key (must be set in the BRIGHT_DATA_API_KEY environment variable).
+        zone (str): Bright Data zone identifier (must be set in the BRIGHT_DATA_ZONE environment variable).
+
+    Methods:
+        _run(**kwargs: Any) -> Any:
+            Sends a scraping request to Bright Data's Web Unlocker API and returns the result.
+    """
+
+    name: str = "Bright Data Web Unlocker Scraping"
+    description: str = "Tool to perform web scraping using Bright Data Web Unlocker"
+    args_schema: Type[BaseModel] = BrightDataUnlockerToolSchema
+    _config = BrightDataConfig()
+    base_url: str = ""
+    api_key: str = ""
+    zone: str = ""
+    url: Optional[str] = None
+    format: str = "raw"
+    data_format: str = "markdown"
+
+    def __init__(self, url: str = None, format: str = "raw", data_format: str = "markdown"):
+        super().__init__()
+        self.base_url = self._config.API_URL
+        self.url = url
+        self.format = format
+        self.data_format = data_format
+
+        self.api_key = os.getenv("BRIGHT_DATA_API_KEY")
+        self.zone = os.getenv("BRIGHT_DATA_ZONE")
+        if not self.api_key:
+            raise ValueError("BRIGHT_DATA_API_KEY environment variable is required.")
+        if not self.zone:
+            raise ValueError("BRIGHT_DATA_ZONE environment variable is required.")
+
+    def _run(self, url: str = None, format: str = None, data_format: str = None, **kwargs: Any) -> Any:
+        url = url or self.url
+        format = format or self.format
+        data_format = data_format or self.data_format
+
+        if not url:
+            raise ValueError("url is required either in constructor or method call")
+
+        payload = {
+            "url": url,
+            "zone": self.zone,
+            "format": format,
+        }
+        valid_data_formats = {"html", "markdown"}
+        if data_format not in valid_data_formats:
+            raise ValueError(
+                f"Unsupported data format: {data_format}. Must be one of {', '.join(valid_data_formats)}."
+            )
+
+        if data_format == "markdown":
+            payload["data_format"] = "markdown"
+
+        headers = {
+            "Authorization": f"Bearer {self.api_key}",
+            "Content-Type": "application/json",
+        }
+
+        try:
+            response = requests.post(self.base_url, json=payload, headers=headers)
+            print(f"Status Code: {response.status_code}")
+            response.raise_for_status()
+
+            return response.text
+
+        except requests.RequestException as e:
+            return f"HTTP Error performing BrightData Web Unlocker Scrape: {e}\nResponse: {getattr(e.response, 'text', '')}"
+        except Exception as e:
+            return f"Error fetching results: {str(e)}"
diff --git a/tests/tools/__init__.py b/tests/tools/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/tools/brightdata_serp_tool_test.py b/tests/tools/brightdata_serp_tool_test.py
new file mode 100644
index 000000000..11ca018e8
--- /dev/null
+++ b/tests/tools/brightdata_serp_tool_test.py
@@ -0,0 +1,54 @@
+import unittest
+from unittest.mock import MagicMock, patch
+
+from crewai_tools.tools.brightdata_tool.brightdata_serp import BrightDataSearchTool
+
+
+class TestBrightDataSearchTool(unittest.TestCase):
+    @patch.dict(
+        "os.environ",
+        {"BRIGHT_DATA_API_KEY": "test_api_key", "BRIGHT_DATA_ZONE": "test_zone"},
+    )
+    def setUp(self):
+        self.tool = BrightDataSearchTool()
+
+    @patch("requests.post")
+    def test_run_successful_search(self, mock_post):
+        # Sample mock response
+        mock_response = MagicMock()
+        mock_response.status_code = 200
+        mock_response.text = "mock response text"
+        mock_post.return_value = mock_response
+
+        # Define search input
+        input_data = {
+            "query": "latest AI news",
+            "search_engine": "google",
+            "country": "us",
+            "language": "en",
+            "search_type": "nws",
+            "device_type": "desktop",
+            "parse_results": True,
+            "save_file": False,
+        }
+
+        result = self.tool._run(**input_data)
+
+        # Assertions
+        self.assertIsInstance(result, str)  # The tool returns response.text (a string)
+        mock_post.assert_called_once()
+
+    @patch("requests.post")
+    def test_run_with_request_exception(self, mock_post):
+        mock_post.side_effect = Exception("Timeout")
+
+        result = self.tool._run(query="AI", search_engine="google")
+        self.assertIn("Error", result)
+
+    def tearDown(self):
+        # Nothing to clean up; environment patches are scoped per test
+        pass
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/tools/brightdata_webunlocker_tool_test.py b/tests/tools/brightdata_webunlocker_tool_test.py
new file mode 100644
index 000000000..629cb2e87
--- /dev/null
+++ b/tests/tools/brightdata_webunlocker_tool_test.py
@@ -0,0 +1,64 @@
+from unittest.mock import Mock, patch
+
+import requests
+
+from crewai_tools.tools.brightdata_tool.brightdata_unlocker import (
+    BrightDataWebUnlockerTool,
+)
+
+
+@patch.dict(
+    "os.environ",
+    {"BRIGHT_DATA_API_KEY": "test_api_key", "BRIGHT_DATA_ZONE": "test_zone"},
+)
+@patch("crewai_tools.tools.brightdata_tool.brightdata_unlocker.requests.post")
+def test_run_success_html(mock_post):
+    mock_response = Mock()
+    mock_response.status_code = 200
+    mock_response.text = "Test"
+    mock_response.raise_for_status = Mock()
+    mock_post.return_value = mock_response
+
+    tool = BrightDataWebUnlockerTool()
+    result = tool._run(url="https://example.com", format="html", save_file=False)
+
+    assert result == "Test"
+
+
+@patch.dict(
+    "os.environ",
+    {"BRIGHT_DATA_API_KEY": "test_api_key", "BRIGHT_DATA_ZONE": "test_zone"},
+)
+@patch("crewai_tools.tools.brightdata_tool.brightdata_unlocker.requests.post")
+def test_run_success_json(mock_post):
+    mock_response = Mock()
+    mock_response.status_code = 200
+    
mock_response.text = "mock response text" + mock_response.raise_for_status = Mock() + mock_post.return_value = mock_response + + tool = BrightDataWebUnlockerTool() + result = tool._run(url="https://example.com", format="json") + + assert isinstance(result, str) + + +@patch.dict( + "os.environ", + {"BRIGHT_DATA_API_KEY": "test_api_key", "BRIGHT_DATA_ZONE": "test_zone"}, +) +@patch("crewai_tools.tools.brightdata_tool.brightdata_unlocker.requests.post") +def test_run_http_error(mock_post): + mock_response = Mock() + mock_response.status_code = 403 + mock_response.text = "Forbidden" + mock_response.raise_for_status.side_effect = requests.HTTPError( + response=mock_response + ) + mock_post.return_value = mock_response + + tool = BrightDataWebUnlockerTool() + result = tool._run(url="https://example.com") + + assert "HTTP Error" in result + assert "Forbidden" in result From 99e174e575ff7dd56f7e2aa712d1d17d0108dfbd Mon Sep 17 00:00:00 2001 From: nicoferdi96 Date: Wed, 13 Aug 2025 14:57:11 +0200 Subject: [PATCH 370/391] Stagehand tool improvements (#415) * Stagehand tool improvements This commit significantly improves the StagehandTool reliability and usability when working with CrewAI agents by addressing several critical issues: ## Key Improvements ### 1. Atomic Action Support - Added _extract_steps() method to break complex instructions into individual steps - Added _simplify_instruction() method for intelligent error recovery - Sequential execution of micro-actions with proper DOM settling between steps - Prevents token limit issues on complex pages by encouraging scoped actions ### 2. Enhanced Schema Design - Made instruction field optional to handle navigation-only commands - Added smart defaults for missing instructions based on command_type - Improved field descriptions to guide agents toward atomic actions with location context - Prevents "instruction Field required" validation errors ### 3. Intelligent API Key Management - Added _get_model_api_key() method with automatic detection based on model type - Support for OpenAI (GPT), Anthropic (Claude), and Google (Gemini) API keys - Removes need for manual model API key configuration ### 4. Robust Error Recovery - Step-by-step execution with individual error handling per atomic action - Automatic retry with simplified instructions when complex actions fail - Comprehensive error logging and reporting for debugging - Graceful degradation instead of complete failure ### 5. Token Management & Performance - Tool descriptions encourage atomic, scoped actions (e.g., "click search box in header") - Prevents "prompt too long" errors on complex pages like Wikipedia - Location-aware instruction patterns for better DOM targeting - Reduced observe-act cycles through better instruction decomposition ### 6. 
Enhanced Testing Support
- Comprehensive async mock objects for testing mode
- Proper async/sync compatibility for different execution contexts
- Enhanced resource cleanup and session management

* Update stagehand_tool.py

removing FixedStagehandTool in favour of StagehandTool

* removed comment

* Cleanup

Removed unused class
Improved tool description
---
 .../tools/stagehand_tool/stagehand_tool.py    | 545 +++++++++++------
 1 file changed, 338 insertions(+), 207 deletions(-)

diff --git a/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py b/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py
index 108575c3c..d3a61f914 100644
--- a/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py
+++ b/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py
@@ -1,7 +1,8 @@
 import asyncio
 import json
-import logging
-from typing import Dict, List, Optional, Type, Union, Any
+import os
+import re
+from typing import Any, Dict, List, Optional, Type, Union
 
 from pydantic import BaseModel, Field
 
@@ -9,14 +10,14 @@ from pydantic import BaseModel, Field
 _HAS_STAGEHAND = False
 try:
-    from stagehand import Stagehand, StagehandConfig, StagehandPage
+    from stagehand import Stagehand, StagehandConfig, StagehandPage, configure_logging
     from stagehand.schemas import (
         ActOptions,
         AvailableModel,
         ExtractOptions,
         ObserveOptions,
     )
-    from stagehand import configure_logging
+
     _HAS_STAGEHAND = True
 except ImportError:
     # Define type stubs for when stagehand is not installed
@@ -26,25 +27,19 @@ except ImportError:
     ActOptions = Any
     ExtractOptions = Any
     ObserveOptions = Any
-    
+
     # Mock configure_logging function
     def configure_logging(level=None, remove_logger_name=None, quiet_dependencies=None):
         pass
-    
+
     # Define only what's needed for class defaults
     class AvailableModel:
         CLAUDE_3_7_SONNET_LATEST = "anthropic.claude-3-7-sonnet-20240607"
+
 from crewai.tools import BaseTool
 
-class StagehandCommandType(str):
-    ACT = "act"
-    EXTRACT = "extract"
-    OBSERVE = "observe"
-    NAVIGATE = "navigate"
-
-
 class StagehandResult(BaseModel):
     """Result from a Stagehand operation.
 
@@ -68,9 +63,9 @@ class StagehandResult(BaseModel):
 class StagehandToolSchema(BaseModel):
     """Input for StagehandTool."""
 
-    instruction: str = Field(
-        ...,
-        description="Natural language instruction describing what you want to do on the website. Be specific about the action you want to perform, data to extract, or elements to observe. If your task is complex, break it down into simple, sequential steps. For example: 'Step 1: Navigate to https://example.com; Step 2: Click the login button; Step 3: Enter your credentials; Step 4: Submit the form.' Complex tasks like 'Search for OpenAI' should be broken down as: 'Step 1: Navigate to https://google.com; Step 2: Type OpenAI in the search box; Step 3: Press Enter or click the search button'.",
+    instruction: Optional[str] = Field(
+        None,
+        description="Single atomic action with location context. For reliability on complex pages, use ONE specific action with location hints. Good examples: 'Click the search input field in the header', 'Type Italy in the focused field', 'Press Enter', 'Click the first link in the results area'. Avoid combining multiple actions.
For 'navigate' command type, this can be omitted if only URL is provided.", ) url: Optional[str] = Field( None, @@ -78,19 +73,18 @@ class StagehandToolSchema(BaseModel): ) command_type: Optional[str] = Field( "act", - description="""The type of command to execute (choose one): + description="""The type of command to execute (choose one): - 'act': Perform an action like clicking buttons, filling forms, etc. (default) - 'navigate': Specifically navigate to a URL - - 'extract': Extract structured data from the page + - 'extract': Extract structured data from the page - 'observe': Identify and analyze elements on the page """, ) class StagehandTool(BaseTool): - package_dependencies: List[str] = ["stagehand"] """ - A tool that uses Stagehand to automate web browser interactions using natural language. + A tool that uses Stagehand to automate web browser interactions using natural language with atomic action handling. Stagehand allows AI agents to interact with websites through a browser, performing actions like clicking buttons, filling forms, and extracting data. @@ -101,24 +95,6 @@ class StagehandTool(BaseTool): 3. extract - Extract structured data from web pages 4. observe - Identify and analyze elements on a page - Usage patterns: - 1. Using as a context manager (recommended): - ```python - with StagehandTool() as tool: - agent = Agent(tools=[tool]) - # ... use the agent - ``` - - 2. Manual resource management: - ```python - tool = StagehandTool() - try: - agent = Agent(tools=[tool]) - # ... use the agent - finally: - tool.close() - ``` - Usage examples: - Navigate to a website: instruction="Go to the homepage", url="https://example.com" - Click a button: instruction="Click the login button" @@ -136,7 +112,7 @@ class StagehandTool(BaseTool): name: str = "Web Automation Tool" description: str = """Use this tool to control a web browser and interact with websites using natural language. - + Capabilities: - Navigate to websites and follow links - Click buttons, links, and other elements @@ -144,13 +120,18 @@ class StagehandTool(BaseTool): - Search within websites - Extract information from web pages - Identify and analyze elements on a page - + To use this tool, provide a natural language instruction describing what you want to do. 
+ For reliability on complex pages, use specific, atomic instructions with location hints: + - Good: "Click the search box in the header" + - Good: "Type 'Italy' in the focused field" + - Bad: "Search for Italy and click the first result" + For different types of tasks, specify the command_type: - - 'act': For performing actions (default) - - 'navigate': For navigating to a URL (shorthand for act with navigation) - - 'extract': For getting data from the page - - 'observe': For finding and analyzing elements + - 'act': For performing one atomic action (default) + - 'navigate': For navigating to a URL + - 'extract': For getting data from a specific page section + - 'observe': For finding elements in a specific area """ args_schema: Type[BaseModel] = StagehandToolSchema @@ -159,18 +140,21 @@ class StagehandTool(BaseTool): project_id: Optional[str] = None model_api_key: Optional[str] = None model_name: Optional[AvailableModel] = AvailableModel.CLAUDE_3_7_SONNET_LATEST - server_url: Optional[str] = "http://api.stagehand.browserbase.com/v1" + server_url: Optional[str] = "https://api.stagehand.browserbase.com/v1" headless: bool = False dom_settle_timeout_ms: int = 3000 self_heal: bool = True wait_for_captcha_solves: bool = True verbose: int = 1 + # Token management settings + max_retries_on_token_limit: int = 3 + use_simplified_dom: bool = True + # Instance variables _stagehand: Optional[Stagehand] = None _page: Optional[StagehandPage] = None _session_id: Optional[str] = None - _logger: Optional[logging.Logger] = None _testing: bool = False def __init__( @@ -186,7 +170,7 @@ class StagehandTool(BaseTool): self_heal: Optional[bool] = None, wait_for_captcha_solves: Optional[bool] = None, verbose: Optional[int] = None, - _testing: bool = False, # Flag to bypass dependency check in tests + _testing: bool = False, **kwargs, ): # Set testing flag early so that other init logic can rely on it @@ -194,21 +178,13 @@ class StagehandTool(BaseTool): super().__init__(**kwargs) # Set up logger + import logging + self._logger = logging.getLogger(__name__) - # For backward compatibility - browserbase_api_key = kwargs.get("browserbase_api_key") - browserbase_project_id = kwargs.get("browserbase_project_id") - - if api_key: - self.api_key = api_key - elif browserbase_api_key: - self.api_key = browserbase_api_key - - if project_id: - self.project_id = project_id - elif browserbase_project_id: - self.project_id = browserbase_project_id + # Set configuration from parameters or environment + self.api_key = api_key or os.getenv("BROWSERBASE_API_KEY") + self.project_id = project_id or os.getenv("BROWSERBASE_PROJECT_ID") if model_api_key: self.model_api_key = model_api_key @@ -230,226 +206,340 @@ class StagehandTool(BaseTool): self._session_id = session_id # Configure logging based on verbosity level - log_level = logging.ERROR - if self.verbose == 1: - log_level = logging.INFO - elif self.verbose == 2: - log_level = logging.WARNING - elif self.verbose >= 3: - log_level = logging.DEBUG - - configure_logging( - level=log_level, remove_logger_name=True, quiet_dependencies=True - ) + if not self._testing: + log_level = {1: "INFO", 2: "WARNING", 3: "DEBUG"}.get(self.verbose, "ERROR") + configure_logging( + level=log_level, remove_logger_name=True, quiet_dependencies=True + ) self._check_required_credentials() def _check_required_credentials(self): """Validate that required credentials are present.""" - # Check if stagehand is available, but only if we're not in testing mode if not self._testing and not _HAS_STAGEHAND: raise 
ImportError( "`stagehand` package not found, please run `uv add stagehand`" ) - + if not self.api_key: raise ValueError("api_key is required (or set BROWSERBASE_API_KEY in env).") if not self.project_id: raise ValueError( "project_id is required (or set BROWSERBASE_PROJECT_ID in env)." ) - if not self.model_api_key: - raise ValueError( - "model_api_key is required (or set OPENAI_API_KEY or ANTHROPIC_API_KEY in env)." + + def __del__(self): + """Ensure cleanup on deletion""" + try: + self.close() + except Exception: + pass + + def _get_model_api_key(self): + """Get the appropriate API key based on the model being used.""" + # Check model type and get appropriate key + model_str = str(self.model_name) + if "gpt" in model_str.lower(): + return self.model_api_key or os.getenv("OPENAI_API_KEY") + elif "claude" in model_str.lower() or "anthropic" in model_str.lower(): + return self.model_api_key or os.getenv("ANTHROPIC_API_KEY") + elif "gemini" in model_str.lower(): + return self.model_api_key or os.getenv("GOOGLE_API_KEY") + else: + # Default to trying OpenAI, then Anthropic + return ( + self.model_api_key + or os.getenv("OPENAI_API_KEY") + or os.getenv("ANTHROPIC_API_KEY") ) async def _setup_stagehand(self, session_id: Optional[str] = None): """Initialize Stagehand if not already set up.""" - + # If we're in testing mode, return mock objects if self._testing: if not self._stagehand: - # Create a minimal mock for testing with non-async methods + # Create mock objects for testing class MockPage: - def act(self, options): - mock_result = type('MockResult', (), {})() - mock_result.model_dump = lambda: {"message": "Action completed successfully"} + async def act(self, options): + mock_result = type("MockResult", (), {})() + mock_result.model_dump = lambda: { + "message": "Action completed successfully" + } return mock_result - - def goto(self, url): + + async def goto(self, url): return None - - def extract(self, options): - mock_result = type('MockResult', (), {})() + + async def extract(self, options): + mock_result = type("MockResult", (), {})() mock_result.model_dump = lambda: {"data": "Extracted content"} return mock_result - - def observe(self, options): - mock_result1 = type('MockResult', (), {"description": "Test element", "method": "click"})() + + async def observe(self, options): + mock_result1 = type( + "MockResult", + (), + {"description": "Test element", "method": "click"}, + )() return [mock_result1] - + + async def wait_for_load_state(self, state): + return None + class MockStagehand: def __init__(self): self.page = MockPage() self.session_id = "test-session-id" - - def init(self): + + async def init(self): return None - - def close(self): + + async def close(self): return None - + self._stagehand = MockStagehand() - # No need to await the init call in test mode - self._stagehand.init() + await self._stagehand.init() self._page = self._stagehand.page self._session_id = self._stagehand.session_id - + return self._stagehand, self._page # Normal initialization for non-testing mode if not self._stagehand: - self._logger.debug("Initializing Stagehand") - # Create model client options with the API key - model_client_options = {"apiKey": self.model_api_key} + # Get the appropriate API key based on model type + model_api_key = self._get_model_api_key() - # Build the StagehandConfig object + if not model_api_key: + raise ValueError( + "No appropriate API key found for model. 
Please set OPENAI_API_KEY, ANTHROPIC_API_KEY, or GOOGLE_API_KEY" + ) + + # Build the StagehandConfig with proper parameter names config = StagehandConfig( env="BROWSERBASE", - api_key=self.api_key, - project_id=self.project_id, - headless=self.headless, - dom_settle_timeout_ms=self.dom_settle_timeout_ms, - model_name=self.model_name, - self_heal=self.self_heal, - wait_for_captcha_solves=self.wait_for_captcha_solves, - model_client_options=model_client_options, + apiKey=self.api_key, # Browserbase API key (camelCase) + projectId=self.project_id, # Browserbase project ID (camelCase) + modelApiKey=model_api_key, # LLM API key - auto-detected based on model + modelName=self.model_name, + apiUrl=self.server_url + if self.server_url + else "https://api.stagehand.browserbase.com/v1", + domSettleTimeoutMs=self.dom_settle_timeout_ms, + selfHeal=self.self_heal, + waitForCaptchaSolves=self.wait_for_captcha_solves, verbose=self.verbose, - session_id=session_id or self._session_id, + browserbaseSessionID=session_id or self._session_id, ) - # Initialize Stagehand with config and server_url - self._stagehand = Stagehand(config=config, server_url=self.server_url) + # Initialize Stagehand with config + self._stagehand = Stagehand(config=config) # Initialize the Stagehand instance await self._stagehand.init() self._page = self._stagehand.page self._session_id = self._stagehand.session_id - self._logger.info(f"Session ID: {self._stagehand.session_id}") - self._logger.info( - f"Browser session: https://www.browserbase.com/sessions/{self._stagehand.session_id}" - ) return self._stagehand, self._page + def _extract_steps(self, instruction: str) -> List[str]: + """Extract individual steps from multi-step instructions""" + # Check for numbered steps (Step 1:, Step 2:, etc.) 
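        # Illustrative parses (a hypothetical sketch, not part of this patch;
        # the sample instructions below are invented):
        #   "Step 1: Open https://example.com Step 2: Click login"
        #     -> ["Open https://example.com", "Click login"]
        #   "click the search box; type Italy; press Enter"
        #     -> ["click the search box", "type Italy", "press Enter"]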
+ if re.search(r"Step \d+:", instruction, re.IGNORECASE): + steps = re.findall( + r"Step \d+:\s*([^;]+?)(?=Step \d+:|$)", + instruction, + re.IGNORECASE | re.DOTALL, + ) + return [step.strip() for step in steps if step.strip()] + # Check for semicolon-separated instructions + elif ";" in instruction: + return [step.strip() for step in instruction.split(";") if step.strip()] + else: + return [instruction] + + def _simplify_instruction(self, instruction: str) -> str: + """Simplify complex instructions to basic actions""" + # Extract the core action from complex instructions + instruction_lower = instruction.lower() + + if "search" in instruction_lower and "click" in instruction_lower: + # For search tasks, focus on the search action first + if "type" in instruction_lower or "enter" in instruction_lower: + return "click on the search input field" + else: + return "search for content on the page" + elif "click" in instruction_lower: + # Extract what to click + if "button" in instruction_lower: + return "click the button" + elif "link" in instruction_lower: + return "click the link" + elif "search" in instruction_lower: + return "click the search field" + else: + return "click on the element" + elif "type" in instruction_lower or "enter" in instruction_lower: + return "type in the input field" + else: + return instruction # Return as-is if can't simplify + async def _async_run( self, - instruction: str, + instruction: Optional[str] = None, url: Optional[str] = None, command_type: str = "act", - ) -> StagehandResult: - """Asynchronous implementation of the tool.""" + ): + """Override _async_run with improved atomic action handling""" + + # Handle missing instruction based on command type + if not instruction: + if command_type == "navigate" and url: + instruction = f"Navigate to {url}" + elif command_type == "observe": + instruction = "Observe elements on the page" + elif command_type == "extract": + instruction = "Extract information from the page" + else: + instruction = "Perform the requested action" + + # For testing mode, use parent implementation + if self._testing: + return await super()._async_run(instruction, url, command_type) + try: - # Special handling for test mode to avoid coroutine issues - if self._testing: - # Return predefined mock results based on command type - if command_type.lower() == "act": - return StagehandResult( - success=True, - data={"message": "Action completed successfully"} - ) - elif command_type.lower() == "navigate": - return StagehandResult( - success=True, - data={ - "url": url or "https://example.com", - "message": f"Successfully navigated to {url or 'https://example.com'}", - }, - ) - elif command_type.lower() == "extract": - return StagehandResult( - success=True, - data={"data": "Extracted content", "metadata": {"source": "test"}} - ) - elif command_type.lower() == "observe": - return StagehandResult( - success=True, - data=[ - {"index": 1, "description": "Test element", "method": "click"} - ], - ) - else: - return StagehandResult( - success=False, - data={}, - error=f"Unknown command type: {command_type}" - ) - - # Normal execution for non-test mode - stagehand, page = await self._setup_stagehand(self._session_id) + _, page = await self._setup_stagehand(self._session_id) self._logger.info( f"Executing {command_type} with instruction: {instruction}" ) + # Get the API key to pass to model operations + model_api_key = self._get_model_api_key() + model_client_options = {"apiKey": model_api_key} + + # Always navigate first if URL is provided and we're doing 
actions + if url and command_type.lower() == "act": + self._logger.info(f"Navigating to {url} before performing actions") + await page.goto(url) + await page.wait_for_load_state("networkidle") + # Small delay to ensure page is fully loaded + await asyncio.sleep(1) + # Process according to command type if command_type.lower() == "act": - # Create act options - act_options = ActOptions( - action=instruction, - model_name=self.model_name, - dom_settle_timeout_ms=self.dom_settle_timeout_ms, - ) + # Extract steps from complex instructions + steps = self._extract_steps(instruction) + self._logger.info(f"Extracted {len(steps)} steps: {steps}") - # Execute the act command - result = await page.act(act_options) - self._logger.info(f"Act operation completed: {result}") - return StagehandResult(success=True, data=result.model_dump()) + results = [] + for i, step in enumerate(steps): + self._logger.info(f"Executing step {i + 1}/{len(steps)}: {step}") + + try: + # Create act options with API key for each step + from stagehand.schemas import ActOptions + + act_options = ActOptions( + action=step, + modelName=self.model_name, + domSettleTimeoutMs=self.dom_settle_timeout_ms, + modelClientOptions=model_client_options, + ) + + result = await page.act(act_options) + results.append(result.model_dump()) + + # Small delay between steps to let DOM settle + if i < len(steps) - 1: # Don't delay after last step + await asyncio.sleep(0.5) + + except Exception as step_error: + error_msg = f"Step failed: {step_error}" + self._logger.warning(f"Step {i + 1} failed: {error_msg}") + + # Try with simplified instruction + try: + simplified = self._simplify_instruction(step) + if simplified != step: + self._logger.info( + f"Retrying with simplified instruction: {simplified}" + ) + + act_options = ActOptions( + action=simplified, + modelName=self.model_name, + domSettleTimeoutMs=self.dom_settle_timeout_ms, + modelClientOptions=model_client_options, + ) + + result = await page.act(act_options) + results.append(result.model_dump()) + else: + # If we can't simplify or retry fails, record the error + results.append({"error": error_msg, "step": step}) + except Exception as retry_error: + self._logger.error(f"Retry also failed: {retry_error}") + results.append({"error": str(retry_error), "step": step}) + + # Return combined results + if len(results) == 1: + # Single step, return as-is + if "error" in results[0]: + return self._format_result( + False, results[0], results[0]["error"] + ) + return self._format_result(True, results[0]) + else: + # Multiple steps, return all results + has_errors = any("error" in result for result in results) + return self._format_result(not has_errors, {"steps": results}) elif command_type.lower() == "navigate": # For navigation, use the goto method directly - target_url = url - - if not target_url: + if not url: error_msg = "No URL provided for navigation. Please provide a URL." 
self._logger.error(error_msg) - return StagehandResult(success=False, data={}, error=error_msg) + return self._format_result(False, {}, error_msg) - # Navigate using the goto method - result = await page.goto(target_url) - self._logger.info(f"Navigate operation completed to {target_url}") - return StagehandResult( - success=True, - data={ - "url": target_url, - "message": f"Successfully navigated to {target_url}", + result = await page.goto(url) + self._logger.info(f"Navigate operation completed to {url}") + return self._format_result( + True, + { + "url": url, + "message": f"Successfully navigated to {url}", }, ) elif command_type.lower() == "extract": - # Create extract options + # Create extract options with API key + from stagehand.schemas import ExtractOptions + extract_options = ExtractOptions( instruction=instruction, - model_name=self.model_name, - dom_settle_timeout_ms=self.dom_settle_timeout_ms, - use_text_extract=True, + modelName=self.model_name, + domSettleTimeoutMs=self.dom_settle_timeout_ms, + useTextExtract=True, + modelClientOptions=model_client_options, # Add API key here ) - # Execute the extract command result = await page.extract(extract_options) self._logger.info(f"Extract operation completed successfully {result}") - return StagehandResult(success=True, data=result.model_dump()) + return self._format_result(True, result.model_dump()) elif command_type.lower() == "observe": - # Create observe options + # Create observe options with API key + from stagehand.schemas import ObserveOptions + observe_options = ObserveOptions( instruction=instruction, - model_name=self.model_name, - only_visible=True, - dom_settle_timeout_ms=self.dom_settle_timeout_ms, + modelName=self.model_name, + onlyVisible=True, + domSettleTimeoutMs=self.dom_settle_timeout_ms, + modelClientOptions=model_client_options, # Add API key here ) - # Execute the observe command results = await page.observe(observe_options) # Format the observation results @@ -466,21 +556,25 @@ class StagehandTool(BaseTool): self._logger.info( f"Observe operation completed with {len(formatted_results)} elements found" ) - return StagehandResult(success=True, data=formatted_results) + return self._format_result(True, formatted_results) else: - error_msg = f"Unknown command type: {command_type}. Please use 'act', 'navigate', 'extract', or 'observe'." 
+ error_msg = f"Unknown command type: {command_type}" self._logger.error(error_msg) - return StagehandResult(success=False, data={}, error=error_msg) + return self._format_result(False, {}, error_msg) except Exception as e: error_msg = f"Error using Stagehand: {str(e)}" self._logger.error(f"Operation failed: {error_msg}") - return StagehandResult(success=False, data={}, error=error_msg) + return self._format_result(False, {}, error_msg) + + def _format_result(self, success, data, error=None): + """Helper to format results consistently""" + return StagehandResult(success=success, data=data, error=error) def _run( self, - instruction: str, + instruction: Optional[str] = None, url: Optional[str] = None, command_type: str = "act", ) -> str: @@ -495,14 +589,28 @@ class StagehandTool(BaseTool): Returns: The result of the browser automation task """ + # Handle missing instruction based on command type + if not instruction: + if command_type == "navigate" and url: + instruction = f"Navigate to {url}" + elif command_type == "observe": + instruction = "Observe elements on the page" + elif command_type == "extract": + instruction = "Extract information from the page" + else: + instruction = "Perform the requested action" # Create an event loop if we're not already in one try: loop = asyncio.get_event_loop() if loop.is_running(): # We're in an existing event loop, use it - result = asyncio.run_coroutine_threadsafe( - self._async_run(instruction, url, command_type), loop - ).result() + import concurrent.futures + + with concurrent.futures.ThreadPoolExecutor() as executor: + future = executor.submit( + asyncio.run, self._async_run(instruction, url, command_type) + ) + result = future.result() else: # We have a loop but it's not running result = loop.run_until_complete( @@ -512,7 +620,23 @@ class StagehandTool(BaseTool): # Format the result for output if result.success: if command_type.lower() == "act": - return f"Action result: {result.data.get('message', 'Completed')}" + if isinstance(result.data, dict) and "steps" in result.data: + # Multiple steps + step_messages = [] + for i, step in enumerate(result.data["steps"]): + if "error" in step: + step_messages.append( + f"Step {i + 1}: Failed - {step['error']}" + ) + else: + step_messages.append( + f"Step {i + 1}: {step.get('message', 'Completed')}" + ) + return "\n".join(step_messages) + else: + return ( + f"Action result: {result.data.get('message', 'Completed')}" + ) elif command_type.lower() == "extract": return f"Extracted data: {json.dumps(result.data, indent=2)}" elif command_type.lower() == "observe": @@ -525,7 +649,6 @@ class StagehandTool(BaseTool): formatted_results.append( f"Suggested action: {element['method']}" ) - return "\n".join(formatted_results) else: return json.dumps(result.data, indent=2) @@ -551,7 +674,7 @@ class StagehandTool(BaseTool): self._stagehand = None self._page = None return - + if self._stagehand: await self._stagehand.close() self._stagehand = None @@ -565,7 +688,7 @@ class StagehandTool(BaseTool): self._stagehand = None self._page = None return - + if self._stagehand: try: # Handle both synchronous and asynchronous cases @@ -574,7 +697,15 @@ class StagehandTool(BaseTool): try: loop = asyncio.get_event_loop() if loop.is_running(): - asyncio.run_coroutine_threadsafe(self._async_close(), loop).result() + import concurrent.futures + + with ( + concurrent.futures.ThreadPoolExecutor() as executor + ): + future = executor.submit( + asyncio.run, self._async_close() + ) + future.result() else: 
loop.run_until_complete(self._async_close()) except RuntimeError: @@ -584,11 +715,10 @@ class StagehandTool(BaseTool): self._stagehand.close() except Exception as e: # Log but don't raise - we're cleaning up - if self._logger: - self._logger.error(f"Error closing Stagehand: {str(e)}") - + print(f"Error closing Stagehand: {str(e)}") + self._stagehand = None - + if self._page: self._page = None @@ -599,3 +729,4 @@ class StagehandTool(BaseTool): def __exit__(self, exc_type, exc_val, exc_tb): """Exit the context manager and clean up resources.""" self.close() + From 16d613488b16445ebd9d18a008bcedfaee790e4a Mon Sep 17 00:00:00 2001 From: Gabe Milani Date: Thu, 14 Aug 2025 18:52:56 -0300 Subject: [PATCH 371/391] fix: use json_schema_extra instead of deprecated Field extra args (#417) --- .../tools/exa_tools/exa_search_tool.py | 15 ++++++++++----- tests/tools/test_import_without_warnings.py | 10 ++++++++++ 2 files changed, 20 insertions(+), 5 deletions(-) create mode 100644 tests/tools/test_import_without_warnings.py diff --git a/src/crewai_tools/tools/exa_tools/exa_search_tool.py b/src/crewai_tools/tools/exa_tools/exa_search_tool.py index b3d97d7af..332576039 100644 --- a/src/crewai_tools/tools/exa_tools/exa_search_tool.py +++ b/src/crewai_tools/tools/exa_tools/exa_search_tool.py @@ -1,7 +1,8 @@ -from typing import Any, Optional, Type, List -from pydantic import BaseModel, Field -from crewai.tools import BaseTool, EnvVar import os +from typing import Any, List, Optional, Type + +from crewai.tools import BaseTool, EnvVar +from pydantic import BaseModel, Field try: from exa_py import Exa @@ -38,10 +39,14 @@ class EXASearchTool(BaseTool): type: Optional[str] = "auto" package_dependencies: List[str] = ["exa_py"] api_key: Optional[str] = Field( - default_factory=lambda: os.getenv("EXA_API_KEY"), description="API key for Exa services", required=False + default_factory=lambda: os.getenv("EXA_API_KEY"), + description="API key for Exa services", + json_schema_extra={"required": False}, ) env_vars: List[EnvVar] = [ - EnvVar(name="EXA_API_KEY", description="API key for Exa services", required=False), + EnvVar( + name="EXA_API_KEY", description="API key for Exa services", required=False + ), ] def __init__( diff --git a/tests/tools/test_import_without_warnings.py b/tests/tools/test_import_without_warnings.py new file mode 100644 index 000000000..5635832ed --- /dev/null +++ b/tests/tools/test_import_without_warnings.py @@ -0,0 +1,10 @@ +import pytest +from pydantic.warnings import PydanticDeprecatedSince20 + + +@pytest.mark.filterwarnings("error", category=PydanticDeprecatedSince20) +def test_import_tools_without_pydantic_deprecation_warnings(): + # This test is to ensure that the import of crewai_tools does not raise any Pydantic deprecation warnings. 
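    # With the filterwarnings marker above set to "error", the deprecated
    # pattern this commit removes (passing extra keyword arguments such as
    # required=False directly to Field) would surface here as a hard failure
    # at import time rather than a silent warning.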
+ import crewai_tools + + assert crewai_tools From 23a16eb4468f9638b80d604ba24dbdf703d5db3b Mon Sep 17 00:00:00 2001 From: Joao Moura Date: Mon, 18 Aug 2025 22:40:30 -0700 Subject: [PATCH 372/391] working around OAI new update for now --- src/crewai_tools/tools/rag/rag_tool.py | 115 ++++++++++++++++++++++++- 1 file changed, 114 insertions(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/rag/rag_tool.py b/src/crewai_tools/tools/rag/rag_tool.py index f7e785bd7..e56170480 100644 --- a/src/crewai_tools/tools/rag/rag_tool.py +++ b/src/crewai_tools/tools/rag/rag_tool.py @@ -1,4 +1,8 @@ import portalocker +import os +import tempfile +from contextlib import contextmanager +from copy import deepcopy from abc import ABC, abstractmethod from typing import Any @@ -7,6 +11,111 @@ from pydantic import BaseModel, ConfigDict, Field, model_validator from crewai.tools import BaseTool +def _fix_openai_config(config: dict[str, Any]) -> dict[str, Any]: + """Fix deprecated OpenAI configuration parameters in a config dictionary.""" + if not config: + return config + + # Create a deep copy to avoid modifying the original + fixed_config = deepcopy(config) + + def _is_azure_config(cfg: dict[str, Any]) -> bool: + """Determine if this is an Azure OpenAI configuration.""" + # Check for explicit Azure indicators + if cfg.get('provider') == 'azure_openai': + return True + + # Check for Azure URLs in various fields + for field in ['openai_api_base', 'base_url', 'api_base']: + url = cfg.get(field, '') + if isinstance(url, str) and 'azure' in url.lower(): + return True + + # Check if deployment is present (common in Azure configs) + # But only if we also have other Azure indicators + if 'deployment' in cfg: + for field in ['openai_api_base', 'base_url', 'api_base']: + url = cfg.get(field, '') + if isinstance(url, str) and 'azure' in url.lower(): + return True + + return False + + def _fix_config_recursively(cfg: dict[str, Any]) -> dict[str, Any]: + """Recursively fix OpenAI config parameters.""" + if not isinstance(cfg, dict): + return cfg + + # Only fix if this is definitely an Azure configuration + if _is_azure_config(cfg): + # Fix deprecated Azure OpenAI parameters + if 'openai_api_base' in cfg and 'azure_endpoint' not in cfg: + cfg['azure_endpoint'] = cfg.pop('openai_api_base') + + if 'base_url' in cfg and 'azure_endpoint' not in cfg: + # Only convert base_url to azure_endpoint for Azure URLs + base_url = cfg.get('base_url', '') + if 'openai.azure.com' in base_url: + cfg['azure_endpoint'] = cfg.pop('base_url') + + # Handle deployment -> azure_deployment conversion for Azure configs + if 'deployment' in cfg and 'azure_deployment' not in cfg: + cfg['azure_deployment'] = cfg.pop('deployment') + + # For non-Azure configs, we might still need to handle some deprecated parameters + # but we should NOT convert them to Azure format + else: + # For regular OpenAI configs, just remove the deprecated openai_api_base if present + # since it's not valid for regular OpenAI (should use base_url instead) + if 'openai_api_base' in cfg and 'base_url' not in cfg: + # Only convert to base_url if it's NOT an Azure URL + api_base = cfg.get('openai_api_base', '') + if isinstance(api_base, str) and 'openai.azure.com' not in api_base: + cfg['base_url'] = cfg.pop('openai_api_base') + + # Recursively fix nested dictionaries + for key, value in cfg.items(): + if isinstance(value, dict): + cfg[key] = _fix_config_recursively(value) + + return cfg + + return _fix_config_recursively(fixed_config) + + +@contextmanager +def 
_temporarily_unset_env_vars(): + """Temporarily unset problematic environment variables that cause OpenAI validation issues.""" + problematic_vars = [ + 'AZURE_API_BASE', + 'OPENAI_API_BASE', + 'OPENAI_BASE_URL' + ] + + # Store original values + original_values = {} + for var in problematic_vars: + if var in os.environ: + original_values[var] = os.environ[var] + del os.environ[var] + + try: + # Set the correct Azure environment variables if we had AZURE_API_BASE + if 'AZURE_API_BASE' in original_values: + os.environ['AZURE_OPENAI_ENDPOINT'] = original_values['AZURE_API_BASE'] + + yield + finally: + # Restore original values + for var, value in original_values.items(): + os.environ[var] = value + + # Clean up the temporary Azure endpoint we set + if 'AZURE_API_BASE' in original_values and 'AZURE_OPENAI_ENDPOINT' in os.environ: + if os.environ['AZURE_OPENAI_ENDPOINT'] == original_values['AZURE_API_BASE']: + del os.environ['AZURE_OPENAI_ENDPOINT'] + + class Adapter(BaseModel, ABC): model_config = ConfigDict(arbitrary_types_allowed=True) @@ -44,7 +153,11 @@ class RagTool(BaseTool): from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter with portalocker.Lock("crewai-rag-tool.lock", timeout=10): - app = App.from_config(config=self.config) if self.config else App() + # Fix both environment variables and config parameters + with _temporarily_unset_env_vars(): + # Fix deprecated OpenAI parameters in config + fixed_config = _fix_openai_config(self.config) + app = App.from_config(config=fixed_config) if fixed_config else App() self.adapter = EmbedchainAdapter( embedchain_app=app, summarize=self.summarize From 1ce016df8b3b43eb7a84ff290fe5df9a2814d685 Mon Sep 17 00:00:00 2001 From: Joao Moura Date: Tue, 19 Aug 2025 00:09:25 -0700 Subject: [PATCH 373/391] Revert "working around OAI new update for now" This reverts commit 23a16eb4468f9638b80d604ba24dbdf703d5db3b. 
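For reference, the heart of the workaround being reverted is a key-renaming pass over the embedchain config: decide whether the config targets Azure OpenAI, then migrate deprecated parameter names before `App.from_config` consumes them. A condensed sketch of that logic (hypothetical helper name `normalize_openai_config`; the key names mirror `_fix_openai_config` from the commit above, minus its recursive walk over nested dicts):

```python
from copy import deepcopy
from typing import Any


def normalize_openai_config(config: dict[str, Any]) -> dict[str, Any]:
    """Rename deprecated OpenAI config keys, Azure-aware (illustrative only)."""
    cfg = deepcopy(config)
    is_azure = cfg.get("provider") == "azure_openai" or any(
        "azure" in str(cfg.get(key, "")).lower()
        for key in ("openai_api_base", "base_url", "api_base")
    )
    if is_azure:
        # Azure configs: deprecated names map onto azure_* equivalents.
        if "openai_api_base" in cfg and "azure_endpoint" not in cfg:
            cfg["azure_endpoint"] = cfg.pop("openai_api_base")
        if "deployment" in cfg and "azure_deployment" not in cfg:
            cfg["azure_deployment"] = cfg.pop("deployment")
    elif "openai_api_base" in cfg and "base_url" not in cfg:
        # Plain OpenAI configs: openai_api_base was superseded by base_url.
        cfg["base_url"] = cfg.pop("openai_api_base")
    return cfg


# {"openai_api_base": "https://acme.openai.azure.com", "deployment": "gpt-4o"}
# -> {"azure_endpoint": "https://acme.openai.azure.com", "azure_deployment": "gpt-4o"}
```

The actual patch also recursed into nested dicts and temporarily juggled AZURE_API_BASE-style environment variables; both are omitted here for brevity.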
--- src/crewai_tools/tools/rag/rag_tool.py | 115 +------------------------ 1 file changed, 1 insertion(+), 114 deletions(-) diff --git a/src/crewai_tools/tools/rag/rag_tool.py b/src/crewai_tools/tools/rag/rag_tool.py index e56170480..f7e785bd7 100644 --- a/src/crewai_tools/tools/rag/rag_tool.py +++ b/src/crewai_tools/tools/rag/rag_tool.py @@ -1,8 +1,4 @@ import portalocker -import os -import tempfile -from contextlib import contextmanager -from copy import deepcopy from abc import ABC, abstractmethod from typing import Any @@ -11,111 +7,6 @@ from pydantic import BaseModel, ConfigDict, Field, model_validator from crewai.tools import BaseTool -def _fix_openai_config(config: dict[str, Any]) -> dict[str, Any]: - """Fix deprecated OpenAI configuration parameters in a config dictionary.""" - if not config: - return config - - # Create a deep copy to avoid modifying the original - fixed_config = deepcopy(config) - - def _is_azure_config(cfg: dict[str, Any]) -> bool: - """Determine if this is an Azure OpenAI configuration.""" - # Check for explicit Azure indicators - if cfg.get('provider') == 'azure_openai': - return True - - # Check for Azure URLs in various fields - for field in ['openai_api_base', 'base_url', 'api_base']: - url = cfg.get(field, '') - if isinstance(url, str) and 'azure' in url.lower(): - return True - - # Check if deployment is present (common in Azure configs) - # But only if we also have other Azure indicators - if 'deployment' in cfg: - for field in ['openai_api_base', 'base_url', 'api_base']: - url = cfg.get(field, '') - if isinstance(url, str) and 'azure' in url.lower(): - return True - - return False - - def _fix_config_recursively(cfg: dict[str, Any]) -> dict[str, Any]: - """Recursively fix OpenAI config parameters.""" - if not isinstance(cfg, dict): - return cfg - - # Only fix if this is definitely an Azure configuration - if _is_azure_config(cfg): - # Fix deprecated Azure OpenAI parameters - if 'openai_api_base' in cfg and 'azure_endpoint' not in cfg: - cfg['azure_endpoint'] = cfg.pop('openai_api_base') - - if 'base_url' in cfg and 'azure_endpoint' not in cfg: - # Only convert base_url to azure_endpoint for Azure URLs - base_url = cfg.get('base_url', '') - if 'openai.azure.com' in base_url: - cfg['azure_endpoint'] = cfg.pop('base_url') - - # Handle deployment -> azure_deployment conversion for Azure configs - if 'deployment' in cfg and 'azure_deployment' not in cfg: - cfg['azure_deployment'] = cfg.pop('deployment') - - # For non-Azure configs, we might still need to handle some deprecated parameters - # but we should NOT convert them to Azure format - else: - # For regular OpenAI configs, just remove the deprecated openai_api_base if present - # since it's not valid for regular OpenAI (should use base_url instead) - if 'openai_api_base' in cfg and 'base_url' not in cfg: - # Only convert to base_url if it's NOT an Azure URL - api_base = cfg.get('openai_api_base', '') - if isinstance(api_base, str) and 'openai.azure.com' not in api_base: - cfg['base_url'] = cfg.pop('openai_api_base') - - # Recursively fix nested dictionaries - for key, value in cfg.items(): - if isinstance(value, dict): - cfg[key] = _fix_config_recursively(value) - - return cfg - - return _fix_config_recursively(fixed_config) - - -@contextmanager -def _temporarily_unset_env_vars(): - """Temporarily unset problematic environment variables that cause OpenAI validation issues.""" - problematic_vars = [ - 'AZURE_API_BASE', - 'OPENAI_API_BASE', - 'OPENAI_BASE_URL' - ] - - # Store original values - 
original_values = {} - for var in problematic_vars: - if var in os.environ: - original_values[var] = os.environ[var] - del os.environ[var] - - try: - # Set the correct Azure environment variables if we had AZURE_API_BASE - if 'AZURE_API_BASE' in original_values: - os.environ['AZURE_OPENAI_ENDPOINT'] = original_values['AZURE_API_BASE'] - - yield - finally: - # Restore original values - for var, value in original_values.items(): - os.environ[var] = value - - # Clean up the temporary Azure endpoint we set - if 'AZURE_API_BASE' in original_values and 'AZURE_OPENAI_ENDPOINT' in os.environ: - if os.environ['AZURE_OPENAI_ENDPOINT'] == original_values['AZURE_API_BASE']: - del os.environ['AZURE_OPENAI_ENDPOINT'] - - class Adapter(BaseModel, ABC): model_config = ConfigDict(arbitrary_types_allowed=True) @@ -153,11 +44,7 @@ class RagTool(BaseTool): from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter with portalocker.Lock("crewai-rag-tool.lock", timeout=10): - # Fix both environment variables and config parameters - with _temporarily_unset_env_vars(): - # Fix deprecated OpenAI parameters in config - fixed_config = _fix_openai_config(self.config) - app = App.from_config(config=fixed_config) if fixed_config else App() + app = App.from_config(config=self.config) if self.config else App() self.adapter = EmbedchainAdapter( embedchain_app=app, summarize=self.summarize From dc039cfac8ee8783b74f030c23b5f806d553c9ea Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Tue, 19 Aug 2025 19:30:35 -0300 Subject: [PATCH 374/391] Adds RAG feature (#406) * feat: initialize rag * refactor: using cosine distance metric for chromadb * feat: use RecursiveCharacterTextSplitter as chunker strategy * feat: support chucker and loader per data_type * feat: adding JSON loader * feat: adding CSVLoader * feat: adding loader for DOCX files * feat: add loader for MDX files * feat: add loader for XML files * feat: add loader for parser Webpage * feat: support to load files from an entire directory * feat: support to auto-load the loaders for additional DataType * feat: add chuckers for some specific data type - Each chunker uses separators specific to its content type * feat: prevent document duplication and centralize content management - Implement document deduplication logic in RAG * Check for existing documents by source reference * Compare doc IDs to detect content changes * Automatically replace outdated content while preventing duplicates - Centralize common functionality for better maintainability * Create SourceContent class to handle URLs, files, and text uniformly * Extract shared utilities (compute_sha256) to misc.py * Standardize doc ID generation across all loaders - Improve RAG system architecture * All loaders now inherit consistent patterns from centralized BaseLoader * Better separation of concerns with dedicated content management classes * Standardized LoaderResult structure across all loader implementations * chore: split text loaders file * test: adding missing tests about RAG loaders * refactor: QOL * fix: add missing uv syntax on DOCXLoader --- src/crewai_tools/adapters/rag_adapter.py | 41 ++++ src/crewai_tools/rag/__init__.py | 8 + src/crewai_tools/rag/base_loader.py | 37 +++ src/crewai_tools/rag/chunkers/__init__.py | 15 ++ src/crewai_tools/rag/chunkers/base_chunker.py | 167 +++++++++++++ .../rag/chunkers/default_chunker.py | 6 + .../rag/chunkers/structured_chunker.py | 49 ++++ src/crewai_tools/rag/chunkers/text_chunker.py | 59 +++++ src/crewai_tools/rag/chunkers/web_chunker.py | 20 ++ 
src/crewai_tools/rag/core.py | 232 ++++++++++++++++++ src/crewai_tools/rag/data_types.py | 137 +++++++++++ src/crewai_tools/rag/loaders/__init__.py | 20 ++ src/crewai_tools/rag/loaders/csv_loader.py | 72 ++++++ .../rag/loaders/directory_loader.py | 142 +++++++++++ src/crewai_tools/rag/loaders/docx_loader.py | 72 ++++++ src/crewai_tools/rag/loaders/json_loader.py | 69 ++++++ src/crewai_tools/rag/loaders/mdx_loader.py | 59 +++++ src/crewai_tools/rag/loaders/text_loader.py | 28 +++ .../rag/loaders/webpage_loader.py | 47 ++++ src/crewai_tools/rag/loaders/xml_loader.py | 61 +++++ src/crewai_tools/rag/misc.py | 4 + src/crewai_tools/rag/source_content.py | 46 ++++ tests/rag/__init__.py | 0 tests/rag/test_csv_loader.py | 130 ++++++++++ tests/rag/test_directory_loader.py | 149 +++++++++++ tests/rag/test_docx_loader.py | 135 ++++++++++ tests/rag/test_json_loader.py | 180 ++++++++++++++ tests/rag/test_mdx_loader.py | 176 +++++++++++++ tests/rag/test_text_loaders.py | 160 ++++++++++++ tests/rag/test_webpage_loader.py | 137 +++++++++++ tests/rag/test_xml_loader.py | 137 +++++++++++ 31 files changed, 2595 insertions(+) create mode 100644 src/crewai_tools/adapters/rag_adapter.py create mode 100644 src/crewai_tools/rag/__init__.py create mode 100644 src/crewai_tools/rag/base_loader.py create mode 100644 src/crewai_tools/rag/chunkers/__init__.py create mode 100644 src/crewai_tools/rag/chunkers/base_chunker.py create mode 100644 src/crewai_tools/rag/chunkers/default_chunker.py create mode 100644 src/crewai_tools/rag/chunkers/structured_chunker.py create mode 100644 src/crewai_tools/rag/chunkers/text_chunker.py create mode 100644 src/crewai_tools/rag/chunkers/web_chunker.py create mode 100644 src/crewai_tools/rag/core.py create mode 100644 src/crewai_tools/rag/data_types.py create mode 100644 src/crewai_tools/rag/loaders/__init__.py create mode 100644 src/crewai_tools/rag/loaders/csv_loader.py create mode 100644 src/crewai_tools/rag/loaders/directory_loader.py create mode 100644 src/crewai_tools/rag/loaders/docx_loader.py create mode 100644 src/crewai_tools/rag/loaders/json_loader.py create mode 100644 src/crewai_tools/rag/loaders/mdx_loader.py create mode 100644 src/crewai_tools/rag/loaders/text_loader.py create mode 100644 src/crewai_tools/rag/loaders/webpage_loader.py create mode 100644 src/crewai_tools/rag/loaders/xml_loader.py create mode 100644 src/crewai_tools/rag/misc.py create mode 100644 src/crewai_tools/rag/source_content.py create mode 100644 tests/rag/__init__.py create mode 100644 tests/rag/test_csv_loader.py create mode 100644 tests/rag/test_directory_loader.py create mode 100644 tests/rag/test_docx_loader.py create mode 100644 tests/rag/test_json_loader.py create mode 100644 tests/rag/test_mdx_loader.py create mode 100644 tests/rag/test_text_loaders.py create mode 100644 tests/rag/test_webpage_loader.py create mode 100644 tests/rag/test_xml_loader.py diff --git a/src/crewai_tools/adapters/rag_adapter.py b/src/crewai_tools/adapters/rag_adapter.py new file mode 100644 index 000000000..78011328c --- /dev/null +++ b/src/crewai_tools/adapters/rag_adapter.py @@ -0,0 +1,41 @@ +from typing import Any, Optional + +from crewai_tools.rag.core import RAG +from crewai_tools.tools.rag.rag_tool import Adapter + + +class RAGAdapter(Adapter): + def __init__( + self, + collection_name: str = "crewai_knowledge_base", + persist_directory: Optional[str] = None, + embedding_model: str = "text-embedding-3-small", + top_k: int = 5, + embedding_api_key: Optional[str] = None, + **embedding_kwargs + ): + 
super().__init__()
+
+        # Prepare embedding configuration
+        embedding_config = {
+            "api_key": embedding_api_key,
+            **embedding_kwargs
+        }
+
+        self._adapter = RAG(
+            collection_name=collection_name,
+            persist_directory=persist_directory,
+            embedding_model=embedding_model,
+            top_k=top_k,
+            embedding_config=embedding_config
+        )
+
+    def query(self, question: str) -> str:
+        return self._adapter.query(question)
+
+    def add(
+        self,
+        *args: Any,
+        **kwargs: Any,
+    ) -> None:
+        self._adapter.add(*args, **kwargs)
diff --git a/src/crewai_tools/rag/__init__.py b/src/crewai_tools/rag/__init__.py
new file mode 100644
index 000000000..8d08b2907
--- /dev/null
+++ b/src/crewai_tools/rag/__init__.py
@@ -0,0 +1,8 @@
+from crewai_tools.rag.core import RAG, EmbeddingService
+from crewai_tools.rag.data_types import DataType
+
+__all__ = [
+    "RAG",
+    "EmbeddingService",
+    "DataType",
+]
diff --git a/src/crewai_tools/rag/base_loader.py b/src/crewai_tools/rag/base_loader.py
new file mode 100644
index 000000000..e38d6f8c1
--- /dev/null
+++ b/src/crewai_tools/rag/base_loader.py
@@ -0,0 +1,37 @@
+from abc import ABC, abstractmethod
+from typing import Any, Dict, Optional
+from pydantic import BaseModel, Field
+
+from crewai_tools.rag.misc import compute_sha256
+from crewai_tools.rag.source_content import SourceContent
+
+
+class LoaderResult(BaseModel):
+    content: str = Field(description="The text content of the source")
+    source: str = Field(description="The source of the content", default="unknown")
+    metadata: Dict[str, Any] = Field(description="The metadata of the source", default_factory=dict)
+    doc_id: str = Field(description="The id of the document")
+
+
+class BaseLoader(ABC):
+    def __init__(self, config: Optional[Dict[str, Any]] = None):
+        self.config = config or {}
+
+    @abstractmethod
+    def load(self, content: SourceContent, **kwargs) -> LoaderResult:
+        ...
+
+    def generate_doc_id(self, source_ref: str | None = None, content: str | None = None) -> str:
+        """
+        Generate a unique document id based on the source reference and content.
+        Missing arguments default to empty strings; the two values are then
+        concatenated and hashed, so the id changes whenever either the source
+        reference or the content changes.
+
+        Both are optional because the TEXT content type does not have a source reference. In this case, the content is used as the source reference. 
+ """ + + source_ref = source_ref or "" + content = content or "" + + return compute_sha256(source_ref + content) diff --git a/src/crewai_tools/rag/chunkers/__init__.py b/src/crewai_tools/rag/chunkers/__init__.py new file mode 100644 index 000000000..f48483391 --- /dev/null +++ b/src/crewai_tools/rag/chunkers/__init__.py @@ -0,0 +1,15 @@ +from crewai_tools.rag.chunkers.base_chunker import BaseChunker +from crewai_tools.rag.chunkers.default_chunker import DefaultChunker +from crewai_tools.rag.chunkers.text_chunker import TextChunker, DocxChunker, MdxChunker +from crewai_tools.rag.chunkers.structured_chunker import CsvChunker, JsonChunker, XmlChunker + +__all__ = [ + "BaseChunker", + "DefaultChunker", + "TextChunker", + "DocxChunker", + "MdxChunker", + "CsvChunker", + "JsonChunker", + "XmlChunker", +] diff --git a/src/crewai_tools/rag/chunkers/base_chunker.py b/src/crewai_tools/rag/chunkers/base_chunker.py new file mode 100644 index 000000000..deafbfc7a --- /dev/null +++ b/src/crewai_tools/rag/chunkers/base_chunker.py @@ -0,0 +1,167 @@ +from typing import List, Optional +import re + +class RecursiveCharacterTextSplitter: + """ + A text splitter that recursively splits text based on a hierarchy of separators. + """ + + def __init__( + self, + chunk_size: int = 4000, + chunk_overlap: int = 200, + separators: Optional[List[str]] = None, + keep_separator: bool = True, + ): + """ + Initialize the RecursiveCharacterTextSplitter. + + Args: + chunk_size: Maximum size of each chunk + chunk_overlap: Number of characters to overlap between chunks + separators: List of separators to use for splitting (in order of preference) + keep_separator: Whether to keep the separator in the split text + """ + if chunk_overlap >= chunk_size: + raise ValueError(f"Chunk overlap ({chunk_overlap}) cannot be >= chunk size ({chunk_size})") + + self._chunk_size = chunk_size + self._chunk_overlap = chunk_overlap + self._keep_separator = keep_separator + + self._separators = separators or [ + "\n\n", + "\n", + " ", + "", + ] + + def split_text(self, text: str) -> List[str]: + return self._split_text(text, self._separators) + + def _split_text(self, text: str, separators: List[str]) -> List[str]: + separator = separators[-1] + new_separators = [] + + for i, sep in enumerate(separators): + if sep == "": + separator = sep + break + if re.search(re.escape(sep), text): + separator = sep + new_separators = separators[i + 1:] + break + + splits = self._split_text_with_separator(text, separator) + + good_splits = [] + + for split in splits: + if len(split) < self._chunk_size: + good_splits.append(split) + else: + if new_separators: + other_info = self._split_text(split, new_separators) + good_splits.extend(other_info) + else: + good_splits.extend(self._split_by_characters(split)) + + return self._merge_splits(good_splits, separator) + + def _split_text_with_separator(self, text: str, separator: str) -> List[str]: + if separator == "": + return list(text) + + if self._keep_separator and separator in text: + parts = text.split(separator) + splits = [] + + for i, part in enumerate(parts): + if i == 0: + splits.append(part) + elif i == len(parts) - 1: + if part: + splits.append(separator + part) + else: + if part: + splits.append(separator + part) + else: + if splits: + splits[-1] += separator + + return [s for s in splits if s] + else: + return text.split(separator) + + def _split_by_characters(self, text: str) -> List[str]: + chunks = [] + for i in range(0, len(text), self._chunk_size): + chunks.append(text[i:i + self._chunk_size]) + 
return chunks + + def _merge_splits(self, splits: List[str], separator: str) -> List[str]: + """Merge splits into chunks with proper overlap.""" + docs = [] + current_doc = [] + total = 0 + + for split in splits: + split_len = len(split) + + if total + split_len > self._chunk_size and current_doc: + if separator == "": + doc = "".join(current_doc) + else: + doc = separator.join(current_doc) + + if doc: + docs.append(doc) + + # Handle overlap by keeping some of the previous content + while total > self._chunk_overlap and len(current_doc) > 1: + removed = current_doc.pop(0) + total -= len(removed) + if separator != "": + total -= len(separator) + + current_doc.append(split) + total += split_len + if separator != "" and len(current_doc) > 1: + total += len(separator) + + if current_doc: + if separator == "": + doc = "".join(current_doc) + else: + doc = separator.join(current_doc) + + if doc: + docs.append(doc) + + return docs + +class BaseChunker: + def __init__(self, chunk_size: int = 1000, chunk_overlap: int = 200, separators: Optional[List[str]] = None, keep_separator: bool = True): + """ + Initialize the Chunker + + Args: + chunk_size: Maximum size of each chunk + chunk_overlap: Number of characters to overlap between chunks + separators: List of separators to use for splitting + keep_separator: Whether to keep separators in the chunks + """ + + self._splitter = RecursiveCharacterTextSplitter( + chunk_size=chunk_size, + chunk_overlap=chunk_overlap, + separators=separators, + keep_separator=keep_separator, + ) + + + def chunk(self, text: str) -> List[str]: + if not text or not text.strip(): + return [] + + return self._splitter.split_text(text) diff --git a/src/crewai_tools/rag/chunkers/default_chunker.py b/src/crewai_tools/rag/chunkers/default_chunker.py new file mode 100644 index 000000000..0d0ec6935 --- /dev/null +++ b/src/crewai_tools/rag/chunkers/default_chunker.py @@ -0,0 +1,6 @@ +from crewai_tools.rag.chunkers.base_chunker import BaseChunker +from typing import List, Optional + +class DefaultChunker(BaseChunker): + def __init__(self, chunk_size: int = 2000, chunk_overlap: int = 20, separators: Optional[List[str]] = None, keep_separator: bool = True): + super().__init__(chunk_size, chunk_overlap, separators, keep_separator) diff --git a/src/crewai_tools/rag/chunkers/structured_chunker.py b/src/crewai_tools/rag/chunkers/structured_chunker.py new file mode 100644 index 000000000..483f92588 --- /dev/null +++ b/src/crewai_tools/rag/chunkers/structured_chunker.py @@ -0,0 +1,49 @@ +from crewai_tools.rag.chunkers.base_chunker import BaseChunker +from typing import List, Optional + + +class CsvChunker(BaseChunker): + def __init__(self, chunk_size: int = 1200, chunk_overlap: int = 100, separators: Optional[List[str]] = None, keep_separator: bool = True): + if separators is None: + separators = [ + "\nRow ", # Row boundaries (from CSVLoader format) + "\n", # Line breaks + " | ", # Column separators + ", ", # Comma separators + " ", # Word breaks + "", # Character level + ] + super().__init__(chunk_size, chunk_overlap, separators, keep_separator) + + +class JsonChunker(BaseChunker): + def __init__(self, chunk_size: int = 2000, chunk_overlap: int = 200, separators: Optional[List[str]] = None, keep_separator: bool = True): + if separators is None: + separators = [ + "\n\n", # Object/array boundaries + "\n", # Line breaks + "},", # Object endings + "],", # Array endings + ", ", # Property separators + ": ", # Key-value separators + " ", # Word breaks + "", # Character level + ] + 
super().__init__(chunk_size, chunk_overlap, separators, keep_separator) + + +class XmlChunker(BaseChunker): + def __init__(self, chunk_size: int = 2500, chunk_overlap: int = 250, separators: Optional[List[str]] = None, keep_separator: bool = True): + if separators is None: + separators = [ + "\n\n", # Element boundaries + "\n", # Line breaks + ">", # Tag endings + ". ", # Sentence endings (for text content) + "! ", # Exclamation endings + "? ", # Question endings + ", ", # Comma separators + " ", # Word breaks + "", # Character level + ] + super().__init__(chunk_size, chunk_overlap, separators, keep_separator) diff --git a/src/crewai_tools/rag/chunkers/text_chunker.py b/src/crewai_tools/rag/chunkers/text_chunker.py new file mode 100644 index 000000000..2e76df8ab --- /dev/null +++ b/src/crewai_tools/rag/chunkers/text_chunker.py @@ -0,0 +1,59 @@ +from crewai_tools.rag.chunkers.base_chunker import BaseChunker +from typing import List, Optional + + +class TextChunker(BaseChunker): + def __init__(self, chunk_size: int = 1500, chunk_overlap: int = 150, separators: Optional[List[str]] = None, keep_separator: bool = True): + if separators is None: + separators = [ + "\n\n\n", # Multiple line breaks (sections) + "\n\n", # Paragraph breaks + "\n", # Line breaks + ". ", # Sentence endings + "! ", # Exclamation endings + "? ", # Question endings + "; ", # Semicolon breaks + ", ", # Comma breaks + " ", # Word breaks + "", # Character level + ] + super().__init__(chunk_size, chunk_overlap, separators, keep_separator) + + +class DocxChunker(BaseChunker): + def __init__(self, chunk_size: int = 2500, chunk_overlap: int = 250, separators: Optional[List[str]] = None, keep_separator: bool = True): + if separators is None: + separators = [ + "\n\n\n", # Multiple line breaks (major sections) + "\n\n", # Paragraph breaks + "\n", # Line breaks + ". ", # Sentence endings + "! ", # Exclamation endings + "? ", # Question endings + "; ", # Semicolon breaks + ", ", # Comma breaks + " ", # Word breaks + "", # Character level + ] + super().__init__(chunk_size, chunk_overlap, separators, keep_separator) + + +class MdxChunker(BaseChunker): + def __init__(self, chunk_size: int = 3000, chunk_overlap: int = 300, separators: Optional[List[str]] = None, keep_separator: bool = True): + if separators is None: + separators = [ + "\n## ", # H2 headers (major sections) + "\n### ", # H3 headers (subsections) + "\n#### ", # H4 headers (sub-subsections) + "\n\n", # Paragraph breaks + "\n```", # Code block boundaries + "\n", # Line breaks + ". ", # Sentence endings + "! ", # Exclamation endings + "? ", # Question endings + "; ", # Semicolon breaks + ", ", # Comma breaks + " ", # Word breaks + "", # Character level + ] + super().__init__(chunk_size, chunk_overlap, separators, keep_separator) diff --git a/src/crewai_tools/rag/chunkers/web_chunker.py b/src/crewai_tools/rag/chunkers/web_chunker.py new file mode 100644 index 000000000..2712a6c69 --- /dev/null +++ b/src/crewai_tools/rag/chunkers/web_chunker.py @@ -0,0 +1,20 @@ +from crewai_tools.rag.chunkers.base_chunker import BaseChunker +from typing import List, Optional + + +class WebsiteChunker(BaseChunker): + def __init__(self, chunk_size: int = 2500, chunk_overlap: int = 250, separators: Optional[List[str]] = None, keep_separator: bool = True): + if separators is None: + separators = [ + "\n\n\n", # Major section breaks + "\n\n", # Paragraph breaks + "\n", # Line breaks + ". ", # Sentence endings + "! ", # Exclamation endings + "? 
", # Question endings + "; ", # Semicolon breaks + ", ", # Comma breaks + " ", # Word breaks + "", # Character level + ] + super().__init__(chunk_size, chunk_overlap, separators, keep_separator) diff --git a/src/crewai_tools/rag/core.py b/src/crewai_tools/rag/core.py new file mode 100644 index 000000000..0aa4b666c --- /dev/null +++ b/src/crewai_tools/rag/core.py @@ -0,0 +1,232 @@ +import logging +from pathlib import Path +from typing import Any, Dict, List, Optional, Union +from uuid import uuid4 + +import chromadb +import litellm +from pydantic import BaseModel, Field, PrivateAttr + +from crewai_tools.tools.rag.rag_tool import Adapter +from crewai_tools.rag.data_types import DataType +from crewai_tools.rag.base_loader import BaseLoader +from crewai_tools.rag.chunkers.base_chunker import BaseChunker +from crewai_tools.rag.source_content import SourceContent +from crewai_tools.rag.misc import compute_sha256 + +logger = logging.getLogger(__name__) + + +class EmbeddingService: + def __init__(self, model: str = "text-embedding-3-small", **kwargs): + self.model = model + self.kwargs = kwargs + + def embed_text(self, text: str) -> List[float]: + try: + response = litellm.embedding( + model=self.model, + input=[text], + **self.kwargs + ) + return response.data[0]['embedding'] + except Exception as e: + logger.error(f"Error generating embedding: {e}") + raise + + def embed_batch(self, texts: List[str]) -> List[List[float]]: + if not texts: + return [] + + try: + response = litellm.embedding( + model=self.model, + input=texts, + **self.kwargs + ) + return [data['embedding'] for data in response.data] + except Exception as e: + logger.error(f"Error generating batch embeddings: {e}") + raise + + +class Document(BaseModel): + id: str = Field(default_factory=lambda: str(uuid4())) + content: str + metadata: Dict[str, Any] = Field(default_factory=dict) + data_type: DataType = DataType.TEXT + source: Optional[str] = None + + +class RAG(Adapter): + collection_name: str = "crewai_knowledge_base" + persist_directory: Optional[str] = None + embedding_model: str = "text-embedding-3-large" + summarize: bool = False + top_k: int = 5 + embedding_config: Dict[str, Any] = Field(default_factory=dict) + + _client: Any = PrivateAttr() + _collection: Any = PrivateAttr() + _embedding_service: EmbeddingService = PrivateAttr() + + def model_post_init(self, __context: Any) -> None: + try: + if self.persist_directory: + self._client = chromadb.PersistentClient(path=self.persist_directory) + else: + self._client = chromadb.Client() + + self._collection = self._client.get_or_create_collection( + name=self.collection_name, + metadata={"hnsw:space": "cosine", "description": "CrewAI Knowledge Base"} + ) + + self._embedding_service = EmbeddingService(model=self.embedding_model, **self.embedding_config) + except Exception as e: + logger.error(f"Failed to initialize ChromaDB: {e}") + raise + + super().model_post_init(__context) + + def add( + self, + content: str | Path, + data_type: Optional[Union[str, DataType]] = None, + metadata: Optional[Dict[str, Any]] = None, + loader: Optional[BaseLoader] = None, + chunker: Optional[BaseChunker] = None, + **kwargs: Any + ) -> None: + source_content = SourceContent(content) + + data_type = self._get_data_type(data_type=data_type, content=source_content) + + if not loader: + loader = data_type.get_loader() + + if not chunker: + chunker = data_type.get_chunker() + + loader_result = loader.load(source_content) + doc_id = loader_result.doc_id + + existing_doc = 
self._collection.get(where={"source": source_content.source_ref}, limit=1) + existing_doc_id = existing_doc and existing_doc['metadatas'][0]['doc_id'] if existing_doc['metadatas'] else None + + if existing_doc_id == doc_id: + logger.warning(f"Document with source {loader_result.source} already exists") + return + + # Document with same source ref does exists but the content has changed, deleting the oldest reference + if existing_doc_id and existing_doc_id != loader_result.doc_id: + logger.warning(f"Deleting old document with doc_id {existing_doc_id}") + self._collection.delete(where={"doc_id": existing_doc_id}) + + documents = [] + + chunks = chunker.chunk(loader_result.content) + for i, chunk in enumerate(chunks): + doc_metadata = (metadata or {}).copy() + doc_metadata['chunk_index'] = i + documents.append(Document( + id=compute_sha256(chunk), + content=chunk, + metadata=doc_metadata, + data_type=data_type, + source=loader_result.source + )) + + if not documents: + logger.warning("No documents to add") + return + + contents = [doc.content for doc in documents] + try: + embeddings = self._embedding_service.embed_batch(contents) + except Exception as e: + logger.error(f"Failed to generate embeddings: {e}") + return + + ids = [doc.id for doc in documents] + metadatas = [] + + for doc in documents: + doc_metadata = doc.metadata.copy() + doc_metadata.update({ + "data_type": doc.data_type.value, + "source": doc.source, + "doc_id": doc_id + }) + metadatas.append(doc_metadata) + + try: + self._collection.add( + ids=ids, + embeddings=embeddings, + documents=contents, + metadatas=metadatas, + ) + logger.info(f"Added {len(documents)} documents to knowledge base") + except Exception as e: + logger.error(f"Failed to add documents to ChromaDB: {e}") + + def query(self, question: str, where: Optional[Dict[str, Any]] = None) -> str: + try: + question_embedding = self._embedding_service.embed_text(question) + + results = self._collection.query( + query_embeddings=[question_embedding], + n_results=self.top_k, + where=where, + include=["documents", "metadatas", "distances"] + ) + + if not results or not results.get("documents") or not results["documents"][0]: + return "No relevant content found." 
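            # Chroma returns batched lists; with a single question only index
            # [0] is populated. Distances are cosine distances d (the
            # collection is created with hnsw:space="cosine"), so relevance
            # below is the similarity 1 - d: a distance of 0.12 is reported
            # as "Relevance: 0.880".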
+ + documents = results["documents"][0] + metadatas = results.get("metadatas", [None])[0] or [] + distances = results.get("distances", [None])[0] or [] + + # Return sources with relevance scores + formatted_results = [] + for i, doc in enumerate(documents): + metadata = metadatas[i] if i < len(metadatas) else {} + distance = distances[i] if i < len(distances) else 1.0 + source = metadata.get("source", "unknown") if metadata else "unknown" + score = 1 - distance if distance is not None else 0 # Convert distance to similarity + formatted_results.append(f"[Source: {source}, Relevance: {score:.3f}]\n{doc}") + + return "\n\n".join(formatted_results) + except Exception as e: + logger.error(f"Query failed: {e}") + return f"Error querying knowledge base: {e}" + + def delete_collection(self) -> None: + try: + self._client.delete_collection(self.collection_name) + logger.info(f"Deleted collection: {self.collection_name}") + except Exception as e: + logger.error(f"Failed to delete collection: {e}") + + def get_collection_info(self) -> Dict[str, Any]: + try: + count = self._collection.count() + return { + "name": self.collection_name, + "count": count, + "embedding_model": self.embedding_model + } + except Exception as e: + logger.error(f"Failed to get collection info: {e}") + return {"error": str(e)} + + def _get_data_type(self, content: SourceContent, data_type: str | DataType | None = None) -> DataType: + try: + if isinstance(data_type, str): + return DataType(data_type) + except Exception as e: + pass + + return content.data_type diff --git a/src/crewai_tools/rag/data_types.py b/src/crewai_tools/rag/data_types.py new file mode 100644 index 000000000..d2d265cce --- /dev/null +++ b/src/crewai_tools/rag/data_types.py @@ -0,0 +1,137 @@ +from enum import Enum +from pathlib import Path +from urllib.parse import urlparse +import os +from crewai_tools.rag.chunkers.base_chunker import BaseChunker +from crewai_tools.rag.base_loader import BaseLoader + +class DataType(str, Enum): + PDF_FILE = "pdf_file" + TEXT_FILE = "text_file" + CSV = "csv" + JSON = "json" + XML = "xml" + DOCX = "docx" + MDX = "mdx" + + # Database types + MYSQL = "mysql" + POSTGRES = "postgres" + + # Repository types + GITHUB = "github" + DIRECTORY = "directory" + + # Web types + WEBSITE = "website" + DOCS_SITE = "docs_site" + + # Raw types + TEXT = "text" + + + def get_chunker(self) -> BaseChunker: + from importlib import import_module + + chunkers = { + DataType.TEXT_FILE: ("text_chunker", "TextChunker"), + DataType.TEXT: ("text_chunker", "TextChunker"), + DataType.DOCX: ("text_chunker", "DocxChunker"), + DataType.MDX: ("text_chunker", "MdxChunker"), + + # Structured formats + DataType.CSV: ("structured_chunker", "CsvChunker"), + DataType.JSON: ("structured_chunker", "JsonChunker"), + DataType.XML: ("structured_chunker", "XmlChunker"), + + DataType.WEBSITE: ("web_chunker", "WebsiteChunker"), + } + + module_name, class_name = chunkers.get(self, ("default_chunker", "DefaultChunker")) + module_path = f"crewai_tools.rag.chunkers.{module_name}" + + try: + module = import_module(module_path) + return getattr(module, class_name)() + except Exception as e: + raise ValueError(f"Error loading chunker for {self}: {e}") + + def get_loader(self) -> BaseLoader: + from importlib import import_module + + loaders = { + DataType.TEXT_FILE: ("text_loader", "TextFileLoader"), + DataType.TEXT: ("text_loader", "TextLoader"), + DataType.XML: ("xml_loader", "XMLLoader"), + DataType.WEBSITE: ("webpage_loader", "WebPageLoader"), + DataType.MDX: ("mdx_loader", 
"MDXLoader"), + DataType.JSON: ("json_loader", "JSONLoader"), + DataType.DOCX: ("docx_loader", "DOCXLoader"), + DataType.CSV: ("csv_loader", "CSVLoader"), + DataType.DIRECTORY: ("directory_loader", "DirectoryLoader"), + } + + module_name, class_name = loaders.get(self, ("text_loader", "TextLoader")) + module_path = f"crewai_tools.rag.loaders.{module_name}" + try: + module = import_module(module_path) + return getattr(module, class_name)() + except Exception as e: + raise ValueError(f"Error loading loader for {self}: {e}") + +class DataTypes: + @staticmethod + def from_content(content: str | Path | None = None) -> DataType: + if content is None: + return DataType.TEXT + + if isinstance(content, Path): + content = str(content) + + is_url = False + if isinstance(content, str): + try: + url = urlparse(content) + is_url = (url.scheme and url.netloc) or url.scheme == "file" + except Exception: + pass + + def get_file_type(path: str) -> DataType | None: + mapping = { + ".pdf": DataType.PDF_FILE, + ".csv": DataType.CSV, + ".mdx": DataType.MDX, + ".md": DataType.MDX, + ".docx": DataType.DOCX, + ".json": DataType.JSON, + ".xml": DataType.XML, + ".txt": DataType.TEXT_FILE, + } + for ext, dtype in mapping.items(): + if path.endswith(ext): + return dtype + return None + + if is_url: + dtype = get_file_type(url.path) + if dtype: + return dtype + + if "docs" in url.netloc or ("docs" in url.path and url.scheme != "file"): + return DataType.DOCS_SITE + if "github.com" in url.netloc: + return DataType.GITHUB + + return DataType.WEBSITE + + if os.path.isfile(content): + dtype = get_file_type(content) + if dtype: + return dtype + + if os.path.exists(content): + return DataType.TEXT_FILE + elif os.path.isdir(content): + return DataType.DIRECTORY + + return DataType.TEXT diff --git a/src/crewai_tools/rag/loaders/__init__.py b/src/crewai_tools/rag/loaders/__init__.py new file mode 100644 index 000000000..503651468 --- /dev/null +++ b/src/crewai_tools/rag/loaders/__init__.py @@ -0,0 +1,20 @@ +from crewai_tools.rag.loaders.text_loader import TextFileLoader, TextLoader +from crewai_tools.rag.loaders.xml_loader import XMLLoader +from crewai_tools.rag.loaders.webpage_loader import WebPageLoader +from crewai_tools.rag.loaders.mdx_loader import MDXLoader +from crewai_tools.rag.loaders.json_loader import JSONLoader +from crewai_tools.rag.loaders.docx_loader import DOCXLoader +from crewai_tools.rag.loaders.csv_loader import CSVLoader +from crewai_tools.rag.loaders.directory_loader import DirectoryLoader + +__all__ = [ + "TextFileLoader", + "TextLoader", + "XMLLoader", + "WebPageLoader", + "MDXLoader", + "JSONLoader", + "DOCXLoader", + "CSVLoader", + "DirectoryLoader", +] diff --git a/src/crewai_tools/rag/loaders/csv_loader.py b/src/crewai_tools/rag/loaders/csv_loader.py new file mode 100644 index 000000000..e389123a7 --- /dev/null +++ b/src/crewai_tools/rag/loaders/csv_loader.py @@ -0,0 +1,72 @@ +import csv +from io import StringIO + +from crewai_tools.rag.base_loader import BaseLoader, LoaderResult +from crewai_tools.rag.source_content import SourceContent + + +class CSVLoader(BaseLoader): + def load(self, source_content: SourceContent, **kwargs) -> LoaderResult: + source_ref = source_content.source_ref + + content_str = source_content.source + if source_content.is_url(): + content_str = self._load_from_url(content_str, kwargs) + elif source_content.path_exists(): + content_str = self._load_from_file(content_str) + + return self._parse_csv(content_str, source_ref) + + + def _load_from_url(self, url: str, kwargs: dict) 
-> str: + import requests + + headers = kwargs.get("headers", { + "Accept": "text/csv, application/csv, text/plain", + "User-Agent": "Mozilla/5.0 (compatible; crewai-tools CSVLoader)" + }) + + try: + response = requests.get(url, headers=headers, timeout=30) + response.raise_for_status() + return response.text + except Exception as e: + raise ValueError(f"Error fetching CSV from URL {url}: {str(e)}") + + def _load_from_file(self, path: str) -> str: + with open(path, "r", encoding="utf-8") as file: + return file.read() + + def _parse_csv(self, content: str, source_ref: str) -> LoaderResult: + try: + csv_reader = csv.DictReader(StringIO(content)) + + text_parts = [] + headers = csv_reader.fieldnames + + if headers: + text_parts.append("Headers: " + " | ".join(headers)) + text_parts.append("-" * 50) + + for row_num, row in enumerate(csv_reader, 1): + row_text = " | ".join([f"{k}: {v}" for k, v in row.items() if v]) + text_parts.append(f"Row {row_num}: {row_text}") + + text = "\n".join(text_parts) + + metadata = { + "format": "csv", + "columns": headers, + "rows": len(text_parts) - 2 if headers else 0 + } + + except Exception as e: + text = content + metadata = {"format": "csv", "parse_error": str(e)} + + return LoaderResult( + content=text, + source=source_ref, + metadata=metadata, + doc_id=self.generate_doc_id(source_ref=source_ref, content=text) + ) diff --git a/src/crewai_tools/rag/loaders/directory_loader.py b/src/crewai_tools/rag/loaders/directory_loader.py new file mode 100644 index 000000000..7bc5f298b --- /dev/null +++ b/src/crewai_tools/rag/loaders/directory_loader.py @@ -0,0 +1,142 @@ +import os +from pathlib import Path +from typing import List + +from crewai_tools.rag.base_loader import BaseLoader, LoaderResult +from crewai_tools.rag.source_content import SourceContent + + +class DirectoryLoader(BaseLoader): + def load(self, source_content: SourceContent, **kwargs) -> LoaderResult: + """ + Load and process all files from a directory recursively. + + Args: + source: Directory path or URL to a directory listing + **kwargs: Additional options: + - recursive: bool (default True) - Whether to search recursively + - include_extensions: list - Only include files with these extensions + - exclude_extensions: list - Exclude files with these extensions + - max_files: int - Maximum number of files to process + """ + source_ref = source_content.source_ref + + if source_content.is_url(): + raise ValueError("URL directory loading is not supported. 
Please provide a local directory path.") + + if not os.path.exists(source_ref): + raise FileNotFoundError(f"Directory does not exist: {source_ref}") + + if not os.path.isdir(source_ref): + raise ValueError(f"Path is not a directory: {source_ref}") + + return self._process_directory(source_ref, kwargs) + + def _process_directory(self, dir_path: str, kwargs: dict) -> LoaderResult: + recursive = kwargs.get("recursive", True) + include_extensions = kwargs.get("include_extensions", None) + exclude_extensions = kwargs.get("exclude_extensions", None) + max_files = kwargs.get("max_files", None) + + files = self._find_files(dir_path, recursive, include_extensions, exclude_extensions) + + if max_files and len(files) > max_files: + files = files[:max_files] + + all_contents = [] + processed_files = [] + errors = [] + + for file_path in files: + try: + result = self._process_single_file(file_path) + if result: + all_contents.append(f"=== File: {file_path} ===\n{result.content}") + processed_files.append({ + "path": file_path, + "metadata": result.metadata, + "source": result.source + }) + except Exception as e: + error_msg = f"Error processing {file_path}: {str(e)}" + errors.append(error_msg) + all_contents.append(f"=== File: {file_path} (ERROR) ===\n{error_msg}") + + combined_content = "\n\n".join(all_contents) + + metadata = { + "format": "directory", + "directory_path": dir_path, + "total_files": len(files), + "processed_files": len(processed_files), + "errors": len(errors), + "file_details": processed_files, + "error_details": errors + } + + return LoaderResult( + content=combined_content, + source=dir_path, + metadata=metadata, + doc_id=self.generate_doc_id(source_ref=dir_path, content=combined_content) + ) + + def _find_files(self, dir_path: str, recursive: bool, include_ext: List[str] | None = None, exclude_ext: List[str] | None = None) -> List[str]: + """Find all files in directory matching criteria.""" + files = [] + + if recursive: + for root, dirs, filenames in os.walk(dir_path): + dirs[:] = [d for d in dirs if not d.startswith('.')] + + for filename in filenames: + if self._should_include_file(filename, include_ext, exclude_ext): + files.append(os.path.join(root, filename)) + else: + try: + for item in os.listdir(dir_path): + item_path = os.path.join(dir_path, item) + if os.path.isfile(item_path) and self._should_include_file(item, include_ext, exclude_ext): + files.append(item_path) + except PermissionError: + pass + + return sorted(files) + + def _should_include_file(self, filename: str, include_ext: List[str] = None, exclude_ext: List[str] = None) -> bool: + """Determine if a file should be included based on criteria.""" + if filename.startswith('.'): + return False + + _, ext = os.path.splitext(filename.lower()) + + if include_ext: + if ext not in [e.lower() if e.startswith('.') else f'.{e.lower()}' for e in include_ext]: + return False + + if exclude_ext: + if ext in [e.lower() if e.startswith('.') else f'.{e.lower()}' for e in exclude_ext]: + return False + + return True + + def _process_single_file(self, file_path: str) -> LoaderResult: + from crewai_tools.rag.data_types import DataTypes + + data_type = DataTypes.from_content(Path(file_path)) + + loader = data_type.get_loader() + + result = loader.load(SourceContent(file_path)) + + if result.metadata is None: + result.metadata = {} + + result.metadata.update({ + "file_path": file_path, + "file_size": os.path.getsize(file_path), + "data_type": str(data_type), + "loader_type": loader.__class__.__name__ + }) + + return result diff 
--git a/src/crewai_tools/rag/loaders/docx_loader.py b/src/crewai_tools/rag/loaders/docx_loader.py new file mode 100644 index 000000000..2f5df23af --- /dev/null +++ b/src/crewai_tools/rag/loaders/docx_loader.py @@ -0,0 +1,72 @@ +import os +import tempfile + +from crewai_tools.rag.base_loader import BaseLoader, LoaderResult +from crewai_tools.rag.source_content import SourceContent + + +class DOCXLoader(BaseLoader): + def load(self, source_content: SourceContent, **kwargs) -> LoaderResult: + try: + from docx import Document as DocxDocument + except ImportError: + raise ImportError("python-docx is required for DOCX loading. Install with: 'uv pip install python-docx' or pip install crewai-tools[rag]") + + source_ref = source_content.source_ref + + if source_content.is_url(): + temp_file = self._download_from_url(source_ref, kwargs) + try: + return self._load_from_file(temp_file, source_ref, DocxDocument) + finally: + os.unlink(temp_file) + elif source_content.path_exists(): + return self._load_from_file(source_ref, source_ref, DocxDocument) + else: + raise ValueError(f"Source must be a valid file path or URL, got: {source_content.source}") + + def _download_from_url(self, url: str, kwargs: dict) -> str: + import requests + + headers = kwargs.get("headers", { + "Accept": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + "User-Agent": "Mozilla/5.0 (compatible; crewai-tools DOCXLoader)" + }) + + try: + response = requests.get(url, headers=headers, timeout=30) + response.raise_for_status() + + # Create temporary file to save the DOCX content + with tempfile.NamedTemporaryFile(suffix='.docx', delete=False) as temp_file: + temp_file.write(response.content) + return temp_file.name + except Exception as e: + raise ValueError(f"Error fetching DOCX from URL {url}: {str(e)}") + + def _load_from_file(self, file_path: str, source_ref: str, DocxDocument) -> LoaderResult: + try: + doc = DocxDocument(file_path) + + text_parts = [] + for paragraph in doc.paragraphs: + if paragraph.text.strip(): + text_parts.append(paragraph.text) + + content = "\n".join(text_parts) + + metadata = { + "format": "docx", + "paragraphs": len(doc.paragraphs), + "tables": len(doc.tables) + } + + return LoaderResult( + content=content, + source=source_ref, + metadata=metadata, + doc_id=self.generate_doc_id(source_ref=source_ref, content=content) + ) + + except Exception as e: + raise ValueError(f"Error loading DOCX file: {str(e)}") diff --git a/src/crewai_tools/rag/loaders/json_loader.py b/src/crewai_tools/rag/loaders/json_loader.py new file mode 100644 index 000000000..6efab393a --- /dev/null +++ b/src/crewai_tools/rag/loaders/json_loader.py @@ -0,0 +1,69 @@ +import json + +from crewai_tools.rag.source_content import SourceContent +from crewai_tools.rag.base_loader import BaseLoader, LoaderResult + + +class JSONLoader(BaseLoader): + def load(self, source_content: SourceContent, **kwargs) -> LoaderResult: + source_ref = source_content.source_ref + content = source_content.source + + if source_content.is_url(): + content = self._load_from_url(source_ref, kwargs) + elif source_content.path_exists(): + content = self._load_from_file(source_ref) + + return self._parse_json(content, source_ref) + + def _load_from_url(self, url: str, kwargs: dict) -> str: + import requests + + headers = kwargs.get("headers", { + "Accept": "application/json", + "User-Agent": "Mozilla/5.0 (compatible; crewai-tools JSONLoader)" + }) + + try: + response = requests.get(url, headers=headers, timeout=30) + response.raise_for_status() + 
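+            # Some servers serve JSON with a text/* content type, so the return
+            # below probes the body via response.json() rather than trusting the
+            # Content-Type header; valid JSON is re-serialized pretty-printed.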
return response.text if not self._is_json_response(response) else json.dumps(response.json(), indent=2) + except Exception as e: + raise ValueError(f"Error fetching JSON from URL {url}: {str(e)}") + + def _is_json_response(self, response) -> bool: + try: + response.json() + return True + except ValueError: + return False + + def _load_from_file(self, path: str) -> str: + with open(path, "r", encoding="utf-8") as file: + return file.read() + + def _parse_json(self, content: str, source_ref: str) -> LoaderResult: + try: + data = json.loads(content) + if isinstance(data, dict): + text = "\n".join(f"{k}: {json.dumps(v, indent=0)}" for k, v in data.items()) + elif isinstance(data, list): + text = "\n".join(json.dumps(item, indent=0) for item in data) + else: + text = json.dumps(data, indent=0) + + metadata = { + "format": "json", + "type": type(data).__name__, + "size": len(data) if isinstance(data, (list, dict)) else 1 + } + except json.JSONDecodeError as e: + text = content + metadata = {"format": "json", "parse_error": str(e)} + + return LoaderResult( + content=text, + source=source_ref, + metadata=metadata, + doc_id=self.generate_doc_id(source_ref=source_ref, content=text) + ) diff --git a/src/crewai_tools/rag/loaders/mdx_loader.py b/src/crewai_tools/rag/loaders/mdx_loader.py new file mode 100644 index 000000000..6da9dc896 --- /dev/null +++ b/src/crewai_tools/rag/loaders/mdx_loader.py @@ -0,0 +1,59 @@ +import re + +from crewai_tools.rag.base_loader import BaseLoader, LoaderResult +from crewai_tools.rag.source_content import SourceContent + +class MDXLoader(BaseLoader): + def load(self, source_content: SourceContent, **kwargs) -> LoaderResult: + source_ref = source_content.source_ref + content = source_content.source + + if source_content.is_url(): + content = self._load_from_url(source_ref, kwargs) + elif source_content.path_exists(): + content = self._load_from_file(source_ref) + + return self._parse_mdx(content, source_ref) + + def _load_from_url(self, url: str, kwargs: dict) -> str: + import requests + + headers = kwargs.get("headers", { + "Accept": "text/markdown, text/x-markdown, text/plain", + "User-Agent": "Mozilla/5.0 (compatible; crewai-tools MDXLoader)" + }) + + try: + response = requests.get(url, headers=headers, timeout=30) + response.raise_for_status() + return response.text + except Exception as e: + raise ValueError(f"Error fetching MDX from URL {url}: {str(e)}") + + def _load_from_file(self, path: str) -> str: + with open(path, "r", encoding="utf-8") as file: + return file.read() + + def _parse_mdx(self, content: str, source_ref: str) -> LoaderResult: + cleaned_content = content + + # Remove import statements + cleaned_content = re.sub(r'^import\s+.*?\n', '', cleaned_content, flags=re.MULTILINE) + + # Remove export statements + cleaned_content = re.sub(r'^export\s+.*?(?:\n|$)', '', cleaned_content, flags=re.MULTILINE) + + # Remove JSX tags (simple approach) + cleaned_content = re.sub(r'<[^>]+>', '', cleaned_content) + + # Clean up extra whitespace + cleaned_content = re.sub(r'\n\s*\n\s*\n', '\n\n', cleaned_content) + cleaned_content = cleaned_content.strip() + + metadata = {"format": "mdx"} + return LoaderResult( + content=cleaned_content, + source=source_ref, + metadata=metadata, + doc_id=self.generate_doc_id(source_ref=source_ref, content=cleaned_content) + ) diff --git a/src/crewai_tools/rag/loaders/text_loader.py b/src/crewai_tools/rag/loaders/text_loader.py new file mode 100644 index 000000000..a97cf29f4 --- /dev/null +++ b/src/crewai_tools/rag/loaders/text_loader.py @@ 
-0,0 +1,28 @@ + +from crewai_tools.rag.base_loader import BaseLoader, LoaderResult +from crewai_tools.rag.source_content import SourceContent + + +class TextFileLoader(BaseLoader): + def load(self, source_content: SourceContent, **kwargs) -> LoaderResult: + source_ref = source_content.source_ref + if not source_content.path_exists(): + raise FileNotFoundError(f"The following file does not exist: {source_content.source}") + + with open(source_content.source, "r", encoding="utf-8") as file: + content = file.read() + + return LoaderResult( + content=content, + source=source_ref, + doc_id=self.generate_doc_id(source_ref=source_ref, content=content) + ) + + +class TextLoader(BaseLoader): + def load(self, source_content: SourceContent, **kwargs) -> LoaderResult: + return LoaderResult( + content=source_content.source, + source=source_content.source_ref, + doc_id=self.generate_doc_id(content=source_content.source) + ) diff --git a/src/crewai_tools/rag/loaders/webpage_loader.py b/src/crewai_tools/rag/loaders/webpage_loader.py new file mode 100644 index 000000000..4fcb1e0c4 --- /dev/null +++ b/src/crewai_tools/rag/loaders/webpage_loader.py @@ -0,0 +1,47 @@ +import re +import requests +from bs4 import BeautifulSoup + +from crewai_tools.rag.base_loader import BaseLoader, LoaderResult +from crewai_tools.rag.source_content import SourceContent + +class WebPageLoader(BaseLoader): + def load(self, source_content: SourceContent, **kwargs) -> LoaderResult: + url = source_content.source + headers = kwargs.get("headers", { + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36", + "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9", + "Accept-Language": "en-US,en;q=0.9", + }) + + try: + response = requests.get(url, timeout=15, headers=headers) + response.encoding = response.apparent_encoding + + soup = BeautifulSoup(response.text, "html.parser") + + for script in soup(["script", "style"]): + script.decompose() + + text = soup.get_text(" ") + text = re.sub("[ \t]+", " ", text) + text = re.sub("\\s+\n\\s+", "\n", text) + text = text.strip() + + title = soup.title.string.strip() if soup.title and soup.title.string else "" + metadata = { + "url": url, + "title": title, + "status_code": response.status_code, + "content_type": response.headers.get("content-type", "") + } + + return LoaderResult( + content=text, + source=url, + metadata=metadata, + doc_id=self.generate_doc_id(source_ref=url, content=text) + ) + + except Exception as e: + raise ValueError(f"Error loading webpage {url}: {str(e)}") diff --git a/src/crewai_tools/rag/loaders/xml_loader.py b/src/crewai_tools/rag/loaders/xml_loader.py new file mode 100644 index 000000000..ffafdb9d9 --- /dev/null +++ b/src/crewai_tools/rag/loaders/xml_loader.py @@ -0,0 +1,61 @@ +import os +import xml.etree.ElementTree as ET + +from crewai_tools.rag.base_loader import BaseLoader, LoaderResult +from crewai_tools.rag.source_content import SourceContent + +class XMLLoader(BaseLoader): + def load(self, source_content: SourceContent, **kwargs) -> LoaderResult: + source_ref = source_content.source_ref + content = source_content.source + + if source_content.is_url(): + content = self._load_from_url(source_ref, kwargs) + elif os.path.exists(source_ref): + content = self._load_from_file(source_ref) + + return self._parse_xml(content, source_ref) + + def _load_from_url(self, url: str, kwargs: dict) -> str: + 
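+        # requests is imported lazily, inside the method, matching the other
+        # loaders in this package.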
import requests + + headers = kwargs.get("headers", { + "Accept": "application/xml, text/xml, text/plain", + "User-Agent": "Mozilla/5.0 (compatible; crewai-tools XMLLoader)" + }) + + try: + response = requests.get(url, headers=headers, timeout=30) + response.raise_for_status() + return response.text + except Exception as e: + raise ValueError(f"Error fetching XML from URL {url}: {str(e)}") + + def _load_from_file(self, path: str) -> str: + with open(path, "r", encoding="utf-8") as file: + return file.read() + + def _parse_xml(self, content: str, source_ref: str) -> LoaderResult: + try: + if content.strip().startswith('<'): + root = ET.fromstring(content) + else: + root = ET.parse(source_ref).getroot() + + text_parts = [] + for text_content in root.itertext(): + if text_content and text_content.strip(): + text_parts.append(text_content.strip()) + + text = "\n".join(text_parts) + metadata = {"format": "xml", "root_tag": root.tag} + except ET.ParseError as e: + text = content + metadata = {"format": "xml", "parse_error": str(e)} + + return LoaderResult( + content=text, + source=source_ref, + metadata=metadata, + doc_id=self.generate_doc_id(source_ref=source_ref, content=text) + ) diff --git a/src/crewai_tools/rag/misc.py b/src/crewai_tools/rag/misc.py new file mode 100644 index 000000000..5b95f804e --- /dev/null +++ b/src/crewai_tools/rag/misc.py @@ -0,0 +1,4 @@ +import hashlib + +def compute_sha256(content: str) -> str: + return hashlib.sha256(content.encode("utf-8")).hexdigest() diff --git a/src/crewai_tools/rag/source_content.py b/src/crewai_tools/rag/source_content.py new file mode 100644 index 000000000..59530c8d8 --- /dev/null +++ b/src/crewai_tools/rag/source_content.py @@ -0,0 +1,46 @@ +import os +from urllib.parse import urlparse +from typing import TYPE_CHECKING +from pathlib import Path +from functools import cached_property + +from crewai_tools.rag.misc import compute_sha256 + +if TYPE_CHECKING: + from crewai_tools.rag.data_types import DataType + + +class SourceContent: + def __init__(self, source: str | Path): + self.source = str(source) + + def is_url(self) -> bool: + if not isinstance(self.source, str): + return False + try: + parsed_url = urlparse(self.source) + return bool(parsed_url.scheme and parsed_url.netloc) + except Exception: + return False + + def path_exists(self) -> bool: + return os.path.exists(self.source) + + @cached_property + def data_type(self) -> "DataType": + from crewai_tools.rag.data_types import DataTypes + + return DataTypes.from_content(self.source) + + @cached_property + def source_ref(self) -> str: + """" + Returns the source reference for the content. + If the content is a URL or a local file, returns the source. + Otherwise, returns the hash of the content. 
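+
+        Illustrative examples (paths/URLs are hypothetical):
+            SourceContent("https://example.com").source_ref -> "https://example.com"
+            SourceContent("free-form text").source_ref      -> sha256 hex digest of the text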
+ """ + + if self.is_url() or self.path_exists(): + return self.source + + return compute_sha256(self.source) diff --git a/tests/rag/__init__.py b/tests/rag/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/rag/test_csv_loader.py b/tests/rag/test_csv_loader.py new file mode 100644 index 000000000..596cb4d58 --- /dev/null +++ b/tests/rag/test_csv_loader.py @@ -0,0 +1,130 @@ +import os +import tempfile +import pytest +from unittest.mock import patch, Mock + +from crewai_tools.rag.loaders.csv_loader import CSVLoader +from crewai_tools.rag.base_loader import LoaderResult +from crewai_tools.rag.source_content import SourceContent + + +@pytest.fixture +def temp_csv_file(): + created_files = [] + + def _create(content: str): + f = tempfile.NamedTemporaryFile(mode="w", suffix=".csv", delete=False) + f.write(content) + f.close() + created_files.append(f.name) + return f.name + + yield _create + + for path in created_files: + os.unlink(path) + + +class TestCSVLoader: + def test_load_csv_from_file(self, temp_csv_file): + path = temp_csv_file("name,age,city\nJohn,25,New York\nJane,30,Chicago") + loader = CSVLoader() + result = loader.load(SourceContent(path)) + + assert isinstance(result, LoaderResult) + assert "Headers: name | age | city" in result.content + assert "Row 1: name: John | age: 25 | city: New York" in result.content + assert "Row 2: name: Jane | age: 30 | city: Chicago" in result.content + assert result.metadata == { + "format": "csv", + "columns": ["name", "age", "city"], + "rows": 2, + } + assert result.source == path + assert result.doc_id + + def test_load_csv_with_empty_values(self, temp_csv_file): + path = temp_csv_file("name,age,city\nJohn,,New York\n,30,") + result = CSVLoader().load(SourceContent(path)) + + assert "Row 1: name: John | city: New York" in result.content + assert "Row 2: age: 30" in result.content + assert result.metadata["rows"] == 2 + + def test_load_csv_malformed(self, temp_csv_file): + path = temp_csv_file("invalid,csv\nunclosed quote \"missing") + result = CSVLoader().load(SourceContent(path)) + + assert "Headers: invalid | csv" in result.content + assert 'Row 1: invalid: unclosed quote "missing' in result.content + assert result.metadata["columns"] == ["invalid", "csv"] + + def test_load_csv_empty_file(self, temp_csv_file): + path = temp_csv_file("") + result = CSVLoader().load(SourceContent(path)) + + assert result.content == "" + assert result.metadata["rows"] == 0 + + def test_load_csv_text_input(self): + raw_csv = "col1,col2\nvalue1,value2\nvalue3,value4" + result = CSVLoader().load(SourceContent(raw_csv)) + + assert "Headers: col1 | col2" in result.content + assert "Row 1: col1: value1 | col2: value2" in result.content + assert "Row 2: col1: value3 | col2: value4" in result.content + assert result.metadata["columns"] == ["col1", "col2"] + assert result.metadata["rows"] == 2 + + def test_doc_id_is_deterministic(self, temp_csv_file): + path = temp_csv_file("name,value\ntest,123") + loader = CSVLoader() + + result1 = loader.load(SourceContent(path)) + result2 = loader.load(SourceContent(path)) + + assert result1.doc_id == result2.doc_id + + @patch("requests.get") + def test_load_csv_from_url(self, mock_get): + mock_get.return_value = Mock( + text="name,value\ntest,123", + raise_for_status=Mock(return_value=None) + ) + + result = CSVLoader().load(SourceContent("https://example.com/data.csv")) + + assert "Headers: name | value" in result.content + assert "Row 1: name: test | value: 123" in result.content + headers = 
mock_get.call_args[1]["headers"] + assert "text/csv" in headers["Accept"] + assert "crewai-tools CSVLoader" in headers["User-Agent"] + + @patch("requests.get") + def test_load_csv_with_custom_headers(self, mock_get): + mock_get.return_value = Mock( + text="data,value\ntest,456", + raise_for_status=Mock(return_value=None) + ) + headers = {"Authorization": "Bearer token", "Custom-Header": "value"} + result = CSVLoader().load(SourceContent("https://example.com/data.csv"), headers=headers) + + assert "Headers: data | value" in result.content + assert mock_get.call_args[1]["headers"] == headers + + @patch("requests.get") + def test_csv_loader_handles_network_errors(self, mock_get): + mock_get.side_effect = Exception("Network error") + loader = CSVLoader() + + with pytest.raises(ValueError, match="Error fetching CSV from URL"): + loader.load(SourceContent("https://example.com/data.csv")) + + @patch("requests.get") + def test_csv_loader_handles_http_error(self, mock_get): + mock_get.return_value = Mock() + mock_get.return_value.raise_for_status.side_effect = Exception("404 Not Found") + loader = CSVLoader() + + with pytest.raises(ValueError, match="Error fetching CSV from URL"): + loader.load(SourceContent("https://example.com/notfound.csv")) diff --git a/tests/rag/test_directory_loader.py b/tests/rag/test_directory_loader.py new file mode 100644 index 000000000..7ddb38341 --- /dev/null +++ b/tests/rag/test_directory_loader.py @@ -0,0 +1,149 @@ +import os +import tempfile +import pytest + +from crewai_tools.rag.loaders.directory_loader import DirectoryLoader +from crewai_tools.rag.base_loader import LoaderResult +from crewai_tools.rag.source_content import SourceContent + + +@pytest.fixture +def temp_directory(): + with tempfile.TemporaryDirectory() as temp_dir: + yield temp_dir + + +class TestDirectoryLoader: + def _create_file(self, directory, filename, content="test content"): + path = os.path.join(directory, filename) + with open(path, "w") as f: + f.write(content) + return path + + def test_load_non_recursive(self, temp_directory): + self._create_file(temp_directory, "file1.txt") + self._create_file(temp_directory, "file2.txt") + subdir = os.path.join(temp_directory, "subdir") + os.makedirs(subdir) + self._create_file(subdir, "file3.txt") + + loader = DirectoryLoader() + result = loader.load(SourceContent(temp_directory), recursive=False) + + assert isinstance(result, LoaderResult) + assert "file1.txt" in result.content + assert "file2.txt" in result.content + assert "file3.txt" not in result.content + assert result.metadata["total_files"] == 2 + + def test_load_recursive(self, temp_directory): + self._create_file(temp_directory, "file1.txt") + nested = os.path.join(temp_directory, "subdir", "nested") + os.makedirs(nested) + self._create_file(os.path.join(temp_directory, "subdir"), "file2.txt") + self._create_file(nested, "file3.txt") + + loader = DirectoryLoader() + result = loader.load(SourceContent(temp_directory), recursive=True) + + assert all(f"file{i}.txt" in result.content for i in range(1, 4)) + + def test_include_and_exclude_extensions(self, temp_directory): + self._create_file(temp_directory, "a.txt") + self._create_file(temp_directory, "b.py") + self._create_file(temp_directory, "c.md") + + loader = DirectoryLoader() + result = loader.load(SourceContent(temp_directory), include_extensions=[".txt", ".py"]) + assert "a.txt" in result.content + assert "b.py" in result.content + assert "c.md" not in result.content + + result2 = loader.load(SourceContent(temp_directory), 
exclude_extensions=[".py", ".md"]) + assert "a.txt" in result2.content + assert "b.py" not in result2.content + assert "c.md" not in result2.content + + def test_max_files_limit(self, temp_directory): + for i in range(5): + self._create_file(temp_directory, f"file{i}.txt") + + loader = DirectoryLoader() + result = loader.load(SourceContent(temp_directory), max_files=3) + + assert result.metadata["total_files"] == 3 + assert all(f"file{i}.txt" in result.content for i in range(3)) + + def test_hidden_files_and_dirs_excluded(self, temp_directory): + self._create_file(temp_directory, "visible.txt", "visible") + self._create_file(temp_directory, ".hidden.txt", "hidden") + + hidden_dir = os.path.join(temp_directory, ".hidden") + os.makedirs(hidden_dir) + self._create_file(hidden_dir, "inside_hidden.txt") + + loader = DirectoryLoader() + result = loader.load(SourceContent(temp_directory), recursive=True) + + assert "visible.txt" in result.content + assert ".hidden.txt" not in result.content + assert "inside_hidden.txt" not in result.content + + def test_directory_does_not_exist(self): + loader = DirectoryLoader() + with pytest.raises(FileNotFoundError, match="Directory does not exist"): + loader.load(SourceContent("/path/does/not/exist")) + + def test_path_is_not_a_directory(self): + with tempfile.NamedTemporaryFile() as f: + loader = DirectoryLoader() + with pytest.raises(ValueError, match="Path is not a directory"): + loader.load(SourceContent(f.name)) + + def test_url_not_supported(self): + loader = DirectoryLoader() + with pytest.raises(ValueError, match="URL directory loading is not supported"): + loader.load(SourceContent("https://example.com")) + + def test_processing_error_handling(self, temp_directory): + self._create_file(temp_directory, "valid.txt") + error_file = self._create_file(temp_directory, "error.txt") + + loader = DirectoryLoader() + original_method = loader._process_single_file + + def mock(file_path): + if "error" in file_path: + raise ValueError("Mock error") + return original_method(file_path) + + loader._process_single_file = mock + result = loader.load(SourceContent(temp_directory)) + + assert "valid.txt" in result.content + assert "error.txt (ERROR)" in result.content + assert result.metadata["errors"] == 1 + assert len(result.metadata["error_details"]) == 1 + + def test_metadata_structure(self, temp_directory): + self._create_file(temp_directory, "test.txt", "Sample") + + loader = DirectoryLoader() + result = loader.load(SourceContent(temp_directory)) + metadata = result.metadata + + expected_keys = { + "format", "directory_path", "total_files", "processed_files", + "errors", "file_details", "error_details" + } + + assert expected_keys.issubset(metadata) + assert all(k in metadata["file_details"][0] for k in ("path", "metadata", "source")) + + def test_empty_directory(self, temp_directory): + loader = DirectoryLoader() + result = loader.load(SourceContent(temp_directory)) + + assert result.content == "" + assert result.metadata["total_files"] == 0 + assert result.metadata["processed_files"] == 0 diff --git a/tests/rag/test_docx_loader.py b/tests/rag/test_docx_loader.py new file mode 100644 index 000000000..f95aa0662 --- /dev/null +++ b/tests/rag/test_docx_loader.py @@ -0,0 +1,135 @@ +import tempfile +import pytest +from unittest.mock import patch, Mock + +from crewai_tools.rag.loaders.docx_loader import DOCXLoader +from crewai_tools.rag.base_loader import LoaderResult +from crewai_tools.rag.source_content import SourceContent + + +class TestDOCXLoader: + 
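+    # python-docx's Document class is stubbed throughout these tests, so no
+    # real .docx bytes are ever parsed.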
@patch('docx.Document') + def test_load_docx_from_file(self, mock_docx_class): + mock_doc = Mock() + mock_doc.paragraphs = [ + Mock(text="First paragraph"), + Mock(text="Second paragraph"), + Mock(text=" ") # Blank paragraph + ] + mock_doc.tables = [] + mock_docx_class.return_value = mock_doc + + with tempfile.NamedTemporaryFile(suffix='.docx') as f: + loader = DOCXLoader() + result = loader.load(SourceContent(f.name)) + + assert isinstance(result, LoaderResult) + assert result.content == "First paragraph\nSecond paragraph" + assert result.metadata == {"format": "docx", "paragraphs": 3, "tables": 0} + assert result.source == f.name + + @patch('docx.Document') + def test_load_docx_with_tables(self, mock_docx_class): + mock_doc = Mock() + mock_doc.paragraphs = [Mock(text="Document with table")] + mock_doc.tables = [Mock(), Mock()] + mock_docx_class.return_value = mock_doc + + with tempfile.NamedTemporaryFile(suffix='.docx') as f: + loader = DOCXLoader() + result = loader.load(SourceContent(f.name)) + + assert result.metadata["tables"] == 2 + + @patch('requests.get') + @patch('docx.Document') + @patch('tempfile.NamedTemporaryFile') + @patch('os.unlink') + def test_load_docx_from_url(self, mock_unlink, mock_tempfile, mock_docx_class, mock_get): + mock_get.return_value = Mock(content=b"fake docx content", raise_for_status=Mock()) + + mock_temp = Mock(name="/tmp/temp_docx_file.docx") + mock_temp.__enter__ = Mock(return_value=mock_temp) + mock_temp.__exit__ = Mock(return_value=None) + mock_tempfile.return_value = mock_temp + + mock_doc = Mock() + mock_doc.paragraphs = [Mock(text="Content from URL")] + mock_doc.tables = [] + mock_docx_class.return_value = mock_doc + + loader = DOCXLoader() + result = loader.load(SourceContent("https://example.com/test.docx")) + + assert "Content from URL" in result.content + assert result.source == "https://example.com/test.docx" + + headers = mock_get.call_args[1]['headers'] + assert "application/vnd.openxmlformats-officedocument.wordprocessingml.document" in headers['Accept'] + assert "crewai-tools DOCXLoader" in headers['User-Agent'] + + mock_temp.write.assert_called_once_with(b"fake docx content") + + @patch('requests.get') + @patch('docx.Document') + def test_load_docx_from_url_with_custom_headers(self, mock_docx_class, mock_get): + mock_get.return_value = Mock(content=b"fake docx content", raise_for_status=Mock()) + mock_docx_class.return_value = Mock(paragraphs=[], tables=[]) + + loader = DOCXLoader() + custom_headers = {"Authorization": "Bearer token"} + + with patch('tempfile.NamedTemporaryFile'), patch('os.unlink'): + loader.load(SourceContent("https://example.com/test.docx"), headers=custom_headers) + + assert mock_get.call_args[1]['headers'] == custom_headers + + @patch('requests.get') + def test_load_docx_url_download_error(self, mock_get): + mock_get.side_effect = Exception("Network error") + + loader = DOCXLoader() + with pytest.raises(ValueError, match="Error fetching DOCX from URL"): + loader.load(SourceContent("https://example.com/test.docx")) + + @patch('requests.get') + def test_load_docx_url_http_error(self, mock_get): + mock_get.return_value = Mock(raise_for_status=Mock(side_effect=Exception("404 Not Found"))) + + loader = DOCXLoader() + with pytest.raises(ValueError, match="Error fetching DOCX from URL"): + loader.load(SourceContent("https://example.com/notfound.docx")) + + def test_load_docx_invalid_source(self): + loader = DOCXLoader() + with pytest.raises(ValueError, match="Source must be a valid file path or URL"): + 
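+            # A plain string that is neither an existing path nor a URL has no
+            # DOCX representation, so the loader is expected to reject it.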
loader.load(SourceContent("not_a_file_or_url")) + + @patch('docx.Document') + def test_load_docx_parsing_error(self, mock_docx_class): + mock_docx_class.side_effect = Exception("Invalid DOCX file") + + with tempfile.NamedTemporaryFile(suffix='.docx') as f: + loader = DOCXLoader() + with pytest.raises(ValueError, match="Error loading DOCX file"): + loader.load(SourceContent(f.name)) + + @patch('docx.Document') + def test_load_docx_empty_document(self, mock_docx_class): + mock_docx_class.return_value = Mock(paragraphs=[], tables=[]) + + with tempfile.NamedTemporaryFile(suffix='.docx') as f: + loader = DOCXLoader() + result = loader.load(SourceContent(f.name)) + + assert result.content == "" + assert result.metadata == {"paragraphs": 0, "tables": 0, "format": "docx"} + + @patch('docx.Document') + def test_docx_doc_id_generation(self, mock_docx_class): + mock_docx_class.return_value = Mock(paragraphs=[Mock(text="Consistent content")], tables=[]) + + with tempfile.NamedTemporaryFile(suffix='.docx') as f: + loader = DOCXLoader() + source = SourceContent(f.name) + assert loader.load(source).doc_id == loader.load(source).doc_id diff --git a/tests/rag/test_json_loader.py b/tests/rag/test_json_loader.py new file mode 100644 index 000000000..b57480e16 --- /dev/null +++ b/tests/rag/test_json_loader.py @@ -0,0 +1,180 @@ +import json +import os +import tempfile +import pytest +from unittest.mock import patch, Mock + +from crewai_tools.rag.loaders.json_loader import JSONLoader +from crewai_tools.rag.base_loader import LoaderResult +from crewai_tools.rag.source_content import SourceContent + + +class TestJSONLoader: + def _create_temp_json_file(self, data) -> str: + """Helper to write JSON data to a temporary file and return its path.""" + with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f: + json.dump(data, f) + return f.name + + def _create_temp_raw_file(self, content: str) -> str: + """Helper to write raw content to a temporary file and return its path.""" + with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f: + f.write(content) + return f.name + + def _load_from_path(self, path) -> LoaderResult: + loader = JSONLoader() + return loader.load(SourceContent(path)) + + def test_load_json_dict(self): + path = self._create_temp_json_file({"name": "John", "age": 30, "items": ["a", "b", "c"]}) + try: + result = self._load_from_path(path) + assert isinstance(result, LoaderResult) + assert all(k in result.content for k in ["name", "John", "age", "30"]) + assert result.metadata == { + "format": "json", "type": "dict", "size": 3 + } + assert result.source == path + finally: + os.unlink(path) + + def test_load_json_list(self): + path = self._create_temp_json_file([ + {"id": 1, "name": "Item 1"}, + {"id": 2, "name": "Item 2"}, + ]) + try: + result = self._load_from_path(path) + assert result.metadata["type"] == "list" + assert result.metadata["size"] == 2 + assert all(item in result.content for item in ["Item 1", "Item 2"]) + finally: + os.unlink(path) + + @pytest.mark.parametrize("value, expected_type", [ + ("simple string value", "str"), + (42, "int"), + ]) + def test_load_json_primitives(self, value, expected_type): + path = self._create_temp_json_file(value) + try: + result = self._load_from_path(path) + assert result.metadata["type"] == expected_type + assert result.metadata["size"] == 1 + assert str(value) in result.content + finally: + os.unlink(path) + + def test_load_malformed_json(self): + path = self._create_temp_raw_file('{"invalid": json,}') + try: + 
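+            # Malformed JSON must not raise: JSONLoader._parse_json falls back
+            # to the raw text and records a parse_error in the metadata.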
result = self._load_from_path(path) + assert result.metadata["format"] == "json" + assert "parse_error" in result.metadata + assert result.content == '{"invalid": json,}' + finally: + os.unlink(path) + + def test_load_empty_file(self): + path = self._create_temp_raw_file('') + try: + result = self._load_from_path(path) + assert "parse_error" in result.metadata + assert result.content == '' + finally: + os.unlink(path) + + def test_load_text_input(self): + json_text = '{"message": "hello", "count": 5}' + loader = JSONLoader() + result = loader.load(SourceContent(json_text)) + assert all(part in result.content for part in ["message", "hello", "count", "5"]) + assert result.metadata["type"] == "dict" + assert result.metadata["size"] == 2 + + def test_load_complex_nested_json(self): + data = { + "users": [ + {"id": 1, "profile": {"name": "Alice", "settings": {"theme": "dark"}}}, + {"id": 2, "profile": {"name": "Bob", "settings": {"theme": "light"}}} + ], + "meta": {"total": 2, "version": "1.0"} + } + path = self._create_temp_json_file(data) + try: + result = self._load_from_path(path) + for value in ["Alice", "Bob", "dark", "light"]: + assert value in result.content + assert result.metadata["size"] == 2 # top-level keys + finally: + os.unlink(path) + + def test_consistent_doc_id(self): + path = self._create_temp_json_file({"test": "data"}) + try: + result1 = self._load_from_path(path) + result2 = self._load_from_path(path) + assert result1.doc_id == result2.doc_id + finally: + os.unlink(path) + + # ------------------------------ + # URL-based tests + # ------------------------------ + + @patch('requests.get') + def test_url_response_valid_json(self, mock_get): + mock_get.return_value = Mock( + text='{"key": "value", "number": 123}', + json=Mock(return_value={"key": "value", "number": 123}), + raise_for_status=Mock() + ) + + loader = JSONLoader() + result = loader.load(SourceContent("https://api.example.com/data.json")) + + assert all(val in result.content for val in ["key", "value", "number", "123"]) + headers = mock_get.call_args[1]['headers'] + assert "application/json" in headers['Accept'] + assert "crewai-tools JSONLoader" in headers['User-Agent'] + + @patch('requests.get') + def test_url_response_not_json(self, mock_get): + mock_get.return_value = Mock( + text='{"key": "value"}', + json=Mock(side_effect=ValueError("Not JSON")), + raise_for_status=Mock() + ) + + loader = JSONLoader() + result = loader.load(SourceContent("https://example.com/data.json")) + assert all(part in result.content for part in ["key", "value"]) + + @patch('requests.get') + def test_url_with_custom_headers(self, mock_get): + mock_get.return_value = Mock( + text='{"data": "test"}', + json=Mock(return_value={"data": "test"}), + raise_for_status=Mock() + ) + headers = {"Authorization": "Bearer token", "Custom-Header": "value"} + + loader = JSONLoader() + loader.load(SourceContent("https://api.example.com/data.json"), headers=headers) + + assert mock_get.call_args[1]['headers'] == headers + + @patch('requests.get') + def test_url_network_failure(self, mock_get): + mock_get.side_effect = Exception("Network error") + loader = JSONLoader() + with pytest.raises(ValueError, match="Error fetching JSON from URL"): + loader.load(SourceContent("https://api.example.com/data.json")) + + @patch('requests.get') + def test_url_http_error(self, mock_get): + mock_get.return_value = Mock(raise_for_status=Mock(side_effect=Exception("404"))) + loader = JSONLoader() + with pytest.raises(ValueError, match="Error fetching JSON from URL"): + 
loader.load(SourceContent("https://api.example.com/404.json")) diff --git a/tests/rag/test_mdx_loader.py b/tests/rag/test_mdx_loader.py new file mode 100644 index 000000000..ef7944c28 --- /dev/null +++ b/tests/rag/test_mdx_loader.py @@ -0,0 +1,176 @@ +import os +import tempfile +import pytest +from unittest.mock import patch, Mock + +from crewai_tools.rag.loaders.mdx_loader import MDXLoader +from crewai_tools.rag.base_loader import LoaderResult +from crewai_tools.rag.source_content import SourceContent + + +class TestMDXLoader: + + def _write_temp_mdx(self, content): + f = tempfile.NamedTemporaryFile(mode='w', suffix='.mdx', delete=False) + f.write(content) + f.close() + return f.name + + def _load_from_file(self, content): + path = self._write_temp_mdx(content) + try: + loader = MDXLoader() + return loader.load(SourceContent(path)), path + finally: + os.unlink(path) + + def test_load_basic_mdx_file(self): + content = """ +import Component from './Component' +export const meta = { title: 'Test' } + +# Test MDX File + +This is a **markdown** file with JSX. + + + +Some more content. + +
+<div className="wrapper">
+  <p>Nested content</p>
+</div>
+"""
+        result, path = self._load_from_file(content)
+
+        assert isinstance(result, LoaderResult)
+        assert all(tag not in result.content for tag in ["import", "export", "<div"])
+        assert all(text in result.content for text in ["# Test MDX File", "markdown", "Some more content", "Nested content"])
+        assert result.metadata["format"] == "mdx"
+        assert result.source == path
+
+    def test_mdx_multiple_imports_exports(self):
+        content = """
+import React from 'react'
+import { useState } from 'react'
+import CustomComponent from './custom'
+
+export default function Layout() { return null }
+export const config = { test: true }
+
+# Content
+
+Regular markdown content here.
+"""
+        result, _ = self._load_from_file(content)
+        assert "# Content" in result.content
+        assert "Regular markdown content here." in result.content
+        assert "import" not in result.content and "export" not in result.content
+
+    def test_complex_jsx_cleanup(self):
+        content = """
+# MDX with Complex JSX
+
+<Alert type="info">
+  Info: This is important information.
+  • Item 1
+  • Item 2
+</Alert>
+
+Regular paragraph text.
+
+<Card>Nested content inside component</Card>
+"""
+        result, _ = self._load_from_file(content)
+        assert all(tag not in result.content for tag in ["<Alert", "</Alert>", "<Card>", "</Card>"])
+        assert all(text in result.content for text in ["Info: This is important information.", "• Item 1", "Regular paragraph text.", "Nested content inside component"])
+
+    def test_jsx_only_content(self):
+        content = """
+<Component>
+  Only JSX content
+</Component>
+
+<div>
+  No markdown here
+</div>
+"""
+        result, _ = self._load_from_file(content)
+        assert all(tag not in result.content for tag in ["<Component>", "</Component>", "<div>", "</div>"])
+        assert "Only JSX content" in result.content
+        assert "No markdown here" in result.content
+
+    @patch('requests.get')
+    def test_load_mdx_from_url(self, mock_get):
+        mock_get.return_value = Mock(text="# MDX from URL\n\nContent here.\n\n<Component />", raise_for_status=lambda: None)
+        loader = MDXLoader()
+        result = loader.load(SourceContent("https://example.com/content.mdx"))
+        assert "# MDX from URL" in result.content
+        assert "<Component" not in result.content
+
+    @patch('requests.get')
+    def test_load_mdx_with_custom_headers(self, mock_get):
+        mock_get.return_value = Mock(text="# Custom headers test", raise_for_status=lambda: None)
+        loader = MDXLoader()
+        loader.load(SourceContent("https://example.com"), headers={"Authorization": "Bearer token"})
+        assert mock_get.call_args[1]['headers'] == {"Authorization": "Bearer token"}
+
+    @patch('requests.get')
+    def test_mdx_url_fetch_error(self, mock_get):
+        mock_get.side_effect = Exception("Network error")
+        with pytest.raises(ValueError, match="Error fetching MDX from URL"):
+            MDXLoader().load(SourceContent("https://example.com"))
+
+    def test_load_inline_mdx_text(self):
+        content = """# Inline MDX\n\nimport Something from 'somewhere'\n\nContent with <Component />.\n\nexport const meta = { title: 'Test' }"""
+        loader = MDXLoader()
+        result = loader.load(SourceContent(content))
+        assert "# Inline MDX" in result.content
+        assert "Content with ." in result.content
+
+    def test_empty_result_after_cleaning(self):
+        content = """
+import Something from 'somewhere'
+export const config = {}
+<Component />
    +""" + result, _ = self._load_from_file(content) + assert result.content.strip() == "" + + def test_edge_case_parsing(self): + content = """ +# Title + + +Multi-line +JSX content + + +import { a, b } from 'module' + +export { x, y } + +Final text. +""" + result, _ = self._load_from_file(content) + assert "# Title" in result.content + assert "JSX content" in result.content + assert "Final text." in result.content + assert all(phrase not in result.content for phrase in ["import {", "export {", ""]) diff --git a/tests/rag/test_text_loaders.py b/tests/rag/test_text_loaders.py new file mode 100644 index 000000000..e72738778 --- /dev/null +++ b/tests/rag/test_text_loaders.py @@ -0,0 +1,160 @@ +import hashlib +import os +import tempfile +import pytest + +from crewai_tools.rag.loaders.text_loader import TextFileLoader, TextLoader +from crewai_tools.rag.base_loader import LoaderResult +from crewai_tools.rag.source_content import SourceContent + + +def write_temp_file(content, suffix=".txt", encoding="utf-8"): + with tempfile.NamedTemporaryFile(mode="w", suffix=suffix, delete=False, encoding=encoding) as f: + f.write(content) + return f.name + + +def cleanup_temp_file(path): + try: + os.unlink(path) + except FileNotFoundError: + pass + + +class TestTextFileLoader: + def test_basic_text_file(self): + content = "This is test content\nWith multiple lines\nAnd more text" + path = write_temp_file(content) + try: + result = TextFileLoader().load(SourceContent(path)) + assert isinstance(result, LoaderResult) + assert result.content == content + assert result.source == path + assert result.doc_id + assert result.metadata in (None, {}) + finally: + cleanup_temp_file(path) + + def test_empty_file(self): + path = write_temp_file("") + try: + result = TextFileLoader().load(SourceContent(path)) + assert result.content == "" + finally: + cleanup_temp_file(path) + + def test_unicode_content(self): + content = "Hello 世界 🌠émojis 🎉 åäö" + path = write_temp_file(content) + try: + result = TextFileLoader().load(SourceContent(path)) + assert content in result.content + finally: + cleanup_temp_file(path) + + def test_large_file(self): + content = "\n".join(f"Line {i}" for i in range(100)) + path = write_temp_file(content) + try: + result = TextFileLoader().load(SourceContent(path)) + assert "Line 0" in result.content + assert "Line 99" in result.content + assert result.content.count("\n") == 99 + finally: + cleanup_temp_file(path) + + def test_missing_file(self): + with pytest.raises(FileNotFoundError): + TextFileLoader().load(SourceContent("/nonexistent/path.txt")) + + def test_permission_denied(self): + path = write_temp_file("Some content") + os.chmod(path, 0o000) + try: + with pytest.raises(PermissionError): + TextFileLoader().load(SourceContent(path)) + finally: + os.chmod(path, 0o644) + cleanup_temp_file(path) + + def test_doc_id_consistency(self): + content = "Consistent content" + path = write_temp_file(content) + try: + loader = TextFileLoader() + result1 = loader.load(SourceContent(path)) + result2 = loader.load(SourceContent(path)) + expected_id = hashlib.sha256((path + content).encode("utf-8")).hexdigest() + assert result1.doc_id == result2.doc_id == expected_id + finally: + cleanup_temp_file(path) + + def test_various_extensions(self): + content = "Same content" + for ext in [".txt", ".md", ".log", ".json"]: + path = write_temp_file(content, suffix=ext) + try: + result = TextFileLoader().load(SourceContent(path)) + assert result.content == content + finally: + cleanup_temp_file(path) + + +class 
TestTextLoader: + def test_basic_text(self): + content = "Raw text" + result = TextLoader().load(SourceContent(content)) + expected_hash = hashlib.sha256(content.encode("utf-8")).hexdigest() + assert result.content == content + assert result.source == expected_hash + assert result.doc_id == expected_hash + + def test_multiline_text(self): + content = "Line 1\nLine 2\nLine 3" + result = TextLoader().load(SourceContent(content)) + assert "Line 2" in result.content + + def test_empty_text(self): + result = TextLoader().load(SourceContent("")) + assert result.content == "" + assert result.source == hashlib.sha256("".encode("utf-8")).hexdigest() + + def test_unicode_text(self): + content = "世界 🌠émojis 🎉 åäö" + result = TextLoader().load(SourceContent(content)) + assert content in result.content + + def test_special_characters(self): + content = "!@#$$%^&*()_+-=~`{}[]\\|;:'\",.<>/?" + result = TextLoader().load(SourceContent(content)) + assert result.content == content + + def test_doc_id_uniqueness(self): + result1 = TextLoader().load(SourceContent("A")) + result2 = TextLoader().load(SourceContent("B")) + assert result1.doc_id != result2.doc_id + + def test_whitespace_text(self): + content = " \n\t " + result = TextLoader().load(SourceContent(content)) + assert result.content == content + + def test_long_text(self): + content = "A" * 10000 + result = TextLoader().load(SourceContent(content)) + assert len(result.content) == 10000 + + +class TestTextLoadersIntegration: + def test_consistency_between_loaders(self): + content = "Consistent content" + text_result = TextLoader().load(SourceContent(content)) + file_path = write_temp_file(content) + try: + file_result = TextFileLoader().load(SourceContent(file_path)) + + assert text_result.content == file_result.content + assert text_result.source != file_result.source + assert text_result.doc_id != file_result.doc_id + finally: + cleanup_temp_file(file_path) diff --git a/tests/rag/test_webpage_loader.py b/tests/rag/test_webpage_loader.py new file mode 100644 index 000000000..9e02f410b --- /dev/null +++ b/tests/rag/test_webpage_loader.py @@ -0,0 +1,137 @@ +import pytest +from unittest.mock import patch, Mock +from crewai_tools.rag.loaders.webpage_loader import WebPageLoader +from crewai_tools.rag.base_loader import LoaderResult +from crewai_tools.rag.source_content import SourceContent + + +class TestWebPageLoader: + def setup_mock_response(self, text, status_code=200, content_type="text/html"): + response = Mock() + response.text = text + response.apparent_encoding = "utf-8" + response.status_code = status_code + response.headers = {"content-type": content_type} + return response + + def setup_mock_soup(self, text, title=None, script_style_elements=None): + soup = Mock() + soup.get_text.return_value = text + soup.title = Mock(string=title) if title is not None else None + soup.return_value = script_style_elements or [] + return soup + + @patch('requests.get') + @patch('crewai_tools.rag.loaders.webpage_loader.BeautifulSoup') + def test_load_basic_webpage(self, mock_bs, mock_get): + mock_get.return_value = self.setup_mock_response("Test Page
</title></head><body>Test content</body></html>
    ") + mock_bs.return_value = self.setup_mock_soup("Test content", title="Test Page") + + loader = WebPageLoader() + result = loader.load(SourceContent("https://example.com")) + + assert isinstance(result, LoaderResult) + assert result.content == "Test content" + assert result.metadata["title"] == "Test Page" + + @patch('requests.get') + @patch('crewai_tools.rag.loaders.webpage_loader.BeautifulSoup') + def test_load_webpage_with_scripts_and_styles(self, mock_bs, mock_get): + html = """ + Page with Scripts +
</title> + <script></script> + <style></style> + </head> + <body>Visible content</body> + </html>
    + """ + mock_get.return_value = self.setup_mock_response(html) + scripts = [Mock(), Mock()] + styles = [Mock()] + for el in scripts + styles: + el.decompose = Mock() + mock_bs.return_value = self.setup_mock_soup("Page with Scripts Visible content", title="Page with Scripts", script_style_elements=scripts + styles) + + loader = WebPageLoader() + result = loader.load(SourceContent("https://example.com/with-scripts")) + + assert "Visible content" in result.content + for el in scripts + styles: + el.decompose.assert_called_once() + + @patch('requests.get') + @patch('crewai_tools.rag.loaders.webpage_loader.BeautifulSoup') + def test_text_cleaning_and_title_handling(self, mock_bs, mock_get): + mock_get.return_value = self.setup_mock_response("
<html><body>Messy text</body></html>
    ") + mock_bs.return_value = self.setup_mock_soup("Text with extra spaces\n\n More\t text \n\n", title=None) + + loader = WebPageLoader() + result = loader.load(SourceContent("https://example.com/messy-text")) + assert result.content is not None + assert result.metadata["title"] == "" + + @patch('requests.get') + @patch('crewai_tools.rag.loaders.webpage_loader.BeautifulSoup') + def test_empty_or_missing_title(self, mock_bs, mock_get): + for title in [None, ""]: + mock_get.return_value = self.setup_mock_response("Content") + mock_bs.return_value = self.setup_mock_soup("Content", title=title) + + loader = WebPageLoader() + result = loader.load(SourceContent("https://example.com")) + assert result.metadata["title"] == "" + + @patch('requests.get') + def test_custom_and_default_headers(self, mock_get): + mock_get.return_value = self.setup_mock_response("Test") + custom_headers = {"User-Agent": "Bot", "Authorization": "Bearer xyz", "Accept": "text/html"} + + with patch('crewai_tools.rag.loaders.webpage_loader.BeautifulSoup') as mock_bs: + mock_bs.return_value = self.setup_mock_soup("Test") + WebPageLoader().load(SourceContent("https://example.com"), headers=custom_headers) + + assert mock_get.call_args[1]['headers'] == custom_headers + + @patch('requests.get') + def test_error_handling(self, mock_get): + for error in [Exception("Fail"), ValueError("Bad"), ImportError("Oops")]: + mock_get.side_effect = error + with pytest.raises(ValueError, match="Error loading webpage"): + WebPageLoader().load(SourceContent("https://example.com")) + + @patch('requests.get') + def test_timeout_and_http_error(self, mock_get): + import requests + mock_get.side_effect = requests.Timeout("Timeout") + with pytest.raises(ValueError): + WebPageLoader().load(SourceContent("https://example.com")) + + mock_response = Mock() + mock_response.raise_for_status.side_effect = requests.HTTPError("404") + mock_get.side_effect = None + mock_get.return_value = mock_response + with pytest.raises(ValueError): + WebPageLoader().load(SourceContent("https://example.com/404")) + + @patch('requests.get') + @patch('crewai_tools.rag.loaders.webpage_loader.BeautifulSoup') + def test_doc_id_consistency(self, mock_bs, mock_get): + mock_get.return_value = self.setup_mock_response("Doc") + mock_bs.return_value = self.setup_mock_soup("Doc") + + loader = WebPageLoader() + result1 = loader.load(SourceContent("https://example.com")) + result2 = loader.load(SourceContent("https://example.com")) + + assert result1.doc_id == result2.doc_id + + @patch('requests.get') + @patch('crewai_tools.rag.loaders.webpage_loader.BeautifulSoup') + def test_status_code_and_content_type(self, mock_bs, mock_get): + for status in [200, 201, 301]: + mock_get.return_value = self.setup_mock_response(f"Status {status}", status_code=status) + mock_bs.return_value = self.setup_mock_soup(f"Status {status}") + result = WebPageLoader().load(SourceContent(f"https://example.com/{status}")) + assert result.metadata["status_code"] == status + + for ctype in ["text/html", "text/plain", "application/xhtml+xml"]: + mock_get.return_value = self.setup_mock_response("Content", content_type=ctype) + mock_bs.return_value = self.setup_mock_soup("Content") + result = WebPageLoader().load(SourceContent("https://example.com")) + assert result.metadata["content_type"] == ctype diff --git a/tests/rag/test_xml_loader.py b/tests/rag/test_xml_loader.py new file mode 100644 index 000000000..9e02f410b --- /dev/null +++ b/tests/rag/test_xml_loader.py @@ -0,0 +1,137 @@ +import pytest +from 
From 403bb7e2081690c0c0d408bfd2b79a4132a9e625 Mon Sep 17 00:00:00 2001 From: Mike Plachta Date: Tue, 26 Aug 2025 08:10:24 -0700 Subject: [PATCH 375/391] feat: add descriptive header to scraped website content output 
(#426) * feat: add descriptive header to scraped website content output * fix: correct typo in scraped website content header text --- .../tools/scrape_website_tool/scrape_website_tool.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py index 0e7e25ca6..bfb371275 100644 --- a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py +++ b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py @@ -65,7 +65,8 @@ class ScrapeWebsiteTool(BaseTool): page.encoding = page.apparent_encoding parsed = BeautifulSoup(page.text, "html.parser") - text = parsed.get_text(" ") + text = "The following text is scraped website content:\n\n" + text += parsed.get_text(" ") text = re.sub("[ \t]+", " ", text) text = re.sub("\\s+\n\\s+", "\n", text) return text From 992cd726c47d87d08a9c2a2e3aba7c54294f00ec Mon Sep 17 00:00:00 2001 From: Erika Shorten <110841617+erika-shorten@users.noreply.github.com> Date: Wed, 27 Aug 2025 10:33:16 -0400 Subject: [PATCH 376/391] Weaviate hybrid search (#428) * Update tool to use hybrid search * Set default alpha to 0.75 --- src/crewai_tools/tools/weaviate_tool/vector_search.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/crewai_tools/tools/weaviate_tool/vector_search.py b/src/crewai_tools/tools/weaviate_tool/vector_search.py index 13efb018f..c75dd03da 100644 --- a/src/crewai_tools/tools/weaviate_tool/vector_search.py +++ b/src/crewai_tools/tools/weaviate_tool/vector_search.py @@ -41,6 +41,7 @@ class WeaviateVectorSearchTool(BaseTool): collection_name: Optional[str] = None limit: Optional[int] = Field(default=3) headers: Optional[dict] = None + alpha: Optional[float] = Field(default=0.75) env_vars: List[EnvVar] = [ EnvVar(name="OPENAI_API_KEY", description="OpenAI API key for embedding generation and retrieval", required=True), ] @@ -110,9 +111,10 @@ class WeaviateVectorSearchTool(BaseTool): generative_config=self.generative_model, ) - response = internal_docs.query.near_text( + response = internal_docs.query.hybrid( query=query, limit=self.limit, + alpha=self.alpha ) json_response = "" for obj in response.objects: From 6562587cbad614904e93f58867f3676ae0e01f5b Mon Sep 17 00:00:00 2001 From: Mike Plachta Date: Wed, 27 Aug 2025 10:42:19 -0700 Subject: [PATCH 377/391] feat: add InvokeCrewAIAutomationTool for external crew API integration (#430) * feat: add InvokeCrewAIAutomationTool for external crew API integration * feat: add InvokeCrewAIAutomationTool class for executing CrewAI tasks programmatically --- src/crewai_tools/__init__.py | 1 + src/crewai_tools/tools/__init__.py | 1 + .../invoke_crewai_automation_tool/README.md | 159 ++++++++++++++++ .../invoke_crewai_automation_tool.py | 176 ++++++++++++++++++ 4 files changed, 337 insertions(+) create mode 100644 src/crewai_tools/tools/invoke_crewai_automation_tool/README.md create mode 100644 src/crewai_tools/tools/invoke_crewai_automation_tool/invoke_crewai_automation_tool.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 05482ae70..7fc478df8 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -36,6 +36,7 @@ from .tools import ( FirecrawlSearchTool, GithubSearchTool, HyperbrowserLoadTool, + InvokeCrewAIAutomationTool, JSONSearchTool, LinkupSearchTool, LlamaIndexTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 05219c4f7..05363f017
100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -27,6 +27,7 @@ from .firecrawl_scrape_website_tool.firecrawl_scrape_website_tool import ( from .firecrawl_search_tool.firecrawl_search_tool import FirecrawlSearchTool from .github_search_tool.github_search_tool import GithubSearchTool from .hyperbrowser_load_tool.hyperbrowser_load_tool import HyperbrowserLoadTool +from .invoke_crewai_automation_tool.invoke_crewai_automation_tool import InvokeCrewAIAutomationTool from .json_search_tool.json_search_tool import JSONSearchTool from .linkup.linkup_search_tool import LinkupSearchTool from .llamaindex_tool.llamaindex_tool import LlamaIndexTool diff --git a/src/crewai_tools/tools/invoke_crewai_automation_tool/README.md b/src/crewai_tools/tools/invoke_crewai_automation_tool/README.md new file mode 100644 index 000000000..58ab4bbcc --- /dev/null +++ b/src/crewai_tools/tools/invoke_crewai_automation_tool/README.md @@ -0,0 +1,159 @@ +# InvokeCrewAIAutomationTool + +## Description + +The InvokeCrewAIAutomationTool provides CrewAI Platform API integration with external crew services. This tool allows you to invoke and interact with CrewAI Platform automations from within your CrewAI agents, enabling seamless integration between different crew workflows. + +## Features + +- **Dynamic Input Schema**: Configure custom input parameters for different crew automations +- **Automatic Polling**: Automatically polls for task completion with configurable timeout +- **Bearer Token Authentication**: Secure API authentication using bearer tokens +- **Comprehensive Error Handling**: Robust error handling for API failures and timeouts +- **Flexible Configuration**: Support for both simple and complex crew automation workflows + +## Installation + +Install the required dependencies: + +```shell +pip install 'crewai[tools]' +``` + +## Example + +### Basic Usage + +```python +from crewai_tools import InvokeCrewAIAutomationTool + +# Basic crew automation tool +tool = InvokeCrewAIAutomationTool( + crew_api_url="https://data-analysis-crew-[...].crewai.com", + crew_bearer_token="your_bearer_token_here", + crew_name="Data Analysis Crew", + crew_description="Analyzes data and generates insights" +) + +# Use the tool +result = tool.run() +``` + +### Advanced Usage with Custom Inputs + +```python +from crewai_tools import InvokeCrewAIAutomationTool +from pydantic import Field + +# Define custom input schema +custom_inputs = { + "year": Field(..., description="Year to retrieve the report for (integer)"), + "region": Field(default="global", description="Geographic region for analysis"), + "format": Field(default="summary", description="Report format (summary, detailed, raw)") +} + +# Create tool with custom inputs +tool = InvokeCrewAIAutomationTool( + crew_api_url="https://state-of-ai-report-crew-[...].crewai.com", + crew_bearer_token="your_bearer_token_here", + crew_name="State of AI Report", + crew_description="Retrieves a comprehensive report on state of AI for a given year and region", + crew_inputs=custom_inputs, + max_polling_time=15 * 60 # 15 minutes timeout +) + +# Use with custom parameters +result = tool.run(year=2024, region="north-america", format="detailed") +``` + +### Integration with CrewAI Agents + +```python +from crewai import Agent, Task, Crew +from crewai_tools import InvokeCrewAIAutomationTool + +# Create the automation tool +market_research_tool = InvokeCrewAIAutomationTool( + crew_api_url="https://market-research-automation-crew-[...].crewai.com", + 
crew_bearer_token="your_bearer_token_here", + crew_name="Market Research Automation", + crew_description="Conducts comprehensive market research analysis", + inputs={ + "year": Field(..., description="Year to use for the market research"), + } +) + +# Create an agent with the tool +research_agent = Agent( + role="Research Coordinator", + goal="Coordinate and execute market research tasks", + backstory="You are an expert at coordinating research tasks and leveraging automation tools.", + tools=[market_research_tool], + verbose=True +) + +# Create and execute a task +research_task = Task( + description="Conduct market research on AI tools market for 2024", + agent=research_agent, + expected_output="Comprehensive market research report" +) + +crew = Crew( + agents=[research_agent], + tasks=[research_task] +) + +result = crew.kickoff() +``` + +## Arguments + +### Required Parameters + +- `crew_api_url` (str): Base URL of the CrewAI Platform automation API +- `crew_bearer_token` (str): Bearer token for API authentication +- `crew_name` (str): Name of the crew automation +- `crew_description` (str): Description of what the crew automation does + +### Optional Parameters + +- `max_polling_time` (int): Maximum time in seconds to wait for task completion (default: 600 seconds = 10 minutes) +- `crew_inputs` (dict): Dictionary defining custom input schema fields using Pydantic Field objects + +## Custom Input Schema + +When defining `crew_inputs`, use Pydantic Field objects to specify the input parameters. These have to be compatible with the crew automation you are invoking: + +```python +from pydantic import Field + +crew_inputs = { + "required_param": Field(..., description="This parameter is required"), + "optional_param": Field(default="default_value", description="This parameter is optional"), + "typed_param": Field(..., description="Integer parameter", ge=1, le=100) # With validation +} +``` + +## Error Handling + +The tool provides comprehensive error handling for common scenarios: + +- **API Connection Errors**: Network connectivity issues +- **Authentication Errors**: Invalid or expired bearer tokens +- **Timeout Errors**: Tasks that exceed the maximum polling time +- **Task Failures**: Crew automations that fail during execution + +## API Endpoints + +The tool interacts with two main API endpoints: + +- `POST {crew_api_url}/kickoff`: Starts a new crew automation task +- `GET {crew_api_url}/status/{crew_id}`: Checks the status of a running task + +## Notes + +- The tool automatically polls the status endpoint every second until completion or timeout +- Successful tasks return the result directly, while failed tasks return error information +- The bearer token should be kept secure and not hardcoded in production environments +- Consider using environment variables for sensitive configuration like bearer tokens \ No newline at end of file diff --git a/src/crewai_tools/tools/invoke_crewai_automation_tool/invoke_crewai_automation_tool.py b/src/crewai_tools/tools/invoke_crewai_automation_tool/invoke_crewai_automation_tool.py new file mode 100644 index 000000000..09b076cc1 --- /dev/null +++ b/src/crewai_tools/tools/invoke_crewai_automation_tool/invoke_crewai_automation_tool.py @@ -0,0 +1,176 @@ +from crewai.tools import BaseTool +from pydantic import BaseModel, Field, create_model +from typing import Any, Type +import requests +import time + +class InvokeCrewAIAutomationInput(BaseModel): + """Input schema for InvokeCrewAIAutomationTool.""" + prompt: str = Field(..., description="The prompt or 
query to send to the crew") + +class InvokeCrewAIAutomationTool(BaseTool): + """ + A CrewAI tool for invoking external crew/flows APIs. + + This tool provides CrewAI Platform API integration with external crew services, supporting: + - Dynamic input schema configuration + - Automatic polling for task completion + - Bearer token authentication + - Comprehensive error handling + + Example: + Basic usage: + >>> tool = InvokeCrewAIAutomationTool( + ... crew_api_url="https://api.example.com", + ... crew_bearer_token="your_token", + ... crew_name="My Crew", + ... crew_description="Description of what the crew does" + ... ) + + With custom inputs: + >>> custom_inputs = { + ... "param1": Field(..., description="Description of param1"), + ... "param2": Field(default="default_value", description="Description of param2") + ... } + >>> tool = InvokeCrewAIAutomationTool( + ... crew_api_url="https://api.example.com", + ... crew_bearer_token="your_token", + ... crew_name="My Crew", + ... crew_description="Description of what the crew does", + ... crew_inputs=custom_inputs + ... ) + + Example: + >>> tools=[ + ... InvokeCrewAIAutomationTool( + ... crew_api_url="https://canary-crew-[...].crewai.com", + ... crew_bearer_token="[Your token: abcdef012345]", + ... crew_name="State of AI Report", + ... crew_description="Retrieves a report on state of AI for a given year.", + ... crew_inputs={ + ... "year": Field(..., description="Year to retrieve the report for (integer)") + ... } + ... ) + ... ] + """ + name: str = "invoke_amp_automation" + description: str = "Invokes an CrewAI Platform Automation using API" + args_schema: Type[BaseModel] = InvokeCrewAIAutomationInput + + crew_api_url: str + crew_bearer_token: str + max_polling_time: int = 10 * 60 # 10 minutes + + def __init__( + self, + crew_api_url: str, + crew_bearer_token: str, + crew_name: str, + crew_description: str, + max_polling_time: int = 10 * 60, + crew_inputs: dict[str, Any] = None): + """ + Initialize the InvokeCrewAIAutomationTool. + + Args: + crew_api_url: Base URL of the crew API service + crew_bearer_token: Bearer token for API authentication + crew_name: Name of the crew to invoke + crew_description: Description of the crew to invoke + max_polling_time: Maximum time in seconds to wait for task completion (default: 600 seconds = 10 minutes) + crew_inputs: Optional dictionary defining custom input schema fields + """ + # Create dynamic args_schema if custom inputs provided + if crew_inputs: + # Start with the base prompt field + fields = {} + + # Add custom fields + for field_name, field_def in crew_inputs.items(): + if isinstance(field_def, tuple): + fields[field_name] = field_def + else: + # Assume it's a Field object, extract type from annotation if available + fields[field_name] = (str, field_def) + + # Create dynamic model + args_schema = create_model('DynamicInvokeCrewAIAutomationInput', **fields) + else: + args_schema = InvokeCrewAIAutomationInput + + # Initialize the parent class with proper field values + super().__init__( + name=crew_name, + description=crew_description, + args_schema=args_schema, + crew_api_url=crew_api_url, + crew_bearer_token=crew_bearer_token, + max_polling_time=max_polling_time + ) + + def _kickoff_crew(self, inputs: dict[str, Any]) -> dict[str, Any]: + """Start a new crew task + + Args: + inputs: Dictionary containing the query and other input parameters + + Returns: + Dictionary containing the crew task response. The response will contain the crew id which needs to be returned to check the status of the crew. 
+ """ + response = requests.post( + f"{self.crew_api_url}/kickoff", + headers={ + "Authorization": f"Bearer {self.crew_bearer_token}", + "Content-Type": "application/json", + }, + json={"inputs": inputs}, + ) + response_json = response.json() + return response_json + + def _get_crew_status(self, crew_id: str) -> dict[str, Any]: + """Get the status of a crew task + + Args: + crew_id: The ID of the crew task to check + + Returns: + Dictionary containing the crew task status + """ + response = requests.get( + f"{self.crew_api_url}/status/{crew_id}", + headers={ + "Authorization": f"Bearer {self.crew_bearer_token}", + "Content-Type": "application/json", + }, + ) + return response.json() + + def _run(self, **kwargs) -> str: + """Execute the crew invocation tool.""" + if kwargs is None: + kwargs = {} + + # Start the crew + response = self._kickoff_crew(inputs=kwargs) + + if response.get("kickoff_id") is None: + return f"Error: Failed to kickoff crew. Response: {response}" + + kickoff_id = response.get("kickoff_id") + + # Poll for completion + for i in range(self.max_polling_time): + try: + status_response = self._get_crew_status(crew_id=kickoff_id) + if status_response.get("state", "").lower() == "success": + return status_response.get("result", "No result returned") + elif status_response.get("state", "").lower() == "failed": + return f"Error: Crew task failed. Response: {status_response}" + except Exception as e: + if i == self.max_polling_time - 1: # Last attempt + return f"Error: Failed to get crew status after {self.max_polling_time} attempts. Last error: {e}" + + time.sleep(1) + + return f"Error: Crew did not complete within {self.max_polling_time} seconds" From 1f581fa9accf57275ed6143f33df74c189c70387 Mon Sep 17 00:00:00 2001 From: Mike Plachta Date: Wed, 27 Aug 2025 14:11:57 -0700 Subject: [PATCH 378/391] fix: add OCRTool to crewai_tools package exports (#433) --- src/crewai_tools/__init__.py | 1 + src/crewai_tools/tools/__init__.py | 1 + 2 files changed, 2 insertions(+) diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 7fc478df8..ea7b7dd74 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -46,6 +46,7 @@ from .tools import ( MultiOnTool, MySQLSearchTool, NL2SQLTool, + OCRTool, OxylabsAmazonProductScraperTool, OxylabsAmazonSearchScraperTool, OxylabsGoogleSearchScraperTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 05363f017..4850c1153 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -40,6 +40,7 @@ from .mongodb_vector_search_tool import ( from .multion_tool.multion_tool import MultiOnTool from .mysql_search_tool.mysql_search_tool import MySQLSearchTool from .nl2sql.nl2sql_tool import NL2SQLTool +from .ocr_tool.ocr_tool import OCRTool from .oxylabs_amazon_product_scraper_tool.oxylabs_amazon_product_scraper_tool import ( OxylabsAmazonProductScraperTool, ) From cb84d2ddfa105a7f384da1d1d03831de75581150 Mon Sep 17 00:00:00 2001 From: Jinash Rouniyar Date: Thu, 28 Aug 2025 08:56:53 -0400 Subject: [PATCH 379/391] feat: Add modular contextual AI tools with async functionality (#431) * Add contextual AI tools with async support * Fix package version issues and update README * Rename contextual tools to contextualai and update contents * Update tools init for contextualai tools * feat: Resolved no module found error for nest_asyncio * Updated nest_asyncio import --------- Co-authored-by: QJ Co-authored-by: Qile-Jiang --- src/crewai_tools/__init__.py 
| 4 + src/crewai_tools/tools/__init__.py | 4 + .../contextualai_create_agent_tool/README.md | 58 +++++++++++ .../contextual_create_agent_tool.py | 71 +++++++++++++ .../tools/contextualai_parse_tool/README.md | 68 +++++++++++++ .../contextual_parse_tool.py | 92 +++++++++++++++++ .../tools/contextualai_query_tool/README.md | 54 ++++++++++ .../contextual_query_tool.py | 99 +++++++++++++++++++ .../tools/contextualai_rerank_tool/README.md | 72 ++++++++++++++ .../contextual_rerank_tool.py | 68 +++++++++++++ 10 files changed, 590 insertions(+) create mode 100644 src/crewai_tools/tools/contextualai_create_agent_tool/README.md create mode 100644 src/crewai_tools/tools/contextualai_create_agent_tool/contextual_create_agent_tool.py create mode 100644 src/crewai_tools/tools/contextualai_parse_tool/README.md create mode 100644 src/crewai_tools/tools/contextualai_parse_tool/contextual_parse_tool.py create mode 100644 src/crewai_tools/tools/contextualai_query_tool/README.md create mode 100644 src/crewai_tools/tools/contextualai_query_tool/contextual_query_tool.py create mode 100644 src/crewai_tools/tools/contextualai_rerank_tool/README.md create mode 100644 src/crewai_tools/tools/contextualai_rerank_tool/contextual_rerank_tool.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index ea7b7dd74..4886dbc57 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -19,6 +19,10 @@ from .tools import ( CodeDocsSearchTool, CodeInterpreterTool, ComposioTool, + ContextualAIQueryTool, + ContextualAICreateAgentTool, + ContextualAIParseTool, + ContextualAIRerankTool, CouchbaseFTSVectorSearchTool, CrewaiEnterpriseTools, CSVSearchTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 4850c1153..886c27ad1 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -6,6 +6,10 @@ from .browserbase_load_tool.browserbase_load_tool import BrowserbaseLoadTool from .code_docs_search_tool.code_docs_search_tool import CodeDocsSearchTool from .code_interpreter_tool.code_interpreter_tool import CodeInterpreterTool from .composio_tool.composio_tool import ComposioTool +from .contextualai_query_tool.contextual_query_tool import ContextualAIQueryTool +from .contextualai_create_agent_tool.contextual_create_agent_tool import ContextualAICreateAgentTool +from .contextualai_parse_tool.contextual_parse_tool import ContextualAIParseTool +from .contextualai_rerank_tool.contextual_rerank_tool import ContextualAIRerankTool from .couchbase_tool.couchbase_tool import CouchbaseFTSVectorSearchTool from .crewai_enterprise_tools.crewai_enterprise_tools import CrewaiEnterpriseTools from .csv_search_tool.csv_search_tool import CSVSearchTool diff --git a/src/crewai_tools/tools/contextualai_create_agent_tool/README.md b/src/crewai_tools/tools/contextualai_create_agent_tool/README.md new file mode 100644 index 000000000..ee08bd23c --- /dev/null +++ b/src/crewai_tools/tools/contextualai_create_agent_tool/README.md @@ -0,0 +1,58 @@ +# ContextualAICreateAgentTool + +## Description +This tool is designed to integrate Contextual AI's enterprise-grade RAG agents with CrewAI. This tool enables you to create a new Contextual RAG agent. It uploads your documents to create a datastore and returns the Contextual agent ID and datastore ID. 
+ +## Installation +To incorporate this tool into your project, follow the installation instructions below: + +``` +pip install 'crewai[tools]' contextual-client +``` + +**Note**: You'll need a Contextual AI API key. Sign up at [app.contextual.ai](https://app.contextual.ai) to get your free API key. + +## Example + +```python +from crewai_tools import ContextualAICreateAgentTool + +# Initialize the tool +tool = ContextualAICreateAgentTool(api_key="your_api_key_here") + +# Create agent with documents +result = tool._run( + agent_name="Financial Analysis Agent", + agent_description="Agent for analyzing financial documents", + datastore_name="Financial Reports", + document_paths=["/path/to/report1.pdf", "/path/to/report2.pdf"], +) +print(result) +``` + +## Parameters +- `api_key`: Your Contextual AI API key +- `agent_name`: Name for the new agent +- `agent_description`: Description of the agent's purpose +- `datastore_name`: Name for the document datastore +- `document_paths`: List of file paths to upload + +Example result: + +``` +Successfully created agent 'Research Analyst' with ID: {created_agent_ID} and datastore ID: {created_datastore_ID}. Uploaded 5 documents. +``` + +You can use `ContextualAIQueryTool` with the returned IDs to query the knowledge base and retrieve relevant information from your documents. + +## Key Features +- **Complete Pipeline Setup**: Creates datastore, uploads documents, and configures agent in one operation +- **Document Processing**: Leverages Contextual AI's powerful parser to ingest complex PDFs and documents +- **Vector Storage**: Use Contextual AI's datastore for large document collections + +## Use Cases +- Set up new RAG agents from scratch with complete automation +- Upload and organize document collections into structured datastores +- Create specialized domain agents for legal, financial, technical, or research workflows + +For more detailed information about Contextual AI's capabilities, visit the [official documentation](https://docs.contextual.ai). 
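
As a quick illustration of that handoff, here is a minimal sketch chaining the two tools; the agent and datastore IDs below are placeholders you would copy from the success message shown above:

```python
from crewai_tools import ContextualAICreateAgentTool, ContextualAIQueryTool

api_key = "your_api_key_here"

# Create the agent and datastore; the returned message contains both IDs.
create_tool = ContextualAICreateAgentTool(api_key=api_key)
message = create_tool._run(
    agent_name="Research Analyst",
    agent_description="Agent for analyzing research papers",
    datastore_name="Research Papers",
    document_paths=["/path/to/paper.pdf"],
)
print(message)

# Query the new agent (placeholder IDs copied from the message above).
query_tool = ContextualAIQueryTool(api_key=api_key)
answer = query_tool._run(
    query="Summarize the key findings.",
    agent_id="your_agent_id_here",
    datastore_id="your_datastore_id_here",
)
print(answer)
```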
\ No newline at end of file diff --git a/src/crewai_tools/tools/contextualai_create_agent_tool/contextual_create_agent_tool.py b/src/crewai_tools/tools/contextualai_create_agent_tool/contextual_create_agent_tool.py new file mode 100644 index 000000000..7c531273e --- /dev/null +++ b/src/crewai_tools/tools/contextualai_create_agent_tool/contextual_create_agent_tool.py @@ -0,0 +1,71 @@ +from typing import Any, Optional, Type, List +from crewai.tools import BaseTool +from pydantic import BaseModel, Field +import os + + +class ContextualAICreateAgentSchema(BaseModel): + """Schema for contextual create agent tool.""" + agent_name: str = Field(..., description="Name for the new agent") + agent_description: str = Field(..., description="Description for the new agent") + datastore_name: str = Field(..., description="Name for the new datastore") + document_paths: List[str] = Field(..., description="List of file paths to upload") + + +class ContextualAICreateAgentTool(BaseTool): + """Tool to create Contextual AI RAG agents with documents.""" + + name: str = "Contextual AI Create Agent Tool" + description: str = "Create a new Contextual AI RAG agent with documents and datastore" + args_schema: Type[BaseModel] = ContextualAICreateAgentSchema + + api_key: str + contextual_client: Any = None + package_dependencies: List[str] = ["contextual-client"] + + def __init__(self, **kwargs): + super().__init__(**kwargs) + try: + from contextual import ContextualAI + self.contextual_client = ContextualAI(api_key=self.api_key) + except ImportError: + raise ImportError( + "contextual-client package is required. Install it with: pip install contextual-client" + ) + + def _run( + self, + agent_name: str, + agent_description: str, + datastore_name: str, + document_paths: List[str] + ) -> str: + """Create a complete RAG pipeline with documents.""" + try: + import os + + # Create datastore + datastore = self.contextual_client.datastores.create(name=datastore_name) + datastore_id = datastore.id + + # Upload documents + document_ids = [] + for doc_path in document_paths: + if not os.path.exists(doc_path): + raise FileNotFoundError(f"Document not found: {doc_path}") + + with open(doc_path, 'rb') as f: + ingestion_result = self.contextual_client.datastores.documents.ingest(datastore_id, file=f) + document_ids.append(ingestion_result.id) + + # Create agent + agent = self.contextual_client.agents.create( + name=agent_name, + description=agent_description, + datastore_ids=[datastore_id] + ) + + return f"Successfully created agent '{agent_name}' with ID: {agent.id} and datastore ID: {datastore_id}. Uploaded {len(document_ids)} documents." + + except Exception as e: + return f"Failed to create agent with documents: {str(e)}" diff --git a/src/crewai_tools/tools/contextualai_parse_tool/README.md b/src/crewai_tools/tools/contextualai_parse_tool/README.md new file mode 100644 index 000000000..da4bc8821 --- /dev/null +++ b/src/crewai_tools/tools/contextualai_parse_tool/README.md @@ -0,0 +1,68 @@ +# ContextualAIParseTool + +## Description +This tool is designed to integrate Contextual AI's enterprise-grade document parsing capabilities with CrewAI, enabling you to leverage advanced AI-powered document understanding for complex layouts, tables, and figures. Use this tool to extract structured content from your documents using Contextual AI's powerful document parser. 
+ +## Installation +To incorporate this tool into your project, follow the installation instructions below: + +``` +pip install 'crewai[tools]' contextual-client +``` + +**Note**: You'll need a Contextual AI API key. Sign up at [app.contextual.ai](https://app.contextual.ai) to get your free API key. + +## Example + +```python +from crewai_tools import ContextualAIParseTool + +tool = ContextualAIParseTool(api_key="your_api_key_here") + +result = tool._run( + file_path="/path/to/document.pdf", + parse_mode="standard", + page_range="0-5", + output_types=["markdown-per-page"] +) +print(result) +``` + +The result will show the parsed contents of your document. For example: +``` +{ + "file_name": "attention_is_all_you_need.pdf", + "status": "completed", + "pages": [ + { + "index": 0, + "markdown": "Provided proper attribution ... + }, + { + "index": 1, + "markdown": "## 1 Introduction ... + }, + ... + ] +} +``` +## Parameters +- `api_key`: Your Contextual AI API key +- `file_path`: Path to document to parse +- `parse_mode`: Parsing mode (default: "standard") +- `figure_caption_mode`: Figure caption handling (default: "concise") +- `enable_document_hierarchy`: Enable hierarchy detection (default: True) +- `page_range`: Pages to parse (e.g., "0-5", None for all) +- `output_types`: Output formats (default: ["markdown-per-page"]) + +## Key Features +- **Advanced Document Understanding**: Handles complex PDF layouts, tables, and multi-column documents +- **Figure and Table Extraction**: Intelligent extraction of figures, charts, and tabular data +- **Page Range Selection**: Parse specific pages or entire documents + +## Use Cases +- Extract structured content from complex PDFs and research papers +- Parse financial reports, legal documents, and technical manuals +- Convert documents to markdown for further processing in RAG pipelines + +For more detailed information about Contextual AI's capabilities, visit the [official documentation](https://docs.contextual.ai). 
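
Since the tool returns the parse results as a JSON string, here is a minimal consumption sketch, assuming the `markdown-per-page` output shape shown in the example above:

```python
import json

from crewai_tools import ContextualAIParseTool

tool = ContextualAIParseTool(api_key="your_api_key_here")
raw = tool._run(file_path="/path/to/document.pdf", output_types=["markdown-per-page"])

# Stitch the per-page markdown back into a single document string.
parsed = json.loads(raw)
full_markdown = "\n\n".join(page["markdown"] for page in parsed.get("pages", []))
print(full_markdown[:500])
```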
\ No newline at end of file diff --git a/src/crewai_tools/tools/contextualai_parse_tool/contextual_parse_tool.py b/src/crewai_tools/tools/contextualai_parse_tool/contextual_parse_tool.py new file mode 100644 index 000000000..5985b60f1 --- /dev/null +++ b/src/crewai_tools/tools/contextualai_parse_tool/contextual_parse_tool.py @@ -0,0 +1,92 @@ +from typing import Any, Optional, Type, List +from crewai.tools import BaseTool +from pydantic import BaseModel, Field + + +class ContextualAIParseSchema(BaseModel): + """Schema for contextual parse tool.""" + file_path: str = Field(..., description="Path to the document to parse") + parse_mode: str = Field(default="standard", description="Parsing mode") + figure_caption_mode: str = Field(default="concise", description="Figure caption mode") + enable_document_hierarchy: bool = Field(default=True, description="Enable document hierarchy") + page_range: Optional[str] = Field(default=None, description="Page range to parse (e.g., '0-5')") + output_types: List[str] = Field(default=["markdown-per-page"], description="List of output types") + + +class ContextualAIParseTool(BaseTool): + """Tool to parse documents using Contextual AI's parser.""" + + name: str = "Contextual AI Document Parser" + description: str = "Parse documents using Contextual AI's advanced document parser" + args_schema: Type[BaseModel] = ContextualAIParseSchema + + api_key: str + package_dependencies: List[str] = ["contextual-client"] + + def _run( + self, + file_path: str, + parse_mode: str = "standard", + figure_caption_mode: str = "concise", + enable_document_hierarchy: bool = True, + page_range: Optional[str] = None, + output_types: List[str] = ["markdown-per-page"] + ) -> str: + """Parse a document using Contextual AI's parser.""" + try: + import requests + import json + import os + from time import sleep + + if not os.path.exists(file_path): + raise FileNotFoundError(f"Document not found: {file_path}") + + base_url = "https://api.contextual.ai/v1" + headers = { + "accept": "application/json", + "authorization": f"Bearer {self.api_key}" + } + + # Submit parse job + url = f"{base_url}/parse" + config = { + "parse_mode": parse_mode, + "figure_caption_mode": figure_caption_mode, + "enable_document_hierarchy": enable_document_hierarchy, + } + + if page_range: + config["page_range"] = page_range + + with open(file_path, "rb") as fp: + file = {"raw_file": fp} + result = requests.post(url, headers=headers, data=config, files=file) + response = json.loads(result.text) + job_id = response['job_id'] + + # Monitor job status + status_url = f"{base_url}/parse/jobs/{job_id}/status" + while True: + result = requests.get(status_url, headers=headers) + parse_response = json.loads(result.text)['status'] + + if parse_response == "completed": + break + elif parse_response == "failed": + raise RuntimeError("Document parsing failed") + + sleep(5) + + # Get parse results + results_url = f"{base_url}/parse/jobs/{job_id}/results" + result = requests.get( + results_url, + headers=headers, + params={"output_types": ",".join(output_types)}, + ) + + return json.dumps(json.loads(result.text), indent=2) + + except Exception as e: + return f"Failed to parse document: {str(e)}" diff --git a/src/crewai_tools/tools/contextualai_query_tool/README.md b/src/crewai_tools/tools/contextualai_query_tool/README.md new file mode 100644 index 000000000..ef939572b --- /dev/null +++ b/src/crewai_tools/tools/contextualai_query_tool/README.md @@ -0,0 +1,54 @@ +# ContextualAIQueryTool + +## Description +This tool is designed to 
integrate Contextual AI's enterprise-grade RAG agents with CrewAI. Run this tool to query existing Contextual AI RAG agents that have been pre-configured with documents and knowledge bases. + +## Installation +To incorporate this tool into your project, follow the installation instructions below: + +```shell +pip install 'crewai[tools]' contextual-client +``` + +**Note**: You'll need a Contextual AI API key. Sign up at [app.contextual.ai](https://app.contextual.ai) to get your free API key. + +## Example + +Make sure you have already created a Contextual agent and ingested documents into the datastore before using this tool. + +```python +from crewai_tools import ContextualAIQueryTool + +# Initialize the tool +tool = ContextualAIQueryTool(api_key="your_api_key_here") + +# Query the agent with IDs +result = tool._run( + query="What are the key findings in the financial report?", + agent_id="your_agent_id_here", + datastore_id="your_datastore_id_here" # Optional: for document readiness checking +) +print(result) +``` + +The result will contain the generated answer to the user's query. + +## Parameters +**Initialization:** +- `api_key`: Your Contextual AI API key + +**Query (_run method):** +- `query`: The question or query to send to the agent +- `agent_id`: ID of the existing Contextual AI agent to query (required) +- `datastore_id`: Optional datastore ID for document readiness verification (if not provided, document status checking is disabled with a warning) + +## Key Features +- **Document Readiness Checking**: Automatically waits for documents to be processed before querying +- **Grounded Responses**: Built-in grounding ensures factual, source-attributed answers + +## Use Cases +- Query pre-configured RAG agents with document collections +- Access enterprise knowledge bases through user queries +- Build specialized domain experts with access to curated documents + +For more detailed information about Contextual AI's capabilities, visit the [official documentation](https://docs.contextual.ai). 
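
The query tool can also be handed directly to an agent. The sketch below mirrors the agent-integration pattern used by the other tools in this repository; the agent ID embedded in the task description is illustrative:

```python
from crewai import Agent, Task, Crew
from crewai_tools import ContextualAIQueryTool

query_tool = ContextualAIQueryTool(api_key="your_api_key_here")

analyst = Agent(
    role="Knowledge Base Analyst",
    goal="Answer questions using the Contextual AI knowledge base",
    backstory="You rely on a pre-configured Contextual AI RAG agent for grounded answers.",
    tools=[query_tool],
)

task = Task(
    description=(
        "What are the key findings in the financial report? "
        "Query the Contextual AI agent with agent_id 'your_agent_id_here'."
    ),
    agent=analyst,
    expected_output="A grounded, source-attributed answer",
)

crew = Crew(agents=[analyst], tasks=[task])
print(crew.kickoff())
```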
\ No newline at end of file diff --git a/src/crewai_tools/tools/contextualai_query_tool/contextual_query_tool.py b/src/crewai_tools/tools/contextualai_query_tool/contextual_query_tool.py new file mode 100644 index 000000000..955ba6a39 --- /dev/null +++ b/src/crewai_tools/tools/contextualai_query_tool/contextual_query_tool.py @@ -0,0 +1,99 @@ +from typing import Any, Optional, Type, List +from crewai.tools import BaseTool +from pydantic import BaseModel, Field +import asyncio +import requests +import os + + +class ContextualAIQuerySchema(BaseModel): + """Schema for contextual query tool.""" + query: str = Field(..., description="Query to send to the Contextual AI agent.") + agent_id: str = Field(..., description="ID of the Contextual AI agent to query") + datastore_id: Optional[str] = Field(None, description="Optional datastore ID for document readiness verification") + + +class ContextualAIQueryTool(BaseTool): + """Tool to query Contextual AI RAG agents.""" + + name: str = "Contextual AI Query Tool" + description: str = "Use this tool to query a Contextual AI RAG agent with access to your documents" + args_schema: Type[BaseModel] = ContextualAIQuerySchema + + api_key: str + contextual_client: Any = None + package_dependencies: List[str] = ["contextual-client"] + + def __init__(self, **kwargs): + super().__init__(**kwargs) + try: + from contextual import ContextualAI + self.contextual_client = ContextualAI(api_key=self.api_key) + except ImportError: + raise ImportError( + "contextual-client package is required. Install it with: pip install contextual-client" + ) + + def _check_documents_ready(self, datastore_id: str) -> bool: + """Synchronous check if all documents are ready.""" + url = f"https://api.contextual.ai/v1/datastores/{datastore_id}/documents" + headers = {"Authorization": f"Bearer {self.api_key}"} + response = requests.get(url, headers=headers) + if response.status_code == 200: + data = response.json() + documents = data.get('documents', []) + return not any(doc.get('status') in ('processing', 'pending') for doc in documents) + return True + + async def _wait_for_documents_async(self, datastore_id: str, max_attempts: int = 20, interval: float = 30.0) -> bool: + """Asynchronously poll until documents are ready, exiting early if possible.""" + for attempt in range(max_attempts): + ready = await asyncio.to_thread(self._check_documents_ready, datastore_id) + if ready: + return True + await asyncio.sleep(interval) + print("Processing documents ...") + return True # give up but don't fail hard + + def _run(self, query: str, agent_id: str, datastore_id: Optional[str] = None) -> str: + if not agent_id: + raise ValueError("Agent ID is required to query the Contextual AI agent") + + if datastore_id: + ready = self._check_documents_ready(datastore_id) + if not ready: + try: + # If no running event loop, use asyncio.run + loop = asyncio.get_running_loop() + except RuntimeError: + loop = None + + if loop and loop.is_running(): + # Already inside an event loop + try: + import nest_asyncio + nest_asyncio.apply(loop) + loop.run_until_complete(self._wait_for_documents_async(datastore_id)) + except Exception as e: + print(f"Failed to apply nest_asyncio: {str(e)}") + else: + asyncio.run(self._wait_for_documents_async(datastore_id)) + else: + print("Warning: No datastore_id provided. 
Document status checking disabled.") + + try: + response = self.contextual_client.agents.query.create( + agent_id=agent_id, + messages=[{"role": "user", "content": query}] + ) + if hasattr(response, 'content'): + return response.content + elif hasattr(response, 'message'): + return response.message.content if hasattr(response.message, 'content') else str(response.message) + elif hasattr(response, 'messages') and len(response.messages) > 0: + last_message = response.messages[-1] + return last_message.content if hasattr(last_message, 'content') else str(last_message) + else: + return str(response) + except Exception as e: + return f"Error querying Contextual AI agent: {str(e)}" diff --git a/src/crewai_tools/tools/contextualai_rerank_tool/README.md b/src/crewai_tools/tools/contextualai_rerank_tool/README.md new file mode 100644 index 000000000..d8c8a9ed8 --- /dev/null +++ b/src/crewai_tools/tools/contextualai_rerank_tool/README.md @@ -0,0 +1,72 @@ +# ContextualAIRerankTool + +## Description +This tool is designed to integrate Contextual AI's enterprise-grade instruction-following reranker with CrewAI, enabling you to intelligently reorder documents based on relevance and custom criteria. Use this tool to enhance search result quality and document retrieval for RAG systems using Contextual AI's reranking models that understand context and follow specific instructions for optimal document ordering. + +## Installation +To incorporate this tool into your project, follow the installation instructions below: + +```shell +pip install 'crewai[tools]' contextual-client +``` + +**Note**: You'll need a Contextual AI API key. Sign up at [app.contextual.ai](https://app.contextual.ai) to get your free API key. + +## Example + +```python +from crewai_tools import ContextualAIRerankTool + +tool = ContextualAIRerankTool(api_key="your_api_key_here") + +result = tool._run( + query="financial performance and revenue metrics", + documents=[ + "Q1 report content with revenue data", + "Q2 report content with growth metrics", + "News article about market trends" + ], + instruction="Prioritize documents with specific financial metrics and quantitative data" +) +print(result) +``` + +The result will contain the document ranking. For example: +``` +Rerank Result: +{ + "results": [ + { + "index": 1, + "relevance_score": 0.88227631 + }, + { + "index": 0, + "relevance_score": 0.61159354 + }, + { + "index": 2, + "relevance_score": 0.28579462 + } + ] +} +``` + +## Parameters +- `api_key`: Your Contextual AI API key +- `query`: Search query for reranking +- `documents`: List of document texts to rerank +- `instruction`: Optional reranking instruction for custom criteria +- `metadata`: Optional metadata for each document +- `model`: Reranker model (default: "ctxl-rerank-en-v1-instruct") + +## Key Features +- **Instruction-Following Reranking**: Follows custom instructions for domain-specific document ordering +- **Metadata Integration**: Incorporates document metadata for enhanced ranking decisions + +## Use Cases +- Improve search result relevance in document collections +- Reorder documents by custom business criteria (recency, authority, relevance) +- Filter and prioritize documents for research and analysis workflows + +For more detailed information about Contextual AI's capabilities, visit the [official documentation](https://docs.contextual.ai). 
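
Because the ranking comes back as a JSON string of indices and scores, reordering the original list is straightforward. A short sketch continuing the example above, assuming that output shape:

```python
import json

documents = [
    "Q1 report content with revenue data",
    "Q2 report content with growth metrics",
    "News article about market trends",
]
ranking = json.loads(result)  # `result` is the JSON string returned above

# Reorder the original documents by the returned indices.
reordered = [documents[item["index"]] for item in ranking["results"]]
for item, doc in zip(ranking["results"], reordered):
    print(f"{item['relevance_score']:.3f}  {doc}")
```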
\ No newline at end of file diff --git a/src/crewai_tools/tools/contextualai_rerank_tool/contextual_rerank_tool.py b/src/crewai_tools/tools/contextualai_rerank_tool/contextual_rerank_tool.py new file mode 100644 index 000000000..c0bcab8a2 --- /dev/null +++ b/src/crewai_tools/tools/contextualai_rerank_tool/contextual_rerank_tool.py @@ -0,0 +1,68 @@ +from typing import Any, Optional, Type, List +from crewai.tools import BaseTool +from pydantic import BaseModel, Field + + +class ContextualAIRerankSchema(BaseModel): + """Schema for contextual rerank tool.""" + query: str = Field(..., description="The search query to rerank documents against") + documents: List[str] = Field(..., description="List of document texts to rerank") + instruction: Optional[str] = Field(default=None, description="Optional instruction for reranking behavior") + metadata: Optional[List[str]] = Field(default=None, description="Optional metadata for each document") + model: str = Field(default="ctxl-rerank-en-v1-instruct", description="Reranker model to use") + + +class ContextualAIRerankTool(BaseTool): + """Tool to rerank documents using Contextual AI's instruction-following reranker.""" + + name: str = "Contextual AI Document Reranker" + description: str = "Rerank documents using Contextual AI's instruction-following reranker" + args_schema: Type[BaseModel] = ContextualAIRerankSchema + + api_key: str + package_dependencies: List[str] = ["contextual-client"] + + def _run( + self, + query: str, + documents: List[str], + instruction: Optional[str] = None, + metadata: Optional[List[str]] = None, + model: str = "ctxl-rerank-en-v1-instruct" + ) -> str: + """Rerank documents using Contextual AI's instruction-following reranker.""" + try: + import requests + import json + + base_url = "https://api.contextual.ai/v1" + headers = { + "accept": "application/json", + "content-type": "application/json", + "authorization": f"Bearer {self.api_key}" + } + + payload = { + "query": query, + "documents": documents, + "model": model + } + + if instruction: + payload["instruction"] = instruction + + if metadata: + if len(metadata) != len(documents): + raise ValueError("Metadata list must have the same length as documents list") + payload["metadata"] = metadata + + rerank_url = f"{base_url}/rerank" + result = requests.post(rerank_url, json=payload, headers=headers) + + if result.status_code != 200: + raise RuntimeError(f"Reranker API returned status {result.status_code}: {result.text}") + + return json.dumps(result.json(), indent=2) + + except Exception as e: + return f"Failed to rerank documents: {str(e)}" From 93b841fc86d52d600e8b54f88efcc28f024d5f8a Mon Sep 17 00:00:00 2001 From: Vini Brasil Date: Tue, 2 Sep 2025 11:46:18 -0300 Subject: [PATCH 380/391] Create tool for generating automations in Studio (#438) * Create tool for generating automations in Studio This commit creates a tool to use CrewAI Enterprise API to generate crews using CrewAI Studio. 
* Replace CREWAI_BASE_URL with CREWAI_PLUS_URL * Add missing /crewai_plus in URL --- src/crewai_tools/__init__.py | 7 +- src/crewai_tools/tools/__init__.py | 23 ++- .../generate_crewai_automation_tool/README.md | 50 +++++ .../generate_crewai_automation_tool.py | 70 +++++++ .../generate_crewai_automation_tool_test.py | 187 ++++++++++++++++++ 5 files changed, 326 insertions(+), 11 deletions(-) create mode 100644 src/crewai_tools/tools/generate_crewai_automation_tool/README.md create mode 100644 src/crewai_tools/tools/generate_crewai_automation_tool/generate_crewai_automation_tool.py create mode 100644 tests/tools/generate_crewai_automation_tool_test.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 4886dbc57..f4c03ba0e 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -12,16 +12,16 @@ from .tools import ( ApifyActorsTool, ArxivPaperTool, BraveSearchTool, - BrightDataWebUnlockerTool, - BrightDataSearchTool, BrightDataDatasetTool, + BrightDataSearchTool, + BrightDataWebUnlockerTool, BrowserbaseLoadTool, CodeDocsSearchTool, CodeInterpreterTool, ComposioTool, - ContextualAIQueryTool, ContextualAICreateAgentTool, ContextualAIParseTool, + ContextualAIQueryTool, ContextualAIRerankTool, CouchbaseFTSVectorSearchTool, CrewaiEnterpriseTools, @@ -38,6 +38,7 @@ from .tools import ( FirecrawlCrawlWebsiteTool, FirecrawlScrapeWebsiteTool, FirecrawlSearchTool, + GenerateCrewaiAutomationTool, GithubSearchTool, HyperbrowserLoadTool, InvokeCrewAIAutomationTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index 886c27ad1..bf1a166d9 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -2,13 +2,20 @@ from .ai_mind_tool.ai_mind_tool import AIMindTool from .apify_actors_tool.apify_actors_tool import ApifyActorsTool from .arxiv_paper_tool.arxiv_paper_tool import ArxivPaperTool from .brave_search_tool.brave_search_tool import BraveSearchTool +from .brightdata_tool import ( + BrightDataDatasetTool, + BrightDataSearchTool, + BrightDataWebUnlockerTool, +) from .browserbase_load_tool.browserbase_load_tool import BrowserbaseLoadTool from .code_docs_search_tool.code_docs_search_tool import CodeDocsSearchTool from .code_interpreter_tool.code_interpreter_tool import CodeInterpreterTool from .composio_tool.composio_tool import ComposioTool -from .contextualai_query_tool.contextual_query_tool import ContextualAIQueryTool -from .contextualai_create_agent_tool.contextual_create_agent_tool import ContextualAICreateAgentTool +from .contextualai_create_agent_tool.contextual_create_agent_tool import ( + ContextualAICreateAgentTool, +) from .contextualai_parse_tool.contextual_parse_tool import ContextualAIParseTool +from .contextualai_query_tool.contextual_query_tool import ContextualAIQueryTool from .contextualai_rerank_tool.contextual_rerank_tool import ContextualAIRerankTool from .couchbase_tool.couchbase_tool import CouchbaseFTSVectorSearchTool from .crewai_enterprise_tools.crewai_enterprise_tools import CrewaiEnterpriseTools @@ -29,9 +36,14 @@ from .firecrawl_scrape_website_tool.firecrawl_scrape_website_tool import ( FirecrawlScrapeWebsiteTool, ) from .firecrawl_search_tool.firecrawl_search_tool import FirecrawlSearchTool +from .generate_crewai_automation_tool.generate_crewai_automation_tool import ( + GenerateCrewaiAutomationTool, +) from .github_search_tool.github_search_tool import GithubSearchTool from .hyperbrowser_load_tool.hyperbrowser_load_tool import HyperbrowserLoadTool -from 
.invoke_crewai_automation_tool.invoke_crewai_automation_tool import InvokeCrewAIAutomationTool +from .invoke_crewai_automation_tool.invoke_crewai_automation_tool import ( + InvokeCrewAIAutomationTool, +) from .json_search_tool.json_search_tool import JSONSearchTool from .linkup.linkup_search_tool import LinkupSearchTool from .llamaindex_tool.llamaindex_tool import LlamaIndexTool @@ -108,9 +120,4 @@ from .youtube_channel_search_tool.youtube_channel_search_tool import ( YoutubeChannelSearchTool, ) from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool -from .brightdata_tool import ( - BrightDataDatasetTool, - BrightDataSearchTool, - BrightDataWebUnlockerTool -) from .zapier_action_tool.zapier_action_tool import ZapierActionTools diff --git a/src/crewai_tools/tools/generate_crewai_automation_tool/README.md b/src/crewai_tools/tools/generate_crewai_automation_tool/README.md new file mode 100644 index 000000000..c70741ca4 --- /dev/null +++ b/src/crewai_tools/tools/generate_crewai_automation_tool/README.md @@ -0,0 +1,50 @@ +# GenerateCrewaiAutomationTool + +## Description + +The GenerateCrewaiAutomationTool integrates with CrewAI Studio API to generate complete CrewAI automations from natural language descriptions. It translates high-level requirements into functional CrewAI implementations and returns direct links to Studio projects. + +## Environment Variables + +Set your CrewAI Personal Access Token (CrewAI Enterprise > Settings > Account > Personal Access Token): + +```bash +export CREWAI_PERSONAL_ACCESS_TOKEN="your_personal_access_token_here" +export CREWAI_PLUS_URL="https://app.crewai.com" # optional +``` + +## Example + +```python +from crewai_tools import GenerateCrewaiAutomationTool +from crewai import Agent, Task, Crew + +# Initialize tool +tool = GenerateCrewaiAutomationTool() + +# Generate automation +result = tool.run( + prompt="Generate a CrewAI automation that scrapes websites and stores data in a database", + organization_id="org_123" # optional but recommended +) + +print(result) +# Output: Generated CrewAI Studio project URL: https://studio.crewai.com/project/abc123 + +# Use with agent +agent = Agent( + role="Automation Architect", + goal="Generate CrewAI automations", + backstory="Expert at creating automated workflows", + tools=[tool] +) + +task = Task( + description="Create a lead qualification automation", + agent=agent, + expected_output="Studio project URL" +) + +crew = Crew(agents=[agent], tasks=[task]) +result = crew.kickoff() +``` \ No newline at end of file diff --git a/src/crewai_tools/tools/generate_crewai_automation_tool/generate_crewai_automation_tool.py b/src/crewai_tools/tools/generate_crewai_automation_tool/generate_crewai_automation_tool.py new file mode 100644 index 000000000..3d52ae3fa --- /dev/null +++ b/src/crewai_tools/tools/generate_crewai_automation_tool/generate_crewai_automation_tool.py @@ -0,0 +1,70 @@ +import os +from typing import List, Optional, Type + +import requests +from crewai.tools import BaseTool, EnvVar +from pydantic import BaseModel, Field + + +class GenerateCrewaiAutomationToolSchema(BaseModel): + prompt: str = Field( + description="The prompt to generate the CrewAI automation, e.g. 'Generate a CrewAI automation that will scrape the website and store the data in a database.'" + ) + organization_id: Optional[str] = Field( + default=None, + description="The identifier for the CrewAI Enterprise organization. 
If not specified, a default organization will be used.", + ) + + +class GenerateCrewaiAutomationTool(BaseTool): + name: str = "Generate CrewAI Automation" + description: str = ( + "A tool that leverages CrewAI Studio's capabilities to automatically generate complete CrewAI " + "automations based on natural language descriptions. It translates high-level requirements into " + "functional CrewAI implementations." + ) + args_schema: Type[BaseModel] = GenerateCrewaiAutomationToolSchema + crewai_enterprise_url: str = Field( + default_factory=lambda: os.getenv("CREWAI_PLUS_URL", "https://app.crewai.com"), + description="The base URL of CrewAI Enterprise. If not provided, it will be loaded from the environment variable CREWAI_PLUS_URL with default https://app.crewai.com.", + ) + personal_access_token: Optional[str] = Field( + default_factory=lambda: os.getenv("CREWAI_PERSONAL_ACCESS_TOKEN"), + description="The user's Personal Access Token to access CrewAI Enterprise API. If not provided, it will be loaded from the environment variable CREWAI_PERSONAL_ACCESS_TOKEN.", + ) + env_vars: List[EnvVar] = [ + EnvVar( + name="CREWAI_PERSONAL_ACCESS_TOKEN", + description="Personal Access Token for CrewAI Enterprise API", + required=True, + ), + EnvVar( + name="CREWAI_PLUS_URL", + description="Base URL for CrewAI Enterprise API", + required=False, + ), + ] + + def _run(self, **kwargs) -> str: + input_data = GenerateCrewaiAutomationToolSchema(**kwargs) + response = requests.post( + f"{self.crewai_enterprise_url}/crewai_plus/api/v1/studio", + headers=self._get_headers(input_data.organization_id), + json={"prompt": input_data.prompt}, + ) + + response.raise_for_status() + studio_project_url = response.json().get("url") + return f"Generated CrewAI Studio project URL: {studio_project_url}" + + def _get_headers(self, organization_id: Optional[str] = None) -> dict: + headers = { + "Authorization": f"Bearer {self.personal_access_token}", + "Content-Type": "application/json", + "Accept": "application/json", + } + + if organization_id: + headers["X-Crewai-Organization-Id"] = organization_id + + return headers diff --git a/tests/tools/generate_crewai_automation_tool_test.py b/tests/tools/generate_crewai_automation_tool_test.py new file mode 100644 index 000000000..715e00804 --- /dev/null +++ b/tests/tools/generate_crewai_automation_tool_test.py @@ -0,0 +1,187 @@ +import os +from unittest.mock import MagicMock, patch + +import pytest +import requests + +from crewai_tools.tools.generate_crewai_automation_tool.generate_crewai_automation_tool import ( + GenerateCrewaiAutomationTool, + GenerateCrewaiAutomationToolSchema, +) + + +@pytest.fixture(autouse=True) +def mock_env(): + with patch.dict(os.environ, {"CREWAI_PERSONAL_ACCESS_TOKEN": "test_token"}): + os.environ.pop("CREWAI_PLUS_URL", None) + yield + + +@pytest.fixture +def tool(): + return GenerateCrewaiAutomationTool() + + +@pytest.fixture +def custom_url_tool(): + with patch.dict(os.environ, {"CREWAI_PLUS_URL": "https://custom.crewai.com"}): + return GenerateCrewaiAutomationTool() + + +def test_default_initialization(tool): + assert tool.crewai_enterprise_url == "https://app.crewai.com" + assert tool.personal_access_token == "test_token" + assert tool.name == "Generate CrewAI Automation" + + +def test_custom_base_url_from_environment(custom_url_tool): + assert custom_url_tool.crewai_enterprise_url == "https://custom.crewai.com" + + +def test_personal_access_token_from_environment(tool): + assert tool.personal_access_token == "test_token" + + +def 
test_valid_prompt_only(): + schema = GenerateCrewaiAutomationToolSchema( + prompt="Create a web scraping automation" + ) + assert schema.prompt == "Create a web scraping automation" + assert schema.organization_id is None + + +def test_valid_prompt_with_organization_id(): + schema = GenerateCrewaiAutomationToolSchema( + prompt="Create automation", organization_id="org-123" + ) + assert schema.prompt == "Create automation" + assert schema.organization_id == "org-123" + + +def test_empty_prompt_validation(): + schema = GenerateCrewaiAutomationToolSchema(prompt="") + assert schema.prompt == "" + assert schema.organization_id is None + + +@patch("requests.post") +def test_successful_generation_without_org_id(mock_post, tool): + mock_response = MagicMock() + mock_response.json.return_value = { + "url": "https://app.crewai.com/studio/project-123" + } + mock_post.return_value = mock_response + + result = tool.run(prompt="Create automation") + + assert ( + result + == "Generated CrewAI Studio project URL: https://app.crewai.com/studio/project-123" + ) + mock_post.assert_called_once_with( + "https://app.crewai.com/crewai_plus/api/v1/studio", + headers={ + "Authorization": "Bearer test_token", + "Content-Type": "application/json", + "Accept": "application/json", + }, + json={"prompt": "Create automation"}, + ) + + +@patch("requests.post") +def test_successful_generation_with_org_id(mock_post, tool): + mock_response = MagicMock() + mock_response.json.return_value = { + "url": "https://app.crewai.com/studio/project-456" + } + mock_post.return_value = mock_response + + result = tool.run(prompt="Create automation", organization_id="org-456") + + assert ( + result + == "Generated CrewAI Studio project URL: https://app.crewai.com/studio/project-456" + ) + mock_post.assert_called_once_with( + "https://app.crewai.com/crewai_plus/api/v1/studio", + headers={ + "Authorization": "Bearer test_token", + "Content-Type": "application/json", + "Accept": "application/json", + "X-Crewai-Organization-Id": "org-456", + }, + json={"prompt": "Create automation"}, + ) + + +@patch("requests.post") +def test_custom_base_url_usage(mock_post, custom_url_tool): + mock_response = MagicMock() + mock_response.json.return_value = { + "url": "https://custom.crewai.com/studio/project-789" + } + mock_post.return_value = mock_response + + custom_url_tool.run(prompt="Create automation") + + mock_post.assert_called_once_with( + "https://custom.crewai.com/crewai_plus/api/v1/studio", + headers={ + "Authorization": "Bearer test_token", + "Content-Type": "application/json", + "Accept": "application/json", + }, + json={"prompt": "Create automation"}, + ) + + +@patch("requests.post") +def test_api_error_response_handling(mock_post, tool): + mock_post.return_value.raise_for_status.side_effect = requests.HTTPError( + "400 Bad Request" + ) + + with pytest.raises(requests.HTTPError): + tool.run(prompt="Create automation") + + +@patch("requests.post") +def test_network_error_handling(mock_post, tool): + mock_post.side_effect = requests.ConnectionError("Network unreachable") + + with pytest.raises(requests.ConnectionError): + tool.run(prompt="Create automation") + + +@patch("requests.post") +def test_api_response_missing_url(mock_post, tool): + mock_response = MagicMock() + mock_response.json.return_value = {"status": "success"} + mock_post.return_value = mock_response + + result = tool.run(prompt="Create automation") + + assert result == "Generated CrewAI Studio project URL: None" + + +def test_authorization_header_construction(tool): + headers = 
tool._get_headers() + + assert headers["Authorization"] == "Bearer test_token" + assert headers["Content-Type"] == "application/json" + assert headers["Accept"] == "application/json" + assert "X-Crewai-Organization-Id" not in headers + + +def test_authorization_header_with_org_id(tool): + headers = tool._get_headers(organization_id="org-123") + + assert headers["Authorization"] == "Bearer test_token" + assert headers["X-Crewai-Organization-Id"] == "org-123" + + +def test_missing_personal_access_token(): + with patch.dict(os.environ, {}, clear=True): + tool = GenerateCrewaiAutomationTool() + assert tool.personal_access_token is None From 33241ef363d66dd8b2417b0200954e36fb20420a Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Tue, 2 Sep 2025 16:41:00 -0300 Subject: [PATCH 381/391] refactor: fetch & execute enterprise tool actions from platform (#437) * refactor: fetch enterprise tool actions from platform * chore: logging legacy token detected --- .../adapters/enterprise_adapter.py | 97 ++++++++++--------- tests/tools/crewai_enterprise_tools_test.py | 7 +- 2 files changed, 52 insertions(+), 52 deletions(-) diff --git a/src/crewai_tools/adapters/enterprise_adapter.py b/src/crewai_tools/adapters/enterprise_adapter.py index bd442d98f..3acf1e8e0 100644 --- a/src/crewai_tools/adapters/enterprise_adapter.py +++ b/src/crewai_tools/adapters/enterprise_adapter.py @@ -1,15 +1,19 @@ import os import json import requests -from typing import List, Any, Dict, Literal, Optional, Union, get_origin +import logging +from typing import List, Any, Dict, Literal, Optional, Union, get_origin, Type, cast from pydantic import Field, create_model from crewai.tools import BaseTool import re -# DEFAULTS -ENTERPRISE_ACTION_KIT_PROJECT_ID = "dd525517-df22-49d2-a69e-6a0eed211166" -ENTERPRISE_ACTION_KIT_PROJECT_URL = "https://worker-actionkit.tools.crewai.com/projects" +def get_enterprise_api_base_url() -> str: + """Get the enterprise API base URL from environment or use default.""" + base_url = os.getenv("CREWAI_PLUS_URL", "https://app.crewai.com") + return f"{base_url}/crewai_plus/api/v1/integrations" + +ENTERPRISE_API_BASE_URL = get_enterprise_api_base_url() class EnterpriseActionTool(BaseTool): @@ -22,11 +26,8 @@ class EnterpriseActionTool(BaseTool): action_schema: Dict[str, Any] = Field( default={}, description="The schema of the action" ) - enterprise_action_kit_project_id: str = Field( - default=ENTERPRISE_ACTION_KIT_PROJECT_ID, description="The project id" - ) - enterprise_action_kit_project_url: str = Field( - default=ENTERPRISE_ACTION_KIT_PROJECT_URL, description="The project url" + enterprise_api_base_url: str = Field( + default=ENTERPRISE_API_BASE_URL, description="The base API URL" ) def __init__( @@ -36,8 +37,7 @@ class EnterpriseActionTool(BaseTool): enterprise_action_token: str, action_name: str, action_schema: Dict[str, Any], - enterprise_action_kit_project_url: str = ENTERPRISE_ACTION_KIT_PROJECT_URL, - enterprise_action_kit_project_id: str = ENTERPRISE_ACTION_KIT_PROJECT_ID, + enterprise_api_base_url: Optional[str] = None, ): self._model_registry = {} self._base_name = self._sanitize_name(name) @@ -86,11 +86,7 @@ class EnterpriseActionTool(BaseTool): self.enterprise_action_token = enterprise_action_token self.action_name = action_name self.action_schema = action_schema - - if enterprise_action_kit_project_id is not None: - self.enterprise_action_kit_project_id = enterprise_action_kit_project_id - if enterprise_action_kit_project_url is not None: - self.enterprise_action_kit_project_url = 
enterprise_action_kit_project_url + self.enterprise_api_base_url = enterprise_api_base_url or get_enterprise_api_base_url() def _sanitize_name(self, name: str) -> str: """Sanitize names to create proper Python class names.""" @@ -112,7 +108,7 @@ class EnterpriseActionTool(BaseTool): ) return schema_props, required - def _process_schema_type(self, schema: Dict[str, Any], type_name: str) -> type: + def _process_schema_type(self, schema: Dict[str, Any], type_name: str) -> Type[Any]: """Process a JSON schema and return appropriate Python type.""" if "anyOf" in schema: any_of_types = schema["anyOf"] @@ -122,7 +118,7 @@ class EnterpriseActionTool(BaseTool): if non_null_types: base_type = self._process_schema_type(non_null_types[0], type_name) return Optional[base_type] if is_nullable else base_type - return Optional[str] + return cast(Type[Any], Optional[str]) if "oneOf" in schema: return self._process_schema_type(schema["oneOf"][0], type_name) @@ -136,7 +132,7 @@ class EnterpriseActionTool(BaseTool): enum_values = schema["enum"] if not enum_values: return self._map_json_type_to_python(json_type) - return Literal[tuple(enum_values)] # type: ignore + return Literal[tuple(enum_values)] # type: ignore[return-value] if json_type == "array": items_schema = schema.get("items", {"type": "string"}) @@ -148,7 +144,7 @@ class EnterpriseActionTool(BaseTool): return self._map_json_type_to_python(json_type) - def _create_nested_model(self, schema: Dict[str, Any], model_name: str) -> type: + def _create_nested_model(self, schema: Dict[str, Any], model_name: str) -> Type[Any]: """Create a nested Pydantic model for complex objects.""" full_model_name = f"{self._base_name}{model_name}" @@ -187,7 +183,7 @@ class EnterpriseActionTool(BaseTool): return dict def _create_field_definition( - self, field_type: type, is_required: bool, description: str + self, field_type: Type[Any], is_required: bool, description: str ) -> tuple: """Create Pydantic field definition based on type and requirement.""" if is_required: @@ -201,7 +197,7 @@ class EnterpriseActionTool(BaseTool): Field(default=None, description=description), ) - def _map_json_type_to_python(self, json_type: str) -> type: + def _map_json_type_to_python(self, json_type: str) -> Type[Any]: """Map basic JSON schema types to Python types.""" type_mapping = { "string": str, @@ -246,12 +242,13 @@ class EnterpriseActionTool(BaseTool): if field_name not in cleaned_kwargs: cleaned_kwargs[field_name] = None - api_url = f"{self.enterprise_action_kit_project_url}/{self.enterprise_action_kit_project_id}/actions" + + api_url = f"{self.enterprise_api_base_url}/actions/{self.action_name}/execute" headers = { "Authorization": f"Bearer {self.enterprise_action_token}", "Content-Type": "application/json", } - payload = {"action": self.action_name, "parameters": cleaned_kwargs} + payload = cleaned_kwargs response = requests.post( url=api_url, headers=headers, json=payload, timeout=60 @@ -274,40 +271,30 @@ class EnterpriseActionKitToolAdapter: def __init__( self, enterprise_action_token: str, - enterprise_action_kit_project_url: str = ENTERPRISE_ACTION_KIT_PROJECT_URL, - enterprise_action_kit_project_id: str = ENTERPRISE_ACTION_KIT_PROJECT_ID, + enterprise_api_base_url: Optional[str] = None, ): """Initialize the adapter with an enterprise action token.""" - self.enterprise_action_token = enterprise_action_token + self._set_enterprise_action_token(enterprise_action_token) self._actions_schema = {} self._tools = None - self.enterprise_action_kit_project_id = 
enterprise_action_kit_project_id - self.enterprise_action_kit_project_url = enterprise_action_kit_project_url + self.enterprise_api_base_url = enterprise_api_base_url or get_enterprise_api_base_url() def tools(self) -> List[BaseTool]: """Get the list of tools created from enterprise actions.""" if self._tools is None: self._fetch_actions() self._create_tools() - return self._tools + return self._tools or [] def _fetch_actions(self): """Fetch available actions from the API.""" try: - if ( - self.enterprise_action_token is None - or self.enterprise_action_token == "" - ): - self.enterprise_action_token = os.environ.get( - "CREWAI_ENTERPRISE_TOOLS_TOKEN" - ) - actions_url = f"{self.enterprise_action_kit_project_url}/{self.enterprise_action_kit_project_id}/actions" + actions_url = f"{self.enterprise_api_base_url}/actions" headers = {"Authorization": f"Bearer {self.enterprise_action_token}"} - params = {"format": "json_schema"} response = requests.get( - actions_url, headers=headers, params=params, timeout=30 + actions_url, headers=headers, timeout=30 ) response.raise_for_status() @@ -316,17 +303,22 @@ class EnterpriseActionKitToolAdapter: print(f"Unexpected API response structure: {raw_data}") return - # Parse the actions schema parsed_schema = {} action_categories = raw_data["actions"] - for category, action_list in action_categories.items(): + for integration_type, action_list in action_categories.items(): if isinstance(action_list, list): for action in action_list: - func_details = action.get("function") - if func_details and "name" in func_details: - action_name = func_details["name"] - parsed_schema[action_name] = action + action_name = action.get("name") + if action_name: + action_schema = { + "function": { + "name": action_name, + "description": action.get("description", f"Execute {action_name}"), + "parameters": action.get("parameters", {}) + } + } + parsed_schema[action_name] = action_schema self._actions_schema = parsed_schema @@ -408,14 +400,23 @@ class EnterpriseActionKitToolAdapter: action_name=action_name, action_schema=action_schema, enterprise_action_token=self.enterprise_action_token, - enterprise_action_kit_project_id=self.enterprise_action_kit_project_id, - enterprise_action_kit_project_url=self.enterprise_action_kit_project_url, + enterprise_api_base_url=self.enterprise_api_base_url, ) tools.append(tool) self._tools = tools + def _set_enterprise_action_token(self, enterprise_action_token: Optional[str]): + if enterprise_action_token and not enterprise_action_token.startswith("PK_"): + logging.warning( + "Legacy token detected, please consider using the new Enterprise Action Auth token. Check out our docs for more information https://docs.crewai.com/en/enterprise/features/integrations." 
+ ) + + token = enterprise_action_token or os.environ.get("CREWAI_ENTERPRISE_TOOLS_TOKEN") + + self.enterprise_action_token = token + def __enter__(self): return self.tools() diff --git a/tests/tools/crewai_enterprise_tools_test.py b/tests/tools/crewai_enterprise_tools_test.py index b043289dc..2e4f51ca9 100644 --- a/tests/tools/crewai_enterprise_tools_test.py +++ b/tests/tools/crewai_enterprise_tools_test.py @@ -281,10 +281,9 @@ class TestEnterpriseActionToolSchemaConversion(unittest.TestCase): call_args = mock_post.call_args payload = call_args[1]["json"] - self.assertEqual(payload["action"], "GMAIL_SEARCH_FOR_EMAIL") - self.assertIn("filterCriteria", payload["parameters"]) - self.assertIn("options", payload["parameters"]) - self.assertEqual(payload["parameters"]["filterCriteria"]["operation"], "OR") + self.assertIn("filterCriteria", payload) + self.assertIn("options", payload) + self.assertEqual(payload["filterCriteria"]["operation"], "OR") def test_model_naming_convention(self): """Test that generated model names follow proper conventions.""" From 47b64d3507c3be54c6f6360cb6c755bf37f15bd4 Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Tue, 2 Sep 2025 17:28:22 -0300 Subject: [PATCH 382/391] fix: use explicit DeprecationWarning for legacy token detection (#440) --- src/crewai_tools/adapters/enterprise_adapter.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/crewai_tools/adapters/enterprise_adapter.py b/src/crewai_tools/adapters/enterprise_adapter.py index 3acf1e8e0..c4bfa35eb 100644 --- a/src/crewai_tools/adapters/enterprise_adapter.py +++ b/src/crewai_tools/adapters/enterprise_adapter.py @@ -1,7 +1,7 @@ import os import json import requests -import logging +import warnings from typing import List, Any, Dict, Literal, Optional, Union, get_origin, Type, cast from pydantic import Field, create_model from crewai.tools import BaseTool @@ -409,8 +409,10 @@ class EnterpriseActionKitToolAdapter: def _set_enterprise_action_token(self, enterprise_action_token: Optional[str]): if enterprise_action_token and not enterprise_action_token.startswith("PK_"): - logging.warning( - "Legacy token detected, please consider using the new Enterprise Action Auth token. Check out our docs for more information https://docs.crewai.com/en/enterprise/features/integrations." + warnings.warn( + "Legacy token detected, please consider using the new Enterprise Action Auth token. 
Check out our docs for more information https://docs.crewai.com/en/enterprise/features/integrations.", + DeprecationWarning, + stacklevel=2 ) token = enterprise_action_token or os.environ.get("CREWAI_ENTERPRISE_TOOLS_TOKEN") From cb8a1da730f6623a43114c80a66520c871afe3c3 Mon Sep 17 00:00:00 2001 From: Tony Kipkemboi Date: Mon, 8 Sep 2025 10:53:06 -0400 Subject: [PATCH 383/391] feat(parallel): add ParallelSearchTool (Search API v1beta) (#445) * docs: add BUILDING_TOOLS.md * feat(parallel): add ParallelSearchTool (Search API v1beta), tests, README; register exports; regenerate tool.specs.json * test(parallel): replace URL substring assertion with hostname allowlist (CodeQL) --- src/crewai_tools/__init__.py | 1 + src/crewai_tools/tools/__init__.py | 3 + .../tools/parallel_tools/README.md | 153 ++++++++++++++++++ .../tools/parallel_tools/__init__.py | 7 + .../parallel_tools/parallel_search_tool.py | 119 ++++++++++++++ tests/tools/parallel_search_tool_test.py | 47 ++++++ 6 files changed, 330 insertions(+) create mode 100644 src/crewai_tools/tools/parallel_tools/README.md create mode 100644 src/crewai_tools/tools/parallel_tools/__init__.py create mode 100644 src/crewai_tools/tools/parallel_tools/parallel_search_tool.py create mode 100644 tests/tools/parallel_search_tool_test.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index f4c03ba0e..27d259b31 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -93,4 +93,5 @@ from .tools import ( YoutubeChannelSearchTool, YoutubeVideoSearchTool, ZapierActionTools, + ParallelSearchTool, ) diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index bf1a166d9..ba1621456 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -121,3 +121,6 @@ from .youtube_channel_search_tool.youtube_channel_search_tool import ( ) from .youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool from .zapier_action_tool.zapier_action_tool import ZapierActionTools +from .parallel_tools import ( + ParallelSearchTool, +) diff --git a/src/crewai_tools/tools/parallel_tools/README.md b/src/crewai_tools/tools/parallel_tools/README.md new file mode 100644 index 000000000..37f413561 --- /dev/null +++ b/src/crewai_tools/tools/parallel_tools/README.md @@ -0,0 +1,153 @@ +# ParallelSearchTool + +Unified Parallel web search tool using the Parallel Search API (v1beta). Returns ranked results with compressed excerpts optimized for LLMs. + +- **Quickstart**: see the official docs: [Search API Quickstart](https://docs.parallel.ai/search-api/search-quickstart) +- **Processors**: guidance on `base` vs `pro`: [Processors](https://docs.parallel.ai/search-api/processors) + +## Why this tool + +- **Single-call pipeline**: Replaces search → scrape → extract with a single, low‑latency API call. +- **LLM‑ready**: Returns compressed excerpts that feed directly into LLM prompts (fewer tokens, less pre/post‑processing). +- **Flexible**: Control result count and excerpt length; optionally restrict sources via `source_policy`. 
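+
+To ground the "LLM-ready" claim above, here is a minimal sketch of folding a `run(...)` response into prompt context. It assumes only the documented response shape (`search_id` plus `results` entries carrying `url`, `title`, and `excerpts`); the helper name and prompt layout are illustrative, not part of the API:
+
+```python
+import json
+
+def to_prompt_context(resp_json: str, max_sources: int = 5) -> str:
+    """Flatten Parallel Search results into numbered, citable context."""
+    data = json.loads(resp_json)
+    blocks = []
+    for i, result in enumerate(data.get("results", [])[:max_sources], start=1):
+        # Each result carries compressed excerpts; join them into one passage.
+        excerpt = " ".join(result.get("excerpts", []))
+        blocks.append(f"[{i}] {result.get('title', '')} ({result.get('url', '')})\n{excerpt}")
+    return "\n\n".join(blocks)
+```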
+ +## Environment + +- `PARALLEL_API_KEY` (required) + +Optional (for the agent example): +- `OPENAI_API_KEY` or other LLM provider keys supported by CrewAI + +## Parameters + +- `objective` (str, optional): Natural‑language research goal (≤ 5000 chars) +- `search_queries` (list[str], optional): Up to 5 keyword queries (each ≤ 200 chars) +- `processor` (str, default `base`): `base` (fast/low cost) or `pro` (freshness/quality) +- `max_results` (int, default 10): ≤ 40 (subject to processor limits) +- `max_chars_per_result` (int, default 6000): ≥ 100; values > 30000 not guaranteed +- `source_policy` (dict, optional): Source policy for domain inclusion/exclusion + +Notes: +- API is in beta; default rate limit is 600 RPM. Contact support for production capacity. + +## Direct usage (when published) + +```python +from crewai_tools import ParallelSearchTool + +tool = ParallelSearchTool() +resp_json = tool.run( + objective="When was the United Nations established? Prefer UN's websites.", + search_queries=["Founding year UN", "Year of founding United Nations"], + processor="base", + max_results=5, + max_chars_per_result=1500, +) +print(resp_json) # => {"search_id": ..., "results": [{"url", "title", "excerpts": [...]}, ...]} +``` + +### Parameters you can pass + +Call `run(...)` with any of the following (at least one of `objective` or `search_queries` is required): + +```python +tool.run( + objective: str | None = None, # ≤ 5000 chars + search_queries: list[str] | None = None, # up to 5 items, each ≤ 200 chars + processor: str = "base", # "base" (fast) or "pro" (freshness/quality) + max_results: int = 10, # ≤ 40 (processor limits apply) + max_chars_per_result: int = 6000, # ≥ 100 (values > 30000 not guaranteed) + source_policy: dict | None = None, # optional SourcePolicy config +) +``` + +Example with `source_policy`: + +```python +source_policy = { + "allow": {"domains": ["un.org"]}, + # "deny": {"domains": ["example.com"]}, # optional +} + +resp_json = tool.run( + objective="When was the United Nations established?", + processor="base", + max_results=5, + max_chars_per_result=1500, + source_policy=source_policy, +) +``` + +## Example with agents + +Here’s a minimal example that calls `ParallelSearchTool` to fetch sources and has an LLM produce a short, cited answer. + +```python +import os +from crewai import Agent, Task, Crew, LLM, Process +from crewai_tools import ParallelSearchTool + +# LLM +llm = LLM( + model="gemini/gemini-2.0-flash", + temperature=0.5, + api_key=os.getenv("GEMINI_API_KEY") +) + +# Parallel Search +search = ParallelSearchTool() + +# User query +query = "find all the recent concerns about AI evals? please cite the sources" + +# Researcher agent +researcher = Agent( + role="Web Researcher", + backstory="You are an expert web researcher", + goal="Find cited, high-quality sources and provide a brief answer.", + tools=[search], + llm=llm, + verbose=True, +) + +# Research task +task = Task( + description=f"Research the {query} and produce a short, cited answer.", + expected_output="A concise, sourced answer to the question. 
The answer should be in this format: [query]: [answer] - [source]", + agent=researcher, + output_file="answer.mdx", +) + +# Crew +crew = Crew( + agents=[researcher], + tasks=[task], + verbose=True, + process=Process.sequential, +) + +# Run the crew +result = crew.kickoff(inputs={'query': query}) +print(result) +``` + +Output from the agent above: + +```md +Recent concerns about AI evaluations include: the rise of AI-related incidents alongside a lack of standardized Responsible AI (RAI) evaluations among major industrial model developers - [https://hai.stanford.edu/ai-index/2025-ai-index-report]; flawed benchmark datasets that fail to account for critical factors, leading to unrealistic estimates of AI model abilities - [https://www.nature.com/articles/d41586-025-02462-5]; the need for multi-metric, context-aware evaluations in medical imaging AI to ensure reliability and clinical relevance - [https://www.sciencedirect.com/science/article/pii/S3050577125000283]; challenges related to data sets (insufficient, imbalanced, or poor quality), communication gaps, and misaligned expectations in AI model training - [https://www.oracle.com/artificial-intelligence/ai-model-training-challenges/]; the argument that LLM agents should be evaluated primarily on their riskiness, not just performance, due to unreliability, hallucinations, and brittleness - [https://www.technologyreview.com/2025/06/24/1119187/fix-ai-evaluation-crisis/]; the fact that the AI industry's embraced benchmarks may be close to meaningless, with top makers of AI models picking and choosing different responsible AI benchmarks, complicating efforts to systematically compare risks and limitations - [https://themarkup.org/artificial-intelligence/2024/07/17/everyone-is-judging-ai-by-these-tests-but-experts-say-theyre-close-to-meaningless]; and the difficulty of building robust and reliable model evaluations, as many existing evaluation suites are limited in their ability to serve as accurate indicators of model capabilities or safety - [https://www.anthropic.com/research/evaluating-ai-systems]. +``` + +Tips: +- Ensure your LLM provider keys are set (e.g., `GEMINI_API_KEY`) and CrewAI model config is in place. +- For longer analyses, raise `max_chars_per_result` or use `processor="pro"` (higher quality, higher latency). + +## Behavior + +- Single‑request web research; no scraping/post‑processing required. +- Returns `search_id` and ranked `results` with compressed `excerpts`. +- Clear error handling on HTTP/timeouts. 
+ +## References + +- Search API Quickstart: https://docs.parallel.ai/search-api/search-quickstart +- Processors: https://docs.parallel.ai/search-api/processors diff --git a/src/crewai_tools/tools/parallel_tools/__init__.py b/src/crewai_tools/tools/parallel_tools/__init__.py new file mode 100644 index 000000000..579dc8941 --- /dev/null +++ b/src/crewai_tools/tools/parallel_tools/__init__.py @@ -0,0 +1,7 @@ +from .parallel_search_tool import ParallelSearchTool + +__all__ = [ + "ParallelSearchTool", +] + + diff --git a/src/crewai_tools/tools/parallel_tools/parallel_search_tool.py b/src/crewai_tools/tools/parallel_tools/parallel_search_tool.py new file mode 100644 index 000000000..d695bac9d --- /dev/null +++ b/src/crewai_tools/tools/parallel_tools/parallel_search_tool.py @@ -0,0 +1,119 @@ +import os +from typing import Any, Dict, List, Optional, Type, Annotated + +import requests +from crewai.tools import BaseTool, EnvVar +from pydantic import BaseModel, Field + + +class ParallelSearchInput(BaseModel): + """Input schema for ParallelSearchTool using the Search API (v1beta). + + At least one of objective or search_queries is required. + """ + + objective: Optional[str] = Field( + None, + description="Natural-language goal for the web research (<=5000 chars)", + max_length=5000, + ) + search_queries: Optional[List[Annotated[str, Field(max_length=200)]]] = Field( + default=None, + description="Optional list of keyword queries (<=5 items, each <=200 chars)", + min_length=1, + max_length=5, + ) + processor: str = Field( + default="base", + description="Search processor: 'base' (fast/low cost) or 'pro' (higher quality/freshness)", + pattern=r"^(base|pro)$", + ) + max_results: int = Field( + default=10, + ge=1, + le=40, + description="Maximum number of search results to return (processor limits apply)", + ) + max_chars_per_result: int = Field( + default=6000, + ge=100, + description="Maximum characters per result excerpt (values >30000 not guaranteed)", + ) + source_policy: Optional[Dict[str, Any]] = Field( + default=None, description="Optional source policy configuration" + ) + + +class ParallelSearchTool(BaseTool): + name: str = "Parallel Web Search Tool" + description: str = ( + "Search the web using Parallel's Search API (v1beta). Returns ranked results with " + "compressed excerpts optimized for LLMs." 
+ ) + args_schema: Type[BaseModel] = ParallelSearchInput + + env_vars: List[EnvVar] = [ + EnvVar( + name="PARALLEL_API_KEY", + description="API key for Parallel", + required=True, + ), + ] + package_dependencies: List[str] = ["requests"] + + search_url: str = "https://api.parallel.ai/v1beta/search" + + def _run( + self, + objective: Optional[str] = None, + search_queries: Optional[List[str]] = None, + processor: str = "base", + max_results: int = 10, + max_chars_per_result: int = 6000, + source_policy: Optional[Dict[str, Any]] = None, + **_: Any, + ) -> str: + api_key = os.environ.get("PARALLEL_API_KEY") + if not api_key: + return "Error: PARALLEL_API_KEY environment variable is required" + + if not objective and not search_queries: + return "Error: Provide at least one of 'objective' or 'search_queries'" + + headers = { + "x-api-key": api_key, + "Content-Type": "application/json", + } + + try: + payload: Dict[str, Any] = { + "processor": processor, + "max_results": max_results, + "max_chars_per_result": max_chars_per_result, + } + if objective is not None: + payload["objective"] = objective + if search_queries is not None: + payload["search_queries"] = search_queries + if source_policy is not None: + payload["source_policy"] = source_policy + + request_timeout = 90 if processor == "pro" else 30 + resp = requests.post(self.search_url, json=payload, headers=headers, timeout=request_timeout) + if resp.status_code >= 300: + return f"Parallel Search API error: {resp.status_code} {resp.text[:200]}" + data = resp.json() + return self._format_output(data) + except requests.Timeout: + return "Parallel Search API timeout. Please try again later." + except Exception as exc: # noqa: BLE001 + return f"Unexpected error calling Parallel Search API: {exc}" + + def _format_output(self, result: Dict[str, Any]) -> str: + # Return the full JSON payload (search_id + results) as a compact JSON string + try: + import json + + return json.dumps(result or {}, ensure_ascii=False) + except Exception: + return str(result or {}) diff --git a/tests/tools/parallel_search_tool_test.py b/tests/tools/parallel_search_tool_test.py new file mode 100644 index 000000000..0d4df60a7 --- /dev/null +++ b/tests/tools/parallel_search_tool_test.py @@ -0,0 +1,47 @@ +import os +import json +from urllib.parse import urlparse +from unittest.mock import patch + +import pytest + +from crewai_tools.tools.parallel_tools.parallel_search_tool import ( + ParallelSearchTool, +) + + +def test_requires_env_var(monkeypatch): + monkeypatch.delenv("PARALLEL_API_KEY", raising=False) + tool = ParallelSearchTool() + result = tool.run(objective="test") + assert "PARALLEL_API_KEY" in result + + +@patch("crewai_tools.tools.parallel_tools.parallel_search_tool.requests.post") +def test_happy_path(mock_post, monkeypatch): + monkeypatch.setenv("PARALLEL_API_KEY", "test") + + mock_post.return_value.status_code = 200 + mock_post.return_value.json.return_value = { + "search_id": "search_123", + "results": [ + { + "url": "https://www.un.org/en/about-us/history-of-the-un", + "title": "History of the United Nations", + "excerpts": [ + "Four months after the San Francisco Conference ended, the United Nations officially began, on 24 October 1945..." 
+ ], + } + ], + } + + tool = ParallelSearchTool() + result = tool.run(objective="When was the UN established?", search_queries=["Founding year UN"]) + data = json.loads(result) + assert "search_id" in data + urls = [r.get("url", "") for r in data.get("results", [])] + # Validate host against allowed set instead of substring matching + allowed_hosts = {"www.un.org", "un.org"} + assert any(urlparse(u).netloc in allowed_hosts for u in urls) + + From 6f2301c945bb7f856a6913606fd01b65e256d62d Mon Sep 17 00:00:00 2001 From: Vini Brasil Date: Wed, 10 Sep 2025 09:38:14 -0300 Subject: [PATCH 384/391] Clarify tool support for both local and remote URLs (#447) This commit updates tool prompts to explicitly highlight that some tools can accept both local file paths and remote URLs. The improved prompts ensure LLMs understand they may pass remote resources. --- src/crewai_tools/tools/csv_search_tool/csv_search_tool.py | 4 ++-- .../tools/docx_search_tool/docx_search_tool.py | 2 +- .../tools/json_search_tool/json_search_tool.py | 7 ++++--- src/crewai_tools/tools/mdx_search_tool/mdx_search_tool.py | 4 ++-- src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py | 6 +++--- src/crewai_tools/tools/txt_search_tool/txt_search_tool.py | 2 +- src/crewai_tools/tools/xml_search_tool/xml_search_tool.py | 5 ++--- 7 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py index 4567df201..05572d8bb 100644 --- a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py +++ b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py @@ -1,4 +1,4 @@ -from typing import Any, Optional, Type +from typing import Optional, Type from embedchain.models.data_type import DataType from pydantic import BaseModel, Field @@ -18,7 +18,7 @@ class FixedCSVSearchToolSchema(BaseModel): class CSVSearchToolSchema(FixedCSVSearchToolSchema): """Input for CSVSearchTool.""" - csv: str = Field(..., description="Mandatory csv path you want to search") + csv: str = Field(..., description="File path or URL of a CSV file to be searched") class CSVSearchTool(RagTool): diff --git a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py index cdd76c29d..a9d2c9610 100644 --- a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py +++ b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py @@ -10,7 +10,7 @@ class FixedDOCXSearchToolSchema(BaseModel): """Input for DOCXSearchTool.""" docx: Optional[str] = Field( - ..., description="Mandatory docx path you want to search" + ..., description="File path or URL of a DOCX file to be searched" ) search_query: str = Field( ..., diff --git a/src/crewai_tools/tools/json_search_tool/json_search_tool.py b/src/crewai_tools/tools/json_search_tool/json_search_tool.py index 5d832c6b9..820323eec 100644 --- a/src/crewai_tools/tools/json_search_tool/json_search_tool.py +++ b/src/crewai_tools/tools/json_search_tool/json_search_tool.py @@ -1,6 +1,5 @@ -from typing import Any, Optional, Type +from typing import Optional, Type -from embedchain.models.data_type import DataType from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool @@ -18,7 +17,9 @@ class FixedJSONSearchToolSchema(BaseModel): class JSONSearchToolSchema(FixedJSONSearchToolSchema): """Input for JSONSearchTool.""" - json_path: str = Field(..., description="Mandatory json path you want to search") + json_path: str = Field( + ..., description="File 
path or URL of a JSON file to be searched" ) class JSONSearchTool(RagTool): diff --git a/src/crewai_tools/tools/mdx_search_tool/mdx_search_tool.py b/src/crewai_tools/tools/mdx_search_tool/mdx_search_tool.py index dfab255b0..14afe5db7 100644 --- a/src/crewai_tools/tools/mdx_search_tool/mdx_search_tool.py +++ b/src/crewai_tools/tools/mdx_search_tool/mdx_search_tool.py @@ -1,4 +1,4 @@ -from typing import Any, Optional, Type +from typing import Optional, Type from embedchain.models.data_type import DataType from pydantic import BaseModel, Field @@ -18,7 +18,7 @@ class FixedMDXSearchToolSchema(BaseModel): class MDXSearchToolSchema(FixedMDXSearchToolSchema): """Input for MDXSearchTool.""" - mdx: str = Field(..., description="Mandatory mdx path you want to search") + mdx: str = Field(..., description="File path or URL of an MDX file to be searched") class MDXSearchTool(RagTool): diff --git a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py index d56219785..ad0b8f8d3 100644 --- a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py +++ b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py @@ -1,7 +1,7 @@ -from typing import Any, Optional, Type +from typing import Optional, Type from embedchain.models.data_type import DataType -from pydantic import BaseModel, Field, model_validator +from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool @@ -17,7 +17,7 @@ class FixedPDFSearchToolSchema(BaseModel): class PDFSearchToolSchema(FixedPDFSearchToolSchema): """Input for PDFSearchTool.""" - pdf: str = Field(..., description="Mandatory pdf path you want to search") + pdf: str = Field(..., description="File path or URL of a PDF file to be searched") class PDFSearchTool(RagTool): diff --git a/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py b/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py index ebbde1223..93d696ab1 100644 --- a/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py +++ b/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py @@ -17,7 +17,7 @@ class FixedTXTSearchToolSchema(BaseModel): class TXTSearchToolSchema(FixedTXTSearchToolSchema): """Input for TXTSearchTool.""" - txt: str = Field(..., description="Mandatory txt path you want to search") + txt: str = Field(..., description="File path or URL of a TXT file to be searched") class TXTSearchTool(RagTool): diff --git a/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py b/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py index 2e0d26a88..426b0ca38 100644 --- a/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py +++ b/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py @@ -1,6 +1,5 @@ -from typing import Any, Optional, Type +from typing import Optional, Type -from embedchain.models.data_type import DataType +from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool @@ -18,7 +17,7 @@ class FixedXMLSearchToolSchema(BaseModel): class XMLSearchToolSchema(FixedXMLSearchToolSchema): """Input for XMLSearchTool.""" - xml: str = Field(..., description="Mandatory xml path you want to search") + xml: str = Field(..., description="File path or URL of an XML file to be searched") class XMLSearchTool(RagTool): From f9925887aa944930115136919f1e329c3b8c0c33 Mon Sep 17 00:00:00 2001 From: Lucas Gomide Date: Fri, 12 Sep 2025 13:04:26 -0300 Subject: [PATCH 385/391] Add CrewAIPlatformTools (#449) * chore: add deprecation warning in CrewaiEnterpriseTools * feat: add CrewAI Platform Tool * feat: drop 
support to oldest env-var token --- src/crewai_tools/__init__.py | 1 + src/crewai_tools/tools/__init__.py | 1 + .../crewai_enterprise_tools.py | 7 + .../tools/crewai_platform_tools/__init__.py | 16 ++ .../crewai_platform_action_tool.py | 233 ++++++++++++++++++ .../crewai_platform_tool_builder.py | 135 ++++++++++ .../crewai_platform_tools.py | 28 +++ .../tools/crewai_platform_tools/misc.py | 13 + .../test_crewai_platform_action_tool.py | 165 +++++++++++++ .../test_crewai_platform_tool_builder.py | 223 +++++++++++++++++ .../test_crewai_platform_tools.py | 95 +++++++ 11 files changed, 917 insertions(+) create mode 100644 src/crewai_tools/tools/crewai_platform_tools/__init__.py create mode 100644 src/crewai_tools/tools/crewai_platform_tools/crewai_platform_action_tool.py create mode 100644 src/crewai_tools/tools/crewai_platform_tools/crewai_platform_tool_builder.py create mode 100644 src/crewai_tools/tools/crewai_platform_tools/crewai_platform_tools.py create mode 100644 src/crewai_tools/tools/crewai_platform_tools/misc.py create mode 100644 tests/tools/crewai_platform_tools/test_crewai_platform_action_tool.py create mode 100644 tests/tools/crewai_platform_tools/test_crewai_platform_tool_builder.py create mode 100644 tests/tools/crewai_platform_tools/test_crewai_platform_tools.py diff --git a/src/crewai_tools/__init__.py b/src/crewai_tools/__init__.py index 27d259b31..85fe5ed6e 100644 --- a/src/crewai_tools/__init__.py +++ b/src/crewai_tools/__init__.py @@ -25,6 +25,7 @@ from .tools import ( ContextualAIRerankTool, CouchbaseFTSVectorSearchTool, CrewaiEnterpriseTools, + CrewaiPlatformTools, CSVSearchTool, DallETool, DatabricksQueryTool, diff --git a/src/crewai_tools/tools/__init__.py b/src/crewai_tools/tools/__init__.py index ba1621456..2b0bb968a 100644 --- a/src/crewai_tools/tools/__init__.py +++ b/src/crewai_tools/tools/__init__.py @@ -19,6 +19,7 @@ from .contextualai_query_tool.contextual_query_tool import ContextualAIQueryTool from .contextualai_rerank_tool.contextual_rerank_tool import ContextualAIRerankTool from .couchbase_tool.couchbase_tool import CouchbaseFTSVectorSearchTool from .crewai_enterprise_tools.crewai_enterprise_tools import CrewaiEnterpriseTools +from .crewai_platform_tools.crewai_platform_tools import CrewaiPlatformTools from .csv_search_tool.csv_search_tool import CSVSearchTool from .dalle_tool.dalle_tool import DallETool from .databricks_query_tool.databricks_query_tool import DatabricksQueryTool diff --git a/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py b/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py index 0a56dee67..f5ac47643 100644 --- a/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py +++ b/src/crewai_tools/tools/crewai_enterprise_tools/crewai_enterprise_tools.py @@ -33,6 +33,13 @@ def CrewaiEnterpriseTools( A ToolCollection of BaseTool instances for enterprise actions """ + import warnings + warnings.warn( + "CrewaiEnterpriseTools will be removed in v1.0.0. 
Consider using `Agent(apps=[...])` instead.", + DeprecationWarning, + stacklevel=2 + ) + if enterprise_token is None or enterprise_token == "": enterprise_token = os.environ.get("CREWAI_ENTERPRISE_TOOLS_TOKEN") if not enterprise_token: diff --git a/src/crewai_tools/tools/crewai_platform_tools/__init__.py b/src/crewai_tools/tools/crewai_platform_tools/__init__.py new file mode 100644 index 000000000..55db598c5 --- /dev/null +++ b/src/crewai_tools/tools/crewai_platform_tools/__init__.py @@ -0,0 +1,16 @@ +"""CrewAI Platform Tools + +This module provides tools for integrating with various platform applications +through the CrewAI platform API. +""" + +from crewai_tools.tools.crewai_platform_tools.crewai_platform_tools import CrewaiPlatformTools +from crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool import CrewAIPlatformActionTool +from crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder import CrewaiPlatformToolBuilder + + +__all__ = [ + "CrewaiPlatformTools", + "CrewAIPlatformActionTool", + "CrewaiPlatformToolBuilder", +] diff --git a/src/crewai_tools/tools/crewai_platform_tools/crewai_platform_action_tool.py b/src/crewai_tools/tools/crewai_platform_tools/crewai_platform_action_tool.py new file mode 100644 index 000000000..8df877408 --- /dev/null +++ b/src/crewai_tools/tools/crewai_platform_tools/crewai_platform_action_tool.py @@ -0,0 +1,233 @@ +""" +CrewAI Platform Action Tool +""" +import re +import json +import requests +from typing import Dict, Any, List, Type, Optional, Union, get_origin, cast, Literal +from pydantic import Field, create_model +from crewai.tools import BaseTool +from crewai_tools.tools.crewai_platform_tools.misc import get_platform_api_base_url, get_platform_integration_token + + +class CrewAIPlatformActionTool(BaseTool): + action_name: str = Field(default="", description="The name of the action") + action_schema: Dict[str, Any] = Field( + default_factory=dict, description="The schema of the action" + ) + + def __init__( + self, + description: str, + action_name: str, + action_schema: Dict[str, Any], + ): + self._model_registry = {} + self._base_name = self._sanitize_name(action_name) + + schema_props, required = self._extract_schema_info(action_schema) + + field_definitions = {} + for param_name, param_details in schema_props.items(): + param_desc = param_details.get("description", "") + is_required = param_name in required + + try: + field_type = self._process_schema_type( + param_details, self._sanitize_name(param_name).title() + ) + except Exception as e: + field_type = str + + field_definitions[param_name] = self._create_field_definition( + field_type, is_required, param_desc + ) + + if field_definitions: + try: + args_schema = create_model( + f"{self._base_name}Schema", **field_definitions + ) + except Exception as e: + print(f"Warning: Could not create main schema model: {e}") + args_schema = create_model( + f"{self._base_name}Schema", + input_text=(str, Field(description="Input for the action")), + ) + else: + args_schema = create_model( + f"{self._base_name}Schema", + input_text=(str, Field(description="Input for the action")), + ) + + super().__init__(name=action_name.lower().replace(" ", "_"), description=description, args_schema=args_schema) + self.action_name = action_name + self.action_schema = action_schema + + def _sanitize_name(self, name: str) -> str: + name = name.lower().replace(" ", "_") + sanitized = re.sub(r"[^a-zA-Z0-9_]", "", name) + parts = sanitized.split("_") + return "".join(word.capitalize() for word in 
parts if word) + + def _extract_schema_info( + self, action_schema: Dict[str, Any] + ) -> tuple[Dict[str, Any], List[str]]: + schema_props = ( + action_schema.get("function", {}) + .get("parameters", {}) + .get("properties", {}) + ) + required = ( + action_schema.get("function", {}).get("parameters", {}).get("required", []) + ) + return schema_props, required + + def _process_schema_type(self, schema: Dict[str, Any], type_name: str) -> Type[Any]: + if "anyOf" in schema: + any_of_types = schema["anyOf"] + is_nullable = any(t.get("type") == "null" for t in any_of_types) + non_null_types = [t for t in any_of_types if t.get("type") != "null"] + + if non_null_types: + base_type = self._process_schema_type(non_null_types[0], type_name) + return Optional[base_type] if is_nullable else base_type + return cast(Type[Any], Optional[str]) + + if "oneOf" in schema: + return self._process_schema_type(schema["oneOf"][0], type_name) + + if "allOf" in schema: + return self._process_schema_type(schema["allOf"][0], type_name) + + json_type = schema.get("type", "string") + + if "enum" in schema: + enum_values = schema["enum"] + if not enum_values: + return self._map_json_type_to_python(json_type) + return Literal[tuple(enum_values)] + + if json_type == "array": + items_schema = schema.get("items", {"type": "string"}) + item_type = self._process_schema_type(items_schema, f"{type_name}Item") + return List[item_type] + + if json_type == "object": + return self._create_nested_model(schema, type_name) + + return self._map_json_type_to_python(json_type) + + def _create_nested_model(self, schema: Dict[str, Any], model_name: str) -> Type[Any]: + full_model_name = f"{self._base_name}{model_name}" + + if full_model_name in self._model_registry: + return self._model_registry[full_model_name] + + properties = schema.get("properties", {}) + required_fields = schema.get("required", []) + + if not properties: + return dict + + field_definitions = {} + for prop_name, prop_schema in properties.items(): + prop_desc = prop_schema.get("description", "") + is_required = prop_name in required_fields + + try: + prop_type = self._process_schema_type( + prop_schema, f"{model_name}{self._sanitize_name(prop_name).title()}" + ) + except Exception as e: + prop_type = str + + field_definitions[prop_name] = self._create_field_definition( + prop_type, is_required, prop_desc + ) + + try: + nested_model = create_model(full_model_name, **field_definitions) + self._model_registry[full_model_name] = nested_model + return nested_model + except Exception as e: + print(f"Warning: Could not create nested model {full_model_name}: {e}") + return dict + + def _create_field_definition( + self, field_type: Type[Any], is_required: bool, description: str + ) -> tuple: + if is_required: + return (field_type, Field(description=description)) + else: + if get_origin(field_type) is Union: + return (field_type, Field(default=None, description=description)) + else: + return ( + Optional[field_type], + Field(default=None, description=description), + ) + + def _map_json_type_to_python(self, json_type: str) -> Type[Any]: + type_mapping = { + "string": str, + "integer": int, + "number": float, + "boolean": bool, + "array": list, + "object": dict, + "null": type(None), + } + return type_mapping.get(json_type, str) + + def _get_required_nullable_fields(self) -> List[str]: + schema_props, required = self._extract_schema_info(self.action_schema) + + required_nullable_fields = [] + for param_name in required: + param_details = schema_props.get(param_name, {}) + if 
self._is_nullable_type(param_details): + required_nullable_fields.append(param_name) + + return required_nullable_fields + + def _is_nullable_type(self, schema: Dict[str, Any]) -> bool: + if "anyOf" in schema: + return any(t.get("type") == "null" for t in schema["anyOf"]) + return schema.get("type") == "null" + + def _run(self, **kwargs) -> str: + try: + cleaned_kwargs = {} + for key, value in kwargs.items(): + if value is not None: + cleaned_kwargs[key] = value + + required_nullable_fields = self._get_required_nullable_fields() + + for field_name in required_nullable_fields: + if field_name not in cleaned_kwargs: + cleaned_kwargs[field_name] = None + + + api_url = f"{get_platform_api_base_url()}/actions/{self.action_name}/execute" + token = get_platform_integration_token() + headers = { + "Authorization": f"Bearer {token}", + "Content-Type": "application/json", + } + payload = cleaned_kwargs + + response = requests.post( + url=api_url, headers=headers, json=payload, timeout=60 + ) + + data = response.json() + if not response.ok: + error_message = data.get("error", {}).get("message", json.dumps(data)) + return f"API request failed: {error_message}" + + return json.dumps(data, indent=2) + + except Exception as e: + return f"Error executing action {self.action_name}: {str(e)}" diff --git a/src/crewai_tools/tools/crewai_platform_tools/crewai_platform_tool_builder.py b/src/crewai_tools/tools/crewai_platform_tools/crewai_platform_tool_builder.py new file mode 100644 index 000000000..9a8feb94c --- /dev/null +++ b/src/crewai_tools/tools/crewai_platform_tools/crewai_platform_tool_builder.py @@ -0,0 +1,135 @@ + +import requests +from typing import List, Any, Dict +from crewai.tools import BaseTool +from crewai_tools.tools.crewai_platform_tools.misc import get_platform_api_base_url, get_platform_integration_token +from crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool import CrewAIPlatformActionTool + + +class CrewaiPlatformToolBuilder: + def __init__( + self, + apps: list[str], + ): + self._apps = apps + self._actions_schema = {} + self._tools = None + + def tools(self) -> list[BaseTool]: + if self._tools is None: + self._fetch_actions() + self._create_tools() + return self._tools if self._tools is not None else [] + + def _fetch_actions(self): + actions_url = f"{get_platform_api_base_url()}/actions" + headers = {"Authorization": f"Bearer {get_platform_integration_token()}"} + + try: + response = requests.get( + actions_url, headers=headers, timeout=30, params={"apps": ",".join(self._apps)} + ) + response.raise_for_status() + except Exception as e: + return + + + raw_data = response.json() + + self._actions_schema = {} + action_categories = raw_data.get("actions", {}) + + for app, action_list in action_categories.items(): + if isinstance(action_list, list): + for action in action_list: + if action_name := action.get("name"): + action_schema = { + "function": { + "name": action_name, + "description": action.get("description", f"Execute {action_name}"), + "parameters": action.get("parameters", {}), + "app": app, + } + } + self._actions_schema[action_name] = action_schema + + def _generate_detailed_description( + self, schema: Dict[str, Any], indent: int = 0 + ) -> List[str]: + descriptions = [] + indent_str = " " * indent + + schema_type = schema.get("type", "string") + + if schema_type == "object": + properties = schema.get("properties", {}) + required_fields = schema.get("required", []) + + if properties: + descriptions.append(f"{indent_str}Object with properties:") + for 
prop_name, prop_schema in properties.items(): + prop_desc = prop_schema.get("description", "") + is_required = prop_name in required_fields + req_str = " (required)" if is_required else " (optional)" + descriptions.append( + f"{indent_str} - {prop_name}: {prop_desc}{req_str}" + ) + + if prop_schema.get("type") == "object": + descriptions.extend( + self._generate_detailed_description(prop_schema, indent + 2) + ) + elif prop_schema.get("type") == "array": + items_schema = prop_schema.get("items", {}) + if items_schema.get("type") == "object": + descriptions.append(f"{indent_str} Array of objects:") + descriptions.extend( + self._generate_detailed_description( + items_schema, indent + 3 + ) + ) + elif "enum" in items_schema: + descriptions.append( + f"{indent_str} Array of enum values: {items_schema['enum']}" + ) + elif "enum" in prop_schema: + descriptions.append( + f"{indent_str} Enum values: {prop_schema['enum']}" + ) + + return descriptions + + def _create_tools(self): + tools = [] + + for action_name, action_schema in self._actions_schema.items(): + function_details = action_schema.get("function", {}) + description = function_details.get("description", f"Execute {action_name}") + + parameters = function_details.get("parameters", {}) + param_descriptions = [] + + if parameters.get("properties"): + param_descriptions.append("\nDetailed Parameter Structure:") + param_descriptions.extend( + self._generate_detailed_description(parameters) + ) + + full_description = description + "\n".join(param_descriptions) + + tool = CrewAIPlatformActionTool( + description=full_description, + action_name=action_name, + action_schema=action_schema, + ) + + tools.append(tool) + + self._tools = tools + + + def __enter__(self): + return self.tools() + + def __exit__(self, exc_type, exc_val, exc_tb): + pass diff --git a/src/crewai_tools/tools/crewai_platform_tools/crewai_platform_tools.py b/src/crewai_tools/tools/crewai_platform_tools/crewai_platform_tools.py new file mode 100644 index 000000000..8bfa1073a --- /dev/null +++ b/src/crewai_tools/tools/crewai_platform_tools/crewai_platform_tools.py @@ -0,0 +1,28 @@ +import re +import os +import typing as t +from typing import Literal +import logging +import json +from crewai.tools import BaseTool +from crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder import CrewaiPlatformToolBuilder +from crewai_tools.adapters.tool_collection import ToolCollection + +logger = logging.getLogger(__name__) + + + +def CrewaiPlatformTools( + apps: list[str], +) -> ToolCollection[BaseTool]: + """Factory function that returns crewai platform tools. + Args: + apps: List of platform apps to get tools that are available on the platform. 
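+
+    Example (illustrative sketch; the app names are placeholders and the
+    exact set depends on the integrations enabled for your platform account):
+        tools = CrewaiPlatformTools(apps=["github", "slack"])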
+
+    Returns:
+        A list of BaseTool instances for platform actions
+    """
+
+    builder = CrewaiPlatformToolBuilder(apps=apps)
+
+    return builder.tools()
diff --git a/src/crewai_tools/tools/crewai_platform_tools/misc.py b/src/crewai_tools/tools/crewai_platform_tools/misc.py
new file mode 100644
index 000000000..0839719d7
--- /dev/null
+++ b/src/crewai_tools/tools/crewai_platform_tools/misc.py
@@ -0,0 +1,13 @@
+import os
+
+def get_platform_api_base_url() -> str:
+    """Get the platform API base URL from environment or use default."""
+    base_url = os.getenv("CREWAI_PLUS_URL", "https://app.crewai.com")
+    return f"{base_url}/crewai_plus/api/v1/integrations"
+
+def get_platform_integration_token() -> str:
+    """Get the platform integration token from the environment."""
+    token = os.getenv("CREWAI_PLATFORM_INTEGRATION_TOKEN") or ""
+    if not token:
+        raise ValueError("No platform integration token found, please set the CREWAI_PLATFORM_INTEGRATION_TOKEN environment variable")
+    return token # TODO: Use context manager to get token
diff --git a/tests/tools/crewai_platform_tools/test_crewai_platform_action_tool.py b/tests/tools/crewai_platform_tools/test_crewai_platform_action_tool.py
new file mode 100644
index 000000000..c24237082
--- /dev/null
+++ b/tests/tools/crewai_platform_tools/test_crewai_platform_action_tool.py
@@ -0,0 +1,165 @@
+
+import unittest
+from unittest.mock import patch, Mock
+import pytest
+from crewai_tools.tools.crewai_platform_tools import CrewAIPlatformActionTool
+
+
+class TestCrewAIPlatformActionTool(unittest.TestCase):
+    @pytest.fixture
+    def sample_action_schema(self):
+        return {
+            "function": {
+                "name": "test_action",
+                "description": "Test action for unit testing",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "message": {
+                            "type": "string",
+                            "description": "Message to send"
+                        },
+                        "priority": {
+                            "type": "integer",
+                            "description": "Priority level"
+                        }
+                    },
+                    "required": ["message"]
+                }
+            }
+        }
+
+    @pytest.fixture
+    def platform_action_tool(self, sample_action_schema):
+        return CrewAIPlatformActionTool(
+            description="Test Action Tool\nTest description",
+            action_name="test_action",
+            action_schema=sample_action_schema
+        )
+
+
+    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"})
+    @patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool.requests.post")
+    def test_run_success(self, mock_post):
+        schema = {
+            "function": {
+                "name": "test_action",
+                "description": "Test action",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "message": {"type": "string", "description": "Message"}
+                    },
+                    "required": ["message"]
+                }
+            }
+        }
+
+        tool = CrewAIPlatformActionTool(
+            description="Test tool",
+            action_name="test_action",
+            action_schema=schema
+        )
+
+        mock_response = Mock()
+        mock_response.ok = True
+        mock_response.json.return_value = {"result": "success", "data": "test_data"}
+        mock_post.return_value = mock_response
+
+        result = tool._run(message="test message")
+
+        mock_post.assert_called_once()
+        _, kwargs = mock_post.call_args
+
+        assert "test_action/execute" in kwargs["url"]
+        assert kwargs["headers"]["Authorization"] == "Bearer test_token"
+        assert kwargs["json"]["message"] == "test message"
+        assert "success" in result
+
+    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"})
+    @patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool.requests.post")
+    def test_run_api_error(self, mock_post):
+        schema = {
+            "function": {
+                "name": "test_action",
+                
"description": "Test action", + "parameters": { + "type": "object", + "properties": { + "message": {"type": "string", "description": "Message"} + }, + "required": ["message"] + } + } + } + + tool = CrewAIPlatformActionTool( + description="Test tool", + action_name="test_action", + action_schema=schema + ) + + mock_response = Mock() + mock_response.ok = False + mock_response.json.return_value = {"error": {"message": "Invalid request"}} + mock_post.return_value = mock_response + + result = tool._run(message="test message") + + assert "API request failed" in result + assert "Invalid request" in result + + @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"}) + @patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool.requests.post") + def test_run_exception(self, mock_post): + schema = { + "function": { + "name": "test_action", + "description": "Test action", + "parameters": { + "type": "object", + "properties": { + "message": {"type": "string", "description": "Message"} + }, + "required": ["message"] + } + } + } + + tool = CrewAIPlatformActionTool( + description="Test tool", + action_name="test_action", + action_schema=schema + ) + + mock_post.side_effect = Exception("Network error") + + result = tool._run(message="test message") + + assert "Error executing action test_action: Network error" in result + + def test_run_without_token(self): + schema = { + "function": { + "name": "test_action", + "description": "Test action", + "parameters": { + "type": "object", + "properties": { + "message": {"type": "string", "description": "Message"} + }, + "required": ["message"] + } + } + } + + tool = CrewAIPlatformActionTool( + description="Test tool", + action_name="test_action", + action_schema=schema + ) + + with patch.dict("os.environ", {}, clear=True): + result = tool._run(message="test message") + assert "Error executing action test_action:" in result + assert "No platform integration token found" in result diff --git a/tests/tools/crewai_platform_tools/test_crewai_platform_tool_builder.py b/tests/tools/crewai_platform_tools/test_crewai_platform_tool_builder.py new file mode 100644 index 000000000..e60be2e12 --- /dev/null +++ b/tests/tools/crewai_platform_tools/test_crewai_platform_tool_builder.py @@ -0,0 +1,223 @@ +import unittest +from unittest.mock import patch, Mock +import pytest +from crewai_tools.tools.crewai_platform_tools import CrewaiPlatformToolBuilder, CrewAIPlatformActionTool + + +class TestCrewaiPlatformToolBuilder(unittest.TestCase): + @pytest.fixture + def platform_tool_builder(self): + """Create a CrewaiPlatformToolBuilder instance for testing""" + return CrewaiPlatformToolBuilder(apps=["github", "slack"]) + + @pytest.fixture + def mock_api_response(self): + return { + "actions": { + "github": [ + { + "name": "create_issue", + "description": "Create a GitHub issue", + "parameters": { + "type": "object", + "properties": { + "title": {"type": "string", "description": "Issue title"}, + "body": {"type": "string", "description": "Issue body"} + }, + "required": ["title"] + } + } + ], + "slack": [ + { + "name": "send_message", + "description": "Send a Slack message", + "parameters": { + "type": "object", + "properties": { + "channel": {"type": "string", "description": "Channel name"}, + "text": {"type": "string", "description": "Message text"} + }, + "required": ["channel", "text"] + } + } + ] + } + } + + @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"}) + 
@patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get") + def test_fetch_actions_success(self, mock_get): + mock_api_response = { + "actions": { + "github": [ + { + "name": "create_issue", + "description": "Create a GitHub issue", + "parameters": { + "type": "object", + "properties": { + "title": {"type": "string", "description": "Issue title"} + }, + "required": ["title"] + } + } + ] + } + } + + builder = CrewaiPlatformToolBuilder(apps=["github", "slack/send_message"]) + + mock_response = Mock() + mock_response.raise_for_status.return_value = None + mock_response.json.return_value = mock_api_response + mock_get.return_value = mock_response + + builder._fetch_actions() + + mock_get.assert_called_once() + args, kwargs = mock_get.call_args + + assert "/actions" in args[0] + assert kwargs["headers"]["Authorization"] == "Bearer test_token" + assert kwargs["params"]["apps"] == "github,slack/send_message" + + assert "create_issue" in builder._actions_schema + assert builder._actions_schema["create_issue"]["function"]["name"] == "create_issue" + + def test_fetch_actions_no_token(self): + builder = CrewaiPlatformToolBuilder(apps=["github"]) + + with patch.dict("os.environ", {}, clear=True): + with self.assertRaises(ValueError) as context: + builder._fetch_actions() + assert "No platform integration token found" in str(context.exception) + + @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"}) + @patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get") + def test_create_tools(self, mock_get): + mock_api_response = { + "actions": { + "github": [ + { + "name": "create_issue", + "description": "Create a GitHub issue", + "parameters": { + "type": "object", + "properties": { + "title": {"type": "string", "description": "Issue title"} + }, + "required": ["title"] + } + } + ], + "slack": [ + { + "name": "send_message", + "description": "Send a Slack message", + "parameters": { + "type": "object", + "properties": { + "channel": {"type": "string", "description": "Channel name"} + }, + "required": ["channel"] + } + } + ] + } + } + + builder = CrewaiPlatformToolBuilder(apps=["github", "slack"]) + + mock_response = Mock() + mock_response.raise_for_status.return_value = None + mock_response.json.return_value = mock_api_response + mock_get.return_value = mock_response + + tools = builder.tools() + + assert len(tools) == 2 + assert all(isinstance(tool, CrewAIPlatformActionTool) for tool in tools) + + tool_names = [tool.action_name for tool in tools] + assert "create_issue" in tool_names + assert "send_message" in tool_names + + github_tool = next((t for t in tools if t.action_name == "create_issue"), None) + slack_tool = next((t for t in tools if t.action_name == "send_message"), None) + + assert github_tool is not None + assert slack_tool is not None + assert "Create a GitHub issue" in github_tool.description + assert "Send a Slack message" in slack_tool.description + + def test_tools_caching(self): + builder = CrewaiPlatformToolBuilder(apps=["github"]) + + cached_tools = [] + + def mock_create_tools(): + builder._tools = cached_tools + + with patch.object(builder, '_fetch_actions') as mock_fetch, \ + patch.object(builder, '_create_tools', side_effect=mock_create_tools) as mock_create: + + tools1 = builder.tools() + assert mock_fetch.call_count == 1 + assert mock_create.call_count == 1 + + tools2 = builder.tools() + assert mock_fetch.call_count == 1 + assert mock_create.call_count == 1 + + assert tools1 is tools2 + 
+ @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"}) + def test_empty_apps_list(self): + builder = CrewaiPlatformToolBuilder(apps=[]) + + with patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get") as mock_get: + mock_response = Mock() + mock_response.raise_for_status.return_value = None + mock_response.json.return_value = {"actions": {}} + mock_get.return_value = mock_response + + tools = builder.tools() + + assert isinstance(tools, list) + assert len(tools) == 0 + + _, kwargs = mock_get.call_args + assert kwargs["params"]["apps"] == "" + + def test_detailed_description_generation(self): + builder = CrewaiPlatformToolBuilder(apps=["test"]) + + complex_schema = { + "type": "object", + "properties": { + "simple_string": {"type": "string", "description": "A simple string"}, + "nested_object": { + "type": "object", + "properties": { + "inner_prop": {"type": "integer", "description": "Inner property"} + }, + "description": "Nested object" + }, + "array_prop": { + "type": "array", + "items": {"type": "string"}, + "description": "Array of strings" + } + } + } + + descriptions = builder._generate_detailed_description(complex_schema) + + assert isinstance(descriptions, list) + assert len(descriptions) > 0 + + description_text = "\n".join(descriptions) + assert "simple_string" in description_text + assert "nested_object" in description_text + assert "array_prop" in description_text diff --git a/tests/tools/crewai_platform_tools/test_crewai_platform_tools.py b/tests/tools/crewai_platform_tools/test_crewai_platform_tools.py new file mode 100644 index 000000000..295c68745 --- /dev/null +++ b/tests/tools/crewai_platform_tools/test_crewai_platform_tools.py @@ -0,0 +1,95 @@ +import unittest +from unittest.mock import patch, Mock +from crewai_tools.tools.crewai_platform_tools import CrewaiPlatformTools + + +class TestCrewaiPlatformTools(unittest.TestCase): + + @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"}) + @patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get") + def test_crewai_platform_tools_basic(self, mock_get): + mock_response = Mock() + mock_response.raise_for_status.return_value = None + mock_response.json.return_value = {"actions": {"github": []}} + mock_get.return_value = mock_response + + tools = CrewaiPlatformTools(apps=["github"]) + assert tools is not None + assert isinstance(tools, list) + + @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"}) + @patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get") + def test_crewai_platform_tools_multiple_apps(self, mock_get): + mock_response = Mock() + mock_response.raise_for_status.return_value = None + mock_response.json.return_value = { + "actions": { + "github": [ + { + "name": "create_issue", + "description": "Create a GitHub issue", + "parameters": { + "type": "object", + "properties": { + "title": {"type": "string", "description": "Issue title"}, + "body": {"type": "string", "description": "Issue body"} + }, + "required": ["title"] + } + } + ], + "slack": [ + { + "name": "send_message", + "description": "Send a Slack message", + "parameters": { + "type": "object", + "properties": { + "channel": {"type": "string", "description": "Channel to send to"}, + "text": {"type": "string", "description": "Message text"} + }, + "required": ["channel", "text"] + } + } + ] + } + } + mock_get.return_value = mock_response + + tools = 
CrewaiPlatformTools(apps=["github", "slack"])
+        assert tools is not None
+        assert isinstance(tools, list)
+        assert len(tools) == 2
+
+        mock_get.assert_called_once()
+        args, kwargs = mock_get.call_args
+        assert "apps=github,slack" in args[0] or kwargs.get("params", {}).get("apps") == "github,slack"
+
+    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"})
+    def test_crewai_platform_tools_empty_apps(self):
+        with patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get") as mock_get:
+            mock_response = Mock()
+            mock_response.raise_for_status.return_value = None
+            mock_response.json.return_value = {"actions": {}}
+            mock_get.return_value = mock_response
+
+            tools = CrewaiPlatformTools(apps=[])
+            assert tools is not None
+            assert isinstance(tools, list)
+            assert len(tools) == 0
+
+    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"})
+    @patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get")
+    def test_crewai_platform_tools_api_error_handling(self, mock_get):
+        mock_get.side_effect = Exception("API Error")
+
+        tools = CrewaiPlatformTools(apps=["github"])
+        assert tools is not None
+        assert isinstance(tools, list)
+        assert len(tools) == 0
+
+    def test_crewai_platform_tools_no_token(self):
+        with patch.dict("os.environ", {}, clear=True):
+            with self.assertRaises(ValueError) as context:
+                CrewaiPlatformTools(apps=["github"])
+            assert "No platform integration token found" in str(context.exception)

From 8d9cee45f2e1a73bac6a942868f9b470ad6023fe Mon Sep 17 00:00:00 2001
From: Lucas Gomide
Date: Fri, 12 Sep 2025 14:35:17 -0300
Subject: [PATCH 386/391] feat: attempt to make embedchain optional (#450)

* fix: attempt to make embedchain optional

* fix: drop pydantic_settings dependency

* fix: ensure the package is importable without any extra dependency

After making embedchain optional, many packages were uninstalled, which
caused errors in some tools due to failing import directives
---
 .../adapters/embedchain_adapter.py            | 15 ++++++++++---
 .../adapters/pdf_embedchain_adapter.py        | 15 ++++++++++---
 .../brightdata_tool/brightdata_dataset.py     | 22 +++++++++++--------
 .../tools/brightdata_tool/brightdata_serp.py  | 19 +++++++++-------
 .../brightdata_tool/brightdata_unlocker.py    | 21 ++++++++++--------
 .../code_docs_search_tool.py                  |  9 +++++++-
 .../tools/csv_search_tool/csv_search_tool.py  |  9 +++++++-
 .../directory_search_tool.py                  |  9 +++++++-
 .../docx_search_tool/docx_search_tool.py      |  9 +++++++-
 .../github_search_tool/github_search_tool.py  | 13 ++++++++---
 .../tools/mdx_search_tool/mdx_search_tool.py  |  9 +++++++-
 .../mysql_search_tool/mysql_search_tool.py    |  9 +++++++-
 src/crewai_tools/tools/nl2sql/nl2sql_tool.py  | 15 +++++++++++--
 .../tools/pdf_search_tool/pdf_search_tool.py  |  9 +++++++-
 .../tools/pg_search_tool/pg_search_tool.py    |  9 +++++++-
 src/crewai_tools/tools/rag/rag_tool.py        |  6 ++++-
 .../scrape_element_from_website.py            | 10 ++++++++-
 .../scrape_website_tool.py                    |  9 +++++++-
 .../website_search/website_search_tool.py     |  9 +++++++-
 .../youtube_channel_search_tool.py            |  7 +++++-
 .../youtube_video_search_tool.py              |  9 +++++++-
 21 files changed, 191 insertions(+), 51 deletions(-)

diff --git a/src/crewai_tools/adapters/embedchain_adapter.py b/src/crewai_tools/adapters/embedchain_adapter.py
index 446aab96c..1e7b83c0b 100644
--- a/src/crewai_tools/adapters/embedchain_adapter.py
+++ b/src/crewai_tools/adapters/embedchain_adapter.py
@@ -1,14 +1,23 @@
 from typing import Any
 
-from embedchain import App
-
 from 
crewai_tools.tools.rag.rag_tool import Adapter +try: + from embedchain import App + EMBEDCHAIN_AVAILABLE = True +except ImportError: + EMBEDCHAIN_AVAILABLE = False + class EmbedchainAdapter(Adapter): - embedchain_app: App + embedchain_app: Any # Will be App when embedchain is available summarize: bool = False + def __init__(self, **data): + if not EMBEDCHAIN_AVAILABLE: + raise ImportError("embedchain is not installed. Please install it with `pip install crewai-tools[embedchain]`") + super().__init__(**data) + def query(self, question: str) -> str: result, sources = self.embedchain_app.query( question, citations=True, dry_run=(not self.summarize) diff --git a/src/crewai_tools/adapters/pdf_embedchain_adapter.py b/src/crewai_tools/adapters/pdf_embedchain_adapter.py index 12557c971..aa682c84f 100644 --- a/src/crewai_tools/adapters/pdf_embedchain_adapter.py +++ b/src/crewai_tools/adapters/pdf_embedchain_adapter.py @@ -1,15 +1,24 @@ from typing import Any, Optional -from embedchain import App - from crewai_tools.tools.rag.rag_tool import Adapter +try: + from embedchain import App + EMBEDCHAIN_AVAILABLE = True +except ImportError: + EMBEDCHAIN_AVAILABLE = False + class PDFEmbedchainAdapter(Adapter): - embedchain_app: App + embedchain_app: Any # Will be App when embedchain is available summarize: bool = False src: Optional[str] = None + def __init__(self, **data): + if not EMBEDCHAIN_AVAILABLE: + raise ImportError("embedchain is not installed. Please install it with `pip install crewai-tools[embedchain]`") + super().__init__(**data) + def query(self, question: str) -> str: where = ( {"app_id": self.embedchain_app.config.id, "source": self.src} diff --git a/src/crewai_tools/tools/brightdata_tool/brightdata_dataset.py b/src/crewai_tools/tools/brightdata_tool/brightdata_dataset.py index bd0dcc1c3..88ca65077 100644 --- a/src/crewai_tools/tools/brightdata_tool/brightdata_dataset.py +++ b/src/crewai_tools/tools/brightdata_tool/brightdata_dataset.py @@ -5,15 +5,19 @@ from typing import Any, Dict, Optional, Type import aiohttp from crewai.tools import BaseTool from pydantic import BaseModel, Field -from pydantic_settings import BaseSettings -class BrightDataConfig(BaseSettings): +class BrightDataConfig(BaseModel): API_URL: str = "https://api.brightdata.com" DEFAULT_TIMEOUT: int = 600 DEFAULT_POLLING_INTERVAL: int = 1 - - class Config: - env_prefix = "BRIGHTDATA_" + + @classmethod + def from_env(cls): + return cls( + API_URL=os.environ.get("BRIGHTDATA_API_URL", "https://api.brightdata.com"), + DEFAULT_TIMEOUT=int(os.environ.get("BRIGHTDATA_DEFAULT_TIMEOUT", "600")), + DEFAULT_POLLING_INTERVAL=int(os.environ.get("BRIGHTDATA_DEFAULT_POLLING_INTERVAL", "1")) + ) class BrightDataDatasetToolException(Exception): """Exception raised for custom error in the application.""" @@ -48,10 +52,10 @@ class BrightDataDatasetToolSchema(BaseModel): default=None, description="Additional params if any" ) -config = BrightDataConfig() +config = BrightDataConfig.from_env() -BRIGHTDATA_API_URL = config.API_URL -timeout = config.DEFAULT_TIMEOUT +BRIGHTDATA_API_URL = config.API_URL +timeout = config.DEFAULT_TIMEOUT datasets = [ { @@ -532,7 +536,7 @@ class BrightDataDatasetTool(BaseTool): url = url or self.url zipcode = zipcode or self.zipcode additional_params = additional_params or self.additional_params - + if not dataset_type: raise ValueError("dataset_type is required either in constructor or method call") if not url: diff --git a/src/crewai_tools/tools/brightdata_tool/brightdata_serp.py 
b/src/crewai_tools/tools/brightdata_tool/brightdata_serp.py index 3b1170713..ae197ce0f 100644 --- a/src/crewai_tools/tools/brightdata_tool/brightdata_serp.py +++ b/src/crewai_tools/tools/brightdata_tool/brightdata_serp.py @@ -5,12 +5,15 @@ from typing import Any, Optional, Type import requests from crewai.tools import BaseTool from pydantic import BaseModel, Field -from pydantic_settings import BaseSettings -class BrightDataConfig(BaseSettings): - API_URL: str = "https://api.brightdata.com/request" - class Config: - env_prefix = "BRIGHTDATA_" +class BrightDataConfig(BaseModel): + API_URL: str = "https://api.brightdata.com/request" + + @classmethod + def from_env(cls): + return cls( + API_URL=os.environ.get("BRIGHTDATA_API_URL", "https://api.brightdata.com/request") + ) class BrightDataSearchToolSchema(BaseModel): """ @@ -73,7 +76,7 @@ class BrightDataSearchTool(BaseTool): name: str = "Bright Data SERP Search" description: str = "Tool to perform web search using Bright Data SERP API." args_schema: Type[BaseModel] = BrightDataSearchToolSchema - _config = BrightDataConfig() + _config = BrightDataConfig.from_env() base_url: str = "" api_key: str = "" zone: str = "" @@ -95,7 +98,7 @@ class BrightDataSearchTool(BaseTool): self.search_type = search_type self.device_type = device_type self.parse_results = parse_results - + self.api_key = os.getenv("BRIGHT_DATA_API_KEY") self.zone = os.getenv("BRIGHT_DATA_ZONE") if not self.api_key: @@ -136,7 +139,7 @@ class BrightDataSearchTool(BaseTool): device_type = device_type or self.device_type parse_results = parse_results if parse_results is not None else self.parse_results results_count = kwargs.get("results_count", "10") - + # Validate required parameters if not query: raise ValueError("query is required either in constructor or method call") diff --git a/src/crewai_tools/tools/brightdata_tool/brightdata_unlocker.py b/src/crewai_tools/tools/brightdata_tool/brightdata_unlocker.py index fb8c2fb07..27864cb97 100644 --- a/src/crewai_tools/tools/brightdata_tool/brightdata_unlocker.py +++ b/src/crewai_tools/tools/brightdata_tool/brightdata_unlocker.py @@ -4,12 +4,15 @@ from typing import Any, Optional, Type import requests from crewai.tools import BaseTool from pydantic import BaseModel, Field -from pydantic_settings import BaseSettings -class BrightDataConfig(BaseSettings): - API_URL: str = "https://api.brightdata.com/request" - class Config: - env_prefix = "BRIGHTDATA_" +class BrightDataConfig(BaseModel): + API_URL: str = "https://api.brightdata.com/request" + + @classmethod + def from_env(cls): + return cls( + API_URL=os.environ.get("BRIGHTDATA_API_URL", "https://api.brightdata.com/request") + ) class BrightDataUnlockerToolSchema(BaseModel): """ @@ -57,7 +60,7 @@ class BrightDataWebUnlockerTool(BaseTool): name: str = "Bright Data Web Unlocker Scraping" description: str = "Tool to perform web scraping using Bright Data Web Unlocker" args_schema: Type[BaseModel] = BrightDataUnlockerToolSchema - _config = BrightDataConfig() + _config = BrightDataConfig.from_env() base_url: str = "" api_key: str = "" zone: str = "" @@ -71,7 +74,7 @@ class BrightDataWebUnlockerTool(BaseTool): self.url = url self.format = format self.data_format = data_format - + self.api_key = os.getenv("BRIGHT_DATA_API_KEY") self.zone = os.getenv("BRIGHT_DATA_ZONE") if not self.api_key: @@ -83,10 +86,10 @@ class BrightDataWebUnlockerTool(BaseTool): url = url or self.url format = format or self.format data_format = data_format or self.data_format - + if not url: raise ValueError("url is required 
either in constructor or method call") - + payload = { "url": url, "zone": self.zone, diff --git a/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py b/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py index 05711d7bc..155b4390d 100644 --- a/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py +++ b/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py @@ -1,6 +1,11 @@ from typing import Any, Optional, Type -from embedchain.models.data_type import DataType +try: + from embedchain.models.data_type import DataType + EMBEDCHAIN_AVAILABLE = True +except ImportError: + EMBEDCHAIN_AVAILABLE = False + from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool @@ -37,6 +42,8 @@ class CodeDocsSearchTool(RagTool): self._generate_description() def add(self, docs_url: str) -> None: + if not EMBEDCHAIN_AVAILABLE: + raise ImportError("embedchain is not installed. Please install it with `pip install crewai-tools[embedchain]`") super().add(docs_url, data_type=DataType.DOCS_SITE) def _run( diff --git a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py index 05572d8bb..4be84efdd 100644 --- a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py +++ b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py @@ -1,6 +1,11 @@ from typing import Optional, Type -from embedchain.models.data_type import DataType +try: + from embedchain.models.data_type import DataType + EMBEDCHAIN_AVAILABLE = True +except ImportError: + EMBEDCHAIN_AVAILABLE = False + from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool @@ -37,6 +42,8 @@ class CSVSearchTool(RagTool): self._generate_description() def add(self, csv: str) -> None: + if not EMBEDCHAIN_AVAILABLE: + raise ImportError("embedchain is not installed. Please install it with `pip install crewai-tools[embedchain]`") super().add(csv, data_type=DataType.CSV) def _run( diff --git a/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py b/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py index 20d21731a..30fdd52cc 100644 --- a/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py +++ b/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py @@ -1,6 +1,11 @@ from typing import Optional, Type -from embedchain.loaders.directory_loader import DirectoryLoader +try: + from embedchain.loaders.directory_loader import DirectoryLoader + EMBEDCHAIN_AVAILABLE = True +except ImportError: + EMBEDCHAIN_AVAILABLE = False + from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool @@ -29,6 +34,8 @@ class DirectorySearchTool(RagTool): args_schema: Type[BaseModel] = DirectorySearchToolSchema def __init__(self, directory: Optional[str] = None, **kwargs): + if not EMBEDCHAIN_AVAILABLE: + raise ImportError("embedchain is not installed. 
Please install it with `pip install crewai-tools[embedchain]`") super().__init__(**kwargs) if directory is not None: self.add(directory) diff --git a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py index a9d2c9610..97dab02cd 100644 --- a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py +++ b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py @@ -1,6 +1,11 @@ from typing import Any, Optional, Type -from embedchain.models.data_type import DataType +try: + from embedchain.models.data_type import DataType + EMBEDCHAIN_AVAILABLE = True +except ImportError: + EMBEDCHAIN_AVAILABLE = False + from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool @@ -43,6 +48,8 @@ class DOCXSearchTool(RagTool): self._generate_description() def add(self, docx: str) -> None: + if not EMBEDCHAIN_AVAILABLE: + raise ImportError("embedchain is not installed. Please install it with `pip install crewai-tools[embedchain]`") super().add(docx, data_type=DataType.DOCX) def _run( diff --git a/src/crewai_tools/tools/github_search_tool/github_search_tool.py b/src/crewai_tools/tools/github_search_tool/github_search_tool.py index 51fe4033c..afde4fe92 100644 --- a/src/crewai_tools/tools/github_search_tool/github_search_tool.py +++ b/src/crewai_tools/tools/github_search_tool/github_search_tool.py @@ -1,6 +1,11 @@ -from typing import List, Optional, Type +from typing import List, Optional, Type, Any + +try: + from embedchain.loaders.github import GithubLoader + EMBEDCHAIN_AVAILABLE = True +except ImportError: + EMBEDCHAIN_AVAILABLE = False -from embedchain.loaders.github import GithubLoader from pydantic import BaseModel, Field, PrivateAttr from ..rag.rag_tool import RagTool @@ -37,7 +42,7 @@ class GithubSearchTool(RagTool): default_factory=lambda: ["code", "repo", "pr", "issue"], description="Content types you want to be included search, options: [code, repo, pr, issue]", ) - _loader: GithubLoader | None = PrivateAttr(default=None) + _loader: Any | None = PrivateAttr(default=None) def __init__( self, @@ -45,6 +50,8 @@ class GithubSearchTool(RagTool): content_types: Optional[List[str]] = None, **kwargs, ): + if not EMBEDCHAIN_AVAILABLE: + raise ImportError("embedchain is not installed. Please install it with `pip install crewai-tools[embedchain]`") super().__init__(**kwargs) self._loader = GithubLoader(config={"token": self.gh_token}) diff --git a/src/crewai_tools/tools/mdx_search_tool/mdx_search_tool.py b/src/crewai_tools/tools/mdx_search_tool/mdx_search_tool.py index 14afe5db7..807da62fe 100644 --- a/src/crewai_tools/tools/mdx_search_tool/mdx_search_tool.py +++ b/src/crewai_tools/tools/mdx_search_tool/mdx_search_tool.py @@ -1,8 +1,13 @@ from typing import Optional, Type -from embedchain.models.data_type import DataType from pydantic import BaseModel, Field +try: + from embedchain.models.data_type import DataType + EMBEDCHAIN_AVAILABLE = True +except ImportError: + EMBEDCHAIN_AVAILABLE = False + from ..rag.rag_tool import RagTool @@ -37,6 +42,8 @@ class MDXSearchTool(RagTool): self._generate_description() def add(self, mdx: str) -> None: + if not EMBEDCHAIN_AVAILABLE: + raise ImportError("embedchain is not installed. 
Please install it with `pip install crewai-tools[embedchain]`") super().add(mdx, data_type=DataType.MDX) def _run( diff --git a/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py b/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py index a472e1761..8c2c5ef5d 100644 --- a/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py +++ b/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py @@ -1,6 +1,11 @@ from typing import Any, Type -from embedchain.loaders.mysql import MySQLLoader +try: + from embedchain.loaders.mysql import MySQLLoader + EMBEDCHAIN_AVAILABLE = True +except ImportError: + EMBEDCHAIN_AVAILABLE = False + from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool @@ -22,6 +27,8 @@ class MySQLSearchTool(RagTool): db_uri: str = Field(..., description="Mandatory database URI") def __init__(self, table_name: str, **kwargs): + if not EMBEDCHAIN_AVAILABLE: + raise ImportError("embedchain is not installed. Please install it with `pip install crewai-tools[embedchain]`") super().__init__(**kwargs) kwargs["data_type"] = "mysql" kwargs["loader"] = MySQLLoader(config=dict(url=self.db_uri)) diff --git a/src/crewai_tools/tools/nl2sql/nl2sql_tool.py b/src/crewai_tools/tools/nl2sql/nl2sql_tool.py index 786550ee7..f3d892d1a 100644 --- a/src/crewai_tools/tools/nl2sql/nl2sql_tool.py +++ b/src/crewai_tools/tools/nl2sql/nl2sql_tool.py @@ -2,8 +2,13 @@ from typing import Any, Type, Union from crewai.tools import BaseTool from pydantic import BaseModel, Field -from sqlalchemy import create_engine, text -from sqlalchemy.orm import sessionmaker + +try: + from sqlalchemy import create_engine, text + from sqlalchemy.orm import sessionmaker + SQLALCHEMY_AVAILABLE = True +except ImportError: + SQLALCHEMY_AVAILABLE = False class NL2SQLToolInput(BaseModel): @@ -25,6 +30,9 @@ class NL2SQLTool(BaseTool): args_schema: Type[BaseModel] = NL2SQLToolInput def model_post_init(self, __context: Any) -> None: + if not SQLALCHEMY_AVAILABLE: + raise ImportError("sqlalchemy is not installed. Please install it with `pip install crewai-tools[sqlalchemy]`") + data = {} tables = self._fetch_available_tables() @@ -58,6 +66,9 @@ class NL2SQLTool(BaseTool): return data def execute_sql(self, sql_query: str) -> Union[list, str]: + if not SQLALCHEMY_AVAILABLE: + raise ImportError("sqlalchemy is not installed. Please install it with `pip install crewai-tools[sqlalchemy]`") + engine = create_engine(self.db_uri) Session = sessionmaker(bind=engine) session = Session() diff --git a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py index ad0b8f8d3..96f141c17 100644 --- a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py +++ b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py @@ -1,8 +1,13 @@ from typing import Optional, Type -from embedchain.models.data_type import DataType from pydantic import BaseModel, Field +try: + from embedchain.models.data_type import DataType + EMBEDCHAIN_AVAILABLE = True +except ImportError: + EMBEDCHAIN_AVAILABLE = False + from ..rag.rag_tool import RagTool @@ -36,6 +41,8 @@ class PDFSearchTool(RagTool): self._generate_description() def add(self, pdf: str) -> None: + if not EMBEDCHAIN_AVAILABLE: + raise ImportError("embedchain is not installed. 
Please install it with `pip install crewai-tools[embedchain]`") super().add(pdf, data_type=DataType.PDF_FILE) def _run( diff --git a/src/crewai_tools/tools/pg_search_tool/pg_search_tool.py b/src/crewai_tools/tools/pg_search_tool/pg_search_tool.py index ec0207aa7..30e294944 100644 --- a/src/crewai_tools/tools/pg_search_tool/pg_search_tool.py +++ b/src/crewai_tools/tools/pg_search_tool/pg_search_tool.py @@ -1,6 +1,11 @@ from typing import Any, Type -from embedchain.loaders.postgres import PostgresLoader +try: + from embedchain.loaders.postgres import PostgresLoader + EMBEDCHAIN_AVAILABLE = True +except ImportError: + EMBEDCHAIN_AVAILABLE = False + from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool @@ -22,6 +27,8 @@ class PGSearchTool(RagTool): db_uri: str = Field(..., description="Mandatory database URI") def __init__(self, table_name: str, **kwargs): + if not EMBEDCHAIN_AVAILABLE: + raise ImportError("embedchain is not installed. Please install it with `pip install crewai-tools[embedchain]`") super().__init__(**kwargs) kwargs["data_type"] = "postgres" kwargs["loader"] = PostgresLoader(config=dict(url=self.db_uri)) diff --git a/src/crewai_tools/tools/rag/rag_tool.py b/src/crewai_tools/tools/rag/rag_tool.py index f7e785bd7..1a9fad8b8 100644 --- a/src/crewai_tools/tools/rag/rag_tool.py +++ b/src/crewai_tools/tools/rag/rag_tool.py @@ -40,7 +40,11 @@ class RagTool(BaseTool): @model_validator(mode="after") def _set_default_adapter(self): if isinstance(self.adapter, RagTool._AdapterPlaceholder): - from embedchain import App + try: + from embedchain import App + except ImportError: + raise ImportError("embedchain is not installed. Please install it with `pip install crewai-tools[embedchain]`") + from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter with portalocker.Lock("crewai-rag-tool.lock", timeout=10): diff --git a/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py b/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py index f1e215bf3..61f5d9c8c 100644 --- a/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py +++ b/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py @@ -2,10 +2,15 @@ import os from typing import Any, Optional, Type import requests -from bs4 import BeautifulSoup from crewai.tools import BaseTool from pydantic import BaseModel, Field +try: + from bs4 import BeautifulSoup + BEAUTIFULSOUP_AVAILABLE = True +except ImportError: + BEAUTIFULSOUP_AVAILABLE = False + class FixedScrapeElementFromWebsiteToolSchema(BaseModel): """Input for ScrapeElementFromWebsiteTool.""" @@ -61,6 +66,9 @@ class ScrapeElementFromWebsiteTool(BaseTool): self, **kwargs: Any, ) -> Any: + if not BEAUTIFULSOUP_AVAILABLE: + raise ImportError("beautifulsoup4 is not installed. 
Please install it with `pip install crewai-tools[beautifulsoup4]`") + website_url = kwargs.get("website_url", self.website_url) css_element = kwargs.get("css_element", self.css_element) page = requests.get( diff --git a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py index bfb371275..262e79a69 100644 --- a/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py +++ b/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py @@ -3,7 +3,11 @@ import re from typing import Any, Optional, Type import requests -from bs4 import BeautifulSoup +try: + from bs4 import BeautifulSoup + BEAUTIFULSOUP_AVAILABLE = True +except ImportError: + BEAUTIFULSOUP_AVAILABLE = False from crewai.tools import BaseTool from pydantic import BaseModel, Field @@ -40,6 +44,9 @@ class ScrapeWebsiteTool(BaseTool): **kwargs, ): super().__init__(**kwargs) + if not BEAUTIFULSOUP_AVAILABLE: + raise ImportError("beautifulsoup4 is not installed. Please install it with `pip install crewai-tools[beautifulsoup4]`") + if website_url is not None: self.website_url = website_url self.description = ( diff --git a/src/crewai_tools/tools/website_search/website_search_tool.py b/src/crewai_tools/tools/website_search/website_search_tool.py index b89af6656..9728b44db 100644 --- a/src/crewai_tools/tools/website_search/website_search_tool.py +++ b/src/crewai_tools/tools/website_search/website_search_tool.py @@ -1,6 +1,11 @@ from typing import Any, Optional, Type -from embedchain.models.data_type import DataType +try: + from embedchain.models.data_type import DataType + EMBEDCHAIN_AVAILABLE = True +except ImportError: + EMBEDCHAIN_AVAILABLE = False + from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool @@ -39,6 +44,8 @@ class WebsiteSearchTool(RagTool): self._generate_description() def add(self, website: str) -> None: + if not EMBEDCHAIN_AVAILABLE: + raise ImportError("embedchain is not installed. 
Please install it with `pip install crewai-tools[embedchain]`") super().add(website, data_type=DataType.WEB_PAGE) def _run( diff --git a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py index 9de4b568f..6d16a708d 100644 --- a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py +++ b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py @@ -1,6 +1,11 @@ from typing import Any, Optional, Type -from embedchain.models.data_type import DataType +try: + from embedchain.models.data_type import DataType + EMBEDCHAIN_AVAILABLE = True +except ImportError: + EMBEDCHAIN_AVAILABLE = False + from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool diff --git a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py index 639f1a266..b93cc6c29 100644 --- a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py +++ b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py @@ -1,6 +1,11 @@ from typing import Any, Optional, Type -from embedchain.models.data_type import DataType +try: + from embedchain.models.data_type import DataType + EMBEDCHAIN_AVAILABLE = True +except ImportError: + EMBEDCHAIN_AVAILABLE = False + from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool @@ -39,6 +44,8 @@ class YoutubeVideoSearchTool(RagTool): self._generate_description() def add(self, youtube_video_url: str) -> None: + if not EMBEDCHAIN_AVAILABLE: + raise ImportError("embedchain is not installed. Please install it with `pip install crewai-tools[embedchain]`") super().add(youtube_video_url, data_type=DataType.YOUTUBE_VIDEO) def _run( From e29ca9ec282b9c20d0a8e5a969c33ebbedbd9d42 Mon Sep 17 00:00:00 2001 From: Greyson LaLonde Date: Thu, 18 Sep 2025 19:02:22 -0400 Subject: [PATCH 387/391] feat: replace embedchain with native crewai adapter (#451) - Remove embedchain adapter; add crewai rag adapter and update all search tools - Add loaders: pdf, youtube (video & channel), github, docs site, mysql, postgresql - Add configurable similarity threshold, limit params, and embedding_model support - Improve chromadb compatibility (sanitize metadata, convert columns, fix chunking) - Fix xml encoding, Python 3.10 issues, and youtube url spoofing - Update crewai dependency and instructions; refresh uv.lock - Update tests for new rag adapter and search params --- .../adapters/crewai_rag_adapter.py | 215 ++++++++++++++++++ .../adapters/embedchain_adapter.py | 34 --- .../adapters/pdf_embedchain_adapter.py | 41 ---- src/crewai_tools/rag/chunkers/base_chunker.py | 10 +- src/crewai_tools/rag/data_types.py | 25 +- src/crewai_tools/rag/loaders/__init__.py | 6 + .../rag/loaders/docs_site_loader.py | 98 ++++++++ src/crewai_tools/rag/loaders/github_loader.py | 110 +++++++++ src/crewai_tools/rag/loaders/mysql_loader.py | 99 ++++++++ src/crewai_tools/rag/loaders/pdf_loader.py | 72 ++++++ .../rag/loaders/postgres_loader.py | 99 ++++++++ src/crewai_tools/rag/loaders/xml_loader.py | 2 +- .../rag/loaders/youtube_channel_loader.py | 141 ++++++++++++ .../rag/loaders/youtube_video_loader.py | 123 ++++++++++ src/crewai_tools/rag/misc.py | 25 ++ .../code_docs_search_tool.py | 12 +- .../tools/csv_search_tool/csv_search_tool.py | 13 +- .../directory_search_tool.py | 18 +- .../docx_search_tool/docx_search_tool.py | 
12 +- .../github_search_tool/github_search_tool.py | 26 +-- .../json_search_tool/json_search_tool.py | 4 +- .../tools/mdx_search_tool/mdx_search_tool.py | 12 +- .../mysql_search_tool/mysql_search_tool.py | 17 +- .../tools/pdf_search_tool/pdf_search_tool.py | 13 +- .../tools/pg_search_tool/pg_search_tool.py | 17 +- src/crewai_tools/tools/rag/rag_tool.py | 175 ++++++++++++-- .../tools/txt_search_tool/txt_search_tool.py | 4 +- .../website_search/website_search_tool.py | 15 +- .../tools/xml_search_tool/xml_search_tool.py | 4 +- .../youtube_channel_search_tool.py | 11 +- .../youtube_video_search_tool.py | 12 +- tests/tools/rag/rag_tool_test.py | 75 +++--- tests/tools/test_search_tools.py | 54 +++-- 33 files changed, 1317 insertions(+), 277 deletions(-) create mode 100644 src/crewai_tools/adapters/crewai_rag_adapter.py delete mode 100644 src/crewai_tools/adapters/embedchain_adapter.py delete mode 100644 src/crewai_tools/adapters/pdf_embedchain_adapter.py create mode 100644 src/crewai_tools/rag/loaders/docs_site_loader.py create mode 100644 src/crewai_tools/rag/loaders/github_loader.py create mode 100644 src/crewai_tools/rag/loaders/mysql_loader.py create mode 100644 src/crewai_tools/rag/loaders/pdf_loader.py create mode 100644 src/crewai_tools/rag/loaders/postgres_loader.py create mode 100644 src/crewai_tools/rag/loaders/youtube_channel_loader.py create mode 100644 src/crewai_tools/rag/loaders/youtube_video_loader.py diff --git a/src/crewai_tools/adapters/crewai_rag_adapter.py b/src/crewai_tools/adapters/crewai_rag_adapter.py new file mode 100644 index 000000000..c2142ad4b --- /dev/null +++ b/src/crewai_tools/adapters/crewai_rag_adapter.py @@ -0,0 +1,215 @@ +"""Adapter for CrewAI's native RAG system.""" + +from typing import Any, TypedDict, TypeAlias +from typing_extensions import Unpack +from pathlib import Path +import hashlib + +from pydantic import Field, PrivateAttr +from crewai.rag.config.utils import get_rag_client +from crewai.rag.config.types import RagConfigType +from crewai.rag.types import BaseRecord, SearchResult +from crewai.rag.core.base_client import BaseClient +from crewai.rag.factory import create_client + +from crewai_tools.tools.rag.rag_tool import Adapter +from crewai_tools.rag.data_types import DataType +from crewai_tools.rag.misc import sanitize_metadata_for_chromadb +from crewai_tools.rag.chunkers.base_chunker import BaseChunker + +ContentItem: TypeAlias = str | Path | dict[str, Any] + +class AddDocumentParams(TypedDict, total=False): + """Parameters for adding documents to the RAG system.""" + data_type: DataType + metadata: dict[str, Any] + website: str + url: str + file_path: str | Path + github_url: str + youtube_url: str + directory_path: str | Path + + +class CrewAIRagAdapter(Adapter): + """Adapter that uses CrewAI's native RAG system. + + Supports custom vector database configuration through the config parameter. 
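+
+    Example (a minimal sketch, assuming the default RAG client is configured
+    via crewai.rag and the referenced path exists; names are hypothetical):
+        adapter = CrewAIRagAdapter(collection_name="project_docs")
+        adapter.add("./docs/overview.md")
+        answer = adapter.query("What is this project about?")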
+ """ + + collection_name: str = "default" + summarize: bool = False + similarity_threshold: float = 0.6 + limit: int = 5 + config: RagConfigType | None = None + _client: BaseClient | None = PrivateAttr(default=None) + + def model_post_init(self, __context: Any) -> None: + """Initialize the CrewAI RAG client after model initialization.""" + if self.config is not None: + self._client = create_client(self.config) + else: + self._client = get_rag_client() + self._client.get_or_create_collection(collection_name=self.collection_name) + + def query(self, question: str, similarity_threshold: float | None = None, limit: int | None = None) -> str: + """Query the knowledge base with a question. + + Args: + question: The question to ask + similarity_threshold: Minimum similarity score for results (default: 0.6) + limit: Maximum number of results to return (default: 5) + + Returns: + Relevant content from the knowledge base + """ + search_limit = limit if limit is not None else self.limit + search_threshold = similarity_threshold if similarity_threshold is not None else self.similarity_threshold + + results: list[SearchResult] = self._client.search( + collection_name=self.collection_name, + query=question, + limit=search_limit, + score_threshold=search_threshold + ) + + if not results: + return "No relevant content found." + + contents: list[str] = [] + for result in results: + content: str = result.get("content", "") + if content: + contents.append(content) + + return "\n\n".join(contents) + + def add(self, *args: ContentItem, **kwargs: Unpack[AddDocumentParams]) -> None: + """Add content to the knowledge base. + + This method handles various input types and converts them to documents + for the vector database. It supports the data_type parameter for + compatibility with existing tools. + + Args: + *args: Content items to add (strings, paths, or document dicts) + **kwargs: Additional parameters including data_type, metadata, etc. 
+ """ + from crewai_tools.rag.data_types import DataTypes, DataType + from crewai_tools.rag.source_content import SourceContent + from crewai_tools.rag.base_loader import LoaderResult + import os + + documents: list[BaseRecord] = [] + data_type: DataType | None = kwargs.get("data_type") + base_metadata: dict[str, Any] = kwargs.get("metadata", {}) + + for arg in args: + source_ref: str + if isinstance(arg, dict): + source_ref = str(arg.get("source", arg.get("content", ""))) + else: + source_ref = str(arg) + + if not data_type: + data_type = DataTypes.from_content(source_ref) + + if data_type == DataType.DIRECTORY: + if not os.path.isdir(source_ref): + raise ValueError(f"Directory does not exist: {source_ref}") + + # Define binary and non-text file extensions to skip + binary_extensions = {'.pyc', '.pyo', '.png', '.jpg', '.jpeg', '.gif', + '.bmp', '.ico', '.svg', '.webp', '.pdf', '.zip', + '.tar', '.gz', '.bz2', '.7z', '.rar', '.exe', + '.dll', '.so', '.dylib', '.bin', '.dat', '.db', + '.sqlite', '.class', '.jar', '.war', '.ear'} + + for root, dirs, files in os.walk(source_ref): + dirs[:] = [d for d in dirs if not d.startswith('.')] + + for filename in files: + if filename.startswith('.'): + continue + + # Skip binary files based on extension + file_ext = os.path.splitext(filename)[1].lower() + if file_ext in binary_extensions: + continue + + # Skip __pycache__ directories + if '__pycache__' in root: + continue + + file_path: str = os.path.join(root, filename) + try: + file_data_type: DataType = DataTypes.from_content(file_path) + file_loader = file_data_type.get_loader() + file_chunker = file_data_type.get_chunker() + + file_source = SourceContent(file_path) + file_result: LoaderResult = file_loader.load(file_source) + + file_chunks = file_chunker.chunk(file_result.content) + + for chunk_idx, file_chunk in enumerate(file_chunks): + file_metadata: dict[str, Any] = base_metadata.copy() + file_metadata.update(file_result.metadata) + file_metadata["data_type"] = str(file_data_type) + file_metadata["file_path"] = file_path + file_metadata["chunk_index"] = chunk_idx + file_metadata["total_chunks"] = len(file_chunks) + + if isinstance(arg, dict): + file_metadata.update(arg.get("metadata", {})) + + chunk_id = hashlib.sha256(f"{file_result.doc_id}_{chunk_idx}_{file_chunk}".encode()).hexdigest() + + documents.append({ + "doc_id": chunk_id, + "content": file_chunk, + "metadata": sanitize_metadata_for_chromadb(file_metadata) + }) + except Exception: + # Silently skip files that can't be processed + continue + else: + metadata: dict[str, Any] = base_metadata.copy() + + if data_type in [DataType.PDF_FILE, DataType.TEXT_FILE, DataType.DOCX, + DataType.CSV, DataType.JSON, DataType.XML, DataType.MDX]: + if not os.path.isfile(source_ref): + raise FileNotFoundError(f"File does not exist: {source_ref}") + + loader = data_type.get_loader() + chunker = data_type.get_chunker() + + source_content = SourceContent(source_ref) + loader_result: LoaderResult = loader.load(source_content) + + chunks = chunker.chunk(loader_result.content) + + for i, chunk in enumerate(chunks): + chunk_metadata: dict[str, Any] = metadata.copy() + chunk_metadata.update(loader_result.metadata) + chunk_metadata["data_type"] = str(data_type) + chunk_metadata["chunk_index"] = i + chunk_metadata["total_chunks"] = len(chunks) + chunk_metadata["source"] = source_ref + + if isinstance(arg, dict): + chunk_metadata.update(arg.get("metadata", {})) + + chunk_id = hashlib.sha256(f"{loader_result.doc_id}_{i}_{chunk}".encode()).hexdigest() + + 
documents.append({ + "doc_id": chunk_id, + "content": chunk, + "metadata": sanitize_metadata_for_chromadb(chunk_metadata) + }) + + if documents: + self._client.add_documents( + collection_name=self.collection_name, + documents=documents + ) \ No newline at end of file diff --git a/src/crewai_tools/adapters/embedchain_adapter.py b/src/crewai_tools/adapters/embedchain_adapter.py deleted file mode 100644 index 1e7b83c0b..000000000 --- a/src/crewai_tools/adapters/embedchain_adapter.py +++ /dev/null @@ -1,34 +0,0 @@ -from typing import Any - -from crewai_tools.tools.rag.rag_tool import Adapter - -try: - from embedchain import App - EMBEDCHAIN_AVAILABLE = True -except ImportError: - EMBEDCHAIN_AVAILABLE = False - - -class EmbedchainAdapter(Adapter): - embedchain_app: Any # Will be App when embedchain is available - summarize: bool = False - - def __init__(self, **data): - if not EMBEDCHAIN_AVAILABLE: - raise ImportError("embedchain is not installed. Please install it with `pip install crewai-tools[embedchain]`") - super().__init__(**data) - - def query(self, question: str) -> str: - result, sources = self.embedchain_app.query( - question, citations=True, dry_run=(not self.summarize) - ) - if self.summarize: - return result - return "\n\n".join([source[0] for source in sources]) - - def add( - self, - *args: Any, - **kwargs: Any, - ) -> None: - self.embedchain_app.add(*args, **kwargs) diff --git a/src/crewai_tools/adapters/pdf_embedchain_adapter.py b/src/crewai_tools/adapters/pdf_embedchain_adapter.py deleted file mode 100644 index aa682c84f..000000000 --- a/src/crewai_tools/adapters/pdf_embedchain_adapter.py +++ /dev/null @@ -1,41 +0,0 @@ -from typing import Any, Optional - -from crewai_tools.tools.rag.rag_tool import Adapter - -try: - from embedchain import App - EMBEDCHAIN_AVAILABLE = True -except ImportError: - EMBEDCHAIN_AVAILABLE = False - - -class PDFEmbedchainAdapter(Adapter): - embedchain_app: Any # Will be App when embedchain is available - summarize: bool = False - src: Optional[str] = None - - def __init__(self, **data): - if not EMBEDCHAIN_AVAILABLE: - raise ImportError("embedchain is not installed. 
Please install it with `pip install crewai-tools[embedchain]`") - super().__init__(**data) - - def query(self, question: str) -> str: - where = ( - {"app_id": self.embedchain_app.config.id, "source": self.src} - if self.src - else None - ) - result, sources = self.embedchain_app.query( - question, citations=True, dry_run=(not self.summarize), where=where - ) - if self.summarize: - return result - return "\n\n".join([source[0] for source in sources]) - - def add( - self, - *args: Any, - **kwargs: Any, - ) -> None: - self.src = args[0] if args else None - self.embedchain_app.add(*args, **kwargs) diff --git a/src/crewai_tools/rag/chunkers/base_chunker.py b/src/crewai_tools/rag/chunkers/base_chunker.py index deafbfc7a..cadf66f16 100644 --- a/src/crewai_tools/rag/chunkers/base_chunker.py +++ b/src/crewai_tools/rag/chunkers/base_chunker.py @@ -112,7 +112,10 @@ class RecursiveCharacterTextSplitter: if separator == "": doc = "".join(current_doc) else: - doc = separator.join(current_doc) + if self._keep_separator and separator == " ": + doc = "".join(current_doc) + else: + doc = separator.join(current_doc) if doc: docs.append(doc) @@ -133,7 +136,10 @@ class RecursiveCharacterTextSplitter: if separator == "": doc = "".join(current_doc) else: - doc = separator.join(current_doc) + if self._keep_separator and separator == " ": + doc = "".join(current_doc) + else: + doc = separator.join(current_doc) if doc: docs.append(doc) diff --git a/src/crewai_tools/rag/data_types.py b/src/crewai_tools/rag/data_types.py index d2d265cce..1e6f0d8c6 100644 --- a/src/crewai_tools/rag/data_types.py +++ b/src/crewai_tools/rag/data_types.py @@ -25,6 +25,8 @@ class DataType(str, Enum): # Web types WEBSITE = "website" DOCS_SITE = "docs_site" + YOUTUBE_VIDEO = "youtube_video" + YOUTUBE_CHANNEL = "youtube_channel" # Raw types TEXT = "text" @@ -34,6 +36,7 @@ class DataType(str, Enum): from importlib import import_module chunkers = { + DataType.PDF_FILE: ("text_chunker", "TextChunker"), DataType.TEXT_FILE: ("text_chunker", "TextChunker"), DataType.TEXT: ("text_chunker", "TextChunker"), DataType.DOCX: ("text_chunker", "DocxChunker"), @@ -45,9 +48,18 @@ class DataType(str, Enum): DataType.XML: ("structured_chunker", "XmlChunker"), DataType.WEBSITE: ("web_chunker", "WebsiteChunker"), + DataType.DIRECTORY: ("text_chunker", "TextChunker"), + DataType.YOUTUBE_VIDEO: ("text_chunker", "TextChunker"), + DataType.YOUTUBE_CHANNEL: ("text_chunker", "TextChunker"), + DataType.GITHUB: ("text_chunker", "TextChunker"), + DataType.DOCS_SITE: ("text_chunker", "TextChunker"), + DataType.MYSQL: ("text_chunker", "TextChunker"), + DataType.POSTGRES: ("text_chunker", "TextChunker"), } - module_name, class_name = chunkers.get(self, ("default_chunker", "DefaultChunker")) + if self not in chunkers: + raise ValueError(f"No chunker defined for {self}") + module_name, class_name = chunkers[self] module_path = f"crewai_tools.rag.chunkers.{module_name}" try: @@ -60,6 +72,7 @@ class DataType(str, Enum): from importlib import import_module loaders = { + DataType.PDF_FILE: ("pdf_loader", "PDFLoader"), DataType.TEXT_FILE: ("text_loader", "TextFileLoader"), DataType.TEXT: ("text_loader", "TextLoader"), DataType.XML: ("xml_loader", "XMLLoader"), @@ -69,9 +82,17 @@ class DataType(str, Enum): DataType.DOCX: ("docx_loader", "DOCXLoader"), DataType.CSV: ("csv_loader", "CSVLoader"), DataType.DIRECTORY: ("directory_loader", "DirectoryLoader"), + DataType.YOUTUBE_VIDEO: ("youtube_video_loader", "YoutubeVideoLoader"), + DataType.YOUTUBE_CHANNEL: ("youtube_channel_loader", 
"YoutubeChannelLoader"), + DataType.GITHUB: ("github_loader", "GithubLoader"), + DataType.DOCS_SITE: ("docs_site_loader", "DocsSiteLoader"), + DataType.MYSQL: ("mysql_loader", "MySQLLoader"), + DataType.POSTGRES: ("postgres_loader", "PostgresLoader"), } - module_name, class_name = loaders.get(self, ("text_loader", "TextLoader")) + if self not in loaders: + raise ValueError(f"No loader defined for {self}") + module_name, class_name = loaders[self] module_path = f"crewai_tools.rag.loaders.{module_name}" try: module = import_module(module_path) diff --git a/src/crewai_tools/rag/loaders/__init__.py b/src/crewai_tools/rag/loaders/__init__.py index 503651468..dc7424833 100644 --- a/src/crewai_tools/rag/loaders/__init__.py +++ b/src/crewai_tools/rag/loaders/__init__.py @@ -6,6 +6,9 @@ from crewai_tools.rag.loaders.json_loader import JSONLoader from crewai_tools.rag.loaders.docx_loader import DOCXLoader from crewai_tools.rag.loaders.csv_loader import CSVLoader from crewai_tools.rag.loaders.directory_loader import DirectoryLoader +from crewai_tools.rag.loaders.pdf_loader import PDFLoader +from crewai_tools.rag.loaders.youtube_video_loader import YoutubeVideoLoader +from crewai_tools.rag.loaders.youtube_channel_loader import YoutubeChannelLoader __all__ = [ "TextFileLoader", @@ -17,4 +20,7 @@ __all__ = [ "DOCXLoader", "CSVLoader", "DirectoryLoader", + "PDFLoader", + "YoutubeVideoLoader", + "YoutubeChannelLoader", ] diff --git a/src/crewai_tools/rag/loaders/docs_site_loader.py b/src/crewai_tools/rag/loaders/docs_site_loader.py new file mode 100644 index 000000000..b87ebc419 --- /dev/null +++ b/src/crewai_tools/rag/loaders/docs_site_loader.py @@ -0,0 +1,98 @@ +"""Documentation site loader.""" + +from typing import Any +from urllib.parse import urljoin, urlparse + +import requests +from bs4 import BeautifulSoup + +from crewai_tools.rag.base_loader import BaseLoader, LoaderResult +from crewai_tools.rag.source_content import SourceContent + + +class DocsSiteLoader(BaseLoader): + """Loader for documentation websites.""" + + def load(self, source: SourceContent, **kwargs) -> LoaderResult: + """Load content from a documentation site. 
+ + Args: + source: Documentation site URL + **kwargs: Additional arguments + + Returns: + LoaderResult with documentation content + """ + docs_url = source.source + + try: + response = requests.get(docs_url, timeout=30) + response.raise_for_status() + except requests.RequestException as e: + raise ValueError(f"Unable to fetch documentation from {docs_url}: {e}") + + soup = BeautifulSoup(response.text, "html.parser") + + for script in soup(["script", "style"]): + script.decompose() + + title = soup.find("title") + title_text = title.get_text(strip=True) if title else "Documentation" + + main_content = None + for selector in ["main", "article", '[role="main"]', ".content", "#content", ".documentation"]: + main_content = soup.select_one(selector) + if main_content: + break + + if not main_content: + main_content = soup.find("body") + + if not main_content: + raise ValueError(f"Unable to extract content from documentation site: {docs_url}") + + text_parts = [f"Title: {title_text}", ""] + + headings = main_content.find_all(["h1", "h2", "h3"]) + if headings: + text_parts.append("Table of Contents:") + for heading in headings[:15]: + level = int(heading.name[1]) + indent = " " * (level - 1) + text_parts.append(f"{indent}- {heading.get_text(strip=True)}") + text_parts.append("") + + text = main_content.get_text(separator="\n", strip=True) + lines = [line.strip() for line in text.split("\n") if line.strip()] + text_parts.extend(lines) + + nav_links = [] + for nav_selector in ["nav", ".sidebar", ".toc", ".navigation"]: + nav = soup.select_one(nav_selector) + if nav: + links = nav.find_all("a", href=True) + for link in links[:20]: + href = link["href"] + if not href.startswith(("http://", "https://", "mailto:", "#")): + full_url = urljoin(docs_url, href) + nav_links.append(f"- {link.get_text(strip=True)}: {full_url}") + + if nav_links: + text_parts.append("") + text_parts.append("Related documentation pages:") + text_parts.extend(nav_links[:10]) + + content = "\n".join(text_parts) + + if len(content) > 100000: + content = content[:100000] + "\n\n[Content truncated...]" + + return LoaderResult( + content=content, + metadata={ + "source": docs_url, + "title": title_text, + "domain": urlparse(docs_url).netloc + }, + doc_id=self.generate_doc_id(source_ref=docs_url, content=content) + ) \ No newline at end of file diff --git a/src/crewai_tools/rag/loaders/github_loader.py b/src/crewai_tools/rag/loaders/github_loader.py new file mode 100644 index 000000000..b033c2071 --- /dev/null +++ b/src/crewai_tools/rag/loaders/github_loader.py @@ -0,0 +1,110 @@ +"""GitHub repository content loader.""" + +from typing import Any + +from github import Github, GithubException + +from crewai_tools.rag.base_loader import BaseLoader, LoaderResult +from crewai_tools.rag.source_content import SourceContent + + +class GithubLoader(BaseLoader): + """Loader for GitHub repository content.""" + + def load(self, source: SourceContent, **kwargs) -> LoaderResult: + """Load content from a GitHub repository. 
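The docs-site loader above finds the main content by trying a cascade of common container selectors before falling back to `<body>`. A self-contained sketch of just that cascade, using inline dummy HTML (requires `beautifulsoup4`; runnable as-is, no network access):

```python
from bs4 import BeautifulSoup

html = "<html><body><nav>menu</nav><main><h1>Guide</h1><p>Hello</p></main></body></html>"
soup = BeautifulSoup(html, "html.parser")

# Same priority order as DocsSiteLoader: semantic containers first,
# then common class/id conventions, then the whole <body> as a fallback.
main_content = None
for selector in ["main", "article", '[role="main"]', ".content", "#content", ".documentation"]:
    main_content = soup.select_one(selector)
    if main_content:
        break
if not main_content:
    main_content = soup.find("body")

print(main_content.get_text(separator="\n", strip=True))  # -> "Guide\nHello"
```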
+ + Args: + source: GitHub repository URL + **kwargs: Additional arguments including gh_token and content_types + + Returns: + LoaderResult with repository content + """ + metadata = kwargs.get("metadata", {}) + gh_token = metadata.get("gh_token") + content_types = metadata.get("content_types", ["code", "repo"]) + + repo_url = source.source + if not repo_url.startswith("https://github.com/"): + raise ValueError(f"Invalid GitHub URL: {repo_url}") + + parts = repo_url.replace("https://github.com/", "").strip("/").split("/") + if len(parts) < 2: + raise ValueError(f"Invalid GitHub repository URL: {repo_url}") + + repo_name = f"{parts[0]}/{parts[1]}" + + g = Github(gh_token) if gh_token else Github() + + try: + repo = g.get_repo(repo_name) + except GithubException as e: + raise ValueError(f"Unable to access repository {repo_name}: {e}") + + all_content = [] + + if "repo" in content_types: + all_content.append(f"Repository: {repo.full_name}") + all_content.append(f"Description: {repo.description or 'No description'}") + all_content.append(f"Language: {repo.language or 'Not specified'}") + all_content.append(f"Stars: {repo.stargazers_count}") + all_content.append(f"Forks: {repo.forks_count}") + all_content.append("") + + if "code" in content_types: + try: + readme = repo.get_readme() + all_content.append("README:") + all_content.append(readme.decoded_content.decode("utf-8", errors="ignore")) + all_content.append("") + except GithubException: + pass + + try: + contents = repo.get_contents("") + if isinstance(contents, list): + all_content.append("Repository structure:") + for content_file in contents[:20]: + all_content.append(f"- {content_file.path} ({content_file.type})") + all_content.append("") + except GithubException: + pass + + if "pr" in content_types: + prs = repo.get_pulls(state="open") + pr_list = list(prs[:5]) + if pr_list: + all_content.append("Recent Pull Requests:") + for pr in pr_list: + all_content.append(f"- PR #{pr.number}: {pr.title}") + if pr.body: + body_preview = pr.body[:200].replace("\n", " ") + all_content.append(f" {body_preview}") + all_content.append("") + + if "issue" in content_types: + issues = repo.get_issues(state="open") + issue_list = [i for i in list(issues[:10]) if not i.pull_request][:5] + if issue_list: + all_content.append("Recent Issues:") + for issue in issue_list: + all_content.append(f"- Issue #{issue.number}: {issue.title}") + if issue.body: + body_preview = issue.body[:200].replace("\n", " ") + all_content.append(f" {body_preview}") + all_content.append("") + + if not all_content: + raise ValueError(f"No content could be loaded from repository: {repo_url}") + + content = "\n".join(all_content) + return LoaderResult( + content=content, + metadata={ + "source": repo_url, + "repo": repo_name, + "content_types": content_types + }, + doc_id=self.generate_doc_id(source_ref=repo_url, content=content) + ) \ No newline at end of file diff --git a/src/crewai_tools/rag/loaders/mysql_loader.py b/src/crewai_tools/rag/loaders/mysql_loader.py new file mode 100644 index 000000000..79a95e678 --- /dev/null +++ b/src/crewai_tools/rag/loaders/mysql_loader.py @@ -0,0 +1,99 @@ +"""MySQL database loader.""" + +from typing import Any +from urllib.parse import urlparse + +import pymysql + +from crewai_tools.rag.base_loader import BaseLoader, LoaderResult +from crewai_tools.rag.source_content import SourceContent + + +class MySQLLoader(BaseLoader): + """Loader for MySQL database content.""" + + def load(self, source: SourceContent, **kwargs) -> LoaderResult: + """Load content 
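The repository-name parsing in the GitHub loader above is plain string work and easy to verify in isolation; a runnable sketch:

```python
# Extract "owner/repo" from an https://github.com/ URL, exactly as the
# loader does before calling g.get_repo(repo_name).
repo_url = "https://github.com/crewAIInc/crewAI-tools"
parts = repo_url.replace("https://github.com/", "").strip("/").split("/")
assert len(parts) >= 2, f"Invalid GitHub repository URL: {repo_url}"
repo_name = f"{parts[0]}/{parts[1]}"
print(repo_name)  # -> "crewAIInc/crewAI-tools"
```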
from a MySQL database table. + + Args: + source: SQL query (e.g., "SELECT * FROM table_name") + **kwargs: Additional arguments including db_uri + + Returns: + LoaderResult with database content + """ + metadata = kwargs.get("metadata", {}) + db_uri = metadata.get("db_uri") + + if not db_uri: + raise ValueError("Database URI is required for MySQL loader") + + query = source.source + + parsed = urlparse(db_uri) + if parsed.scheme not in ["mysql", "mysql+pymysql"]: + raise ValueError(f"Invalid MySQL URI scheme: {parsed.scheme}") + + connection_params = { + "host": parsed.hostname or "localhost", + "port": parsed.port or 3306, + "user": parsed.username, + "password": parsed.password, + "database": parsed.path.lstrip("/") if parsed.path else None, + "charset": "utf8mb4", + "cursorclass": pymysql.cursors.DictCursor + } + + if not connection_params["database"]: + raise ValueError("Database name is required in the URI") + + try: + connection = pymysql.connect(**connection_params) + try: + with connection.cursor() as cursor: + cursor.execute(query) + rows = cursor.fetchall() + + if not rows: + content = "No data found in the table" + return LoaderResult( + content=content, + metadata={"source": query, "row_count": 0}, + doc_id=self.generate_doc_id(source_ref=query, content=content) + ) + + text_parts = [] + + columns = list(rows[0].keys()) + text_parts.append(f"Columns: {', '.join(columns)}") + text_parts.append(f"Total rows: {len(rows)}") + text_parts.append("") + + for i, row in enumerate(rows, 1): + text_parts.append(f"Row {i}:") + for col, val in row.items(): + if val is not None: + text_parts.append(f" {col}: {val}") + text_parts.append("") + + content = "\n".join(text_parts) + + if len(content) > 100000: + content = content[:100000] + "\n\n[Content truncated...]" + + return LoaderResult( + content=content, + metadata={ + "source": query, + "database": connection_params["database"], + "row_count": len(rows), + "columns": columns + }, + doc_id=self.generate_doc_id(source_ref=query, content=content) + ) + finally: + connection.close() + except pymysql.Error as e: + raise ValueError(f"MySQL database error: {e}") + except Exception as e: + raise ValueError(f"Failed to load data from MySQL: {e}") \ No newline at end of file diff --git a/src/crewai_tools/rag/loaders/pdf_loader.py b/src/crewai_tools/rag/loaders/pdf_loader.py new file mode 100644 index 000000000..ed1dbfbfe --- /dev/null +++ b/src/crewai_tools/rag/loaders/pdf_loader.py @@ -0,0 +1,72 @@ +"""PDF loader for extracting text from PDF files.""" + +import os +from pathlib import Path +from typing import Any + +from crewai_tools.rag.base_loader import BaseLoader, LoaderResult +from crewai_tools.rag.source_content import SourceContent + + +class PDFLoader(BaseLoader): + """Loader for PDF files.""" + + def load(self, source: SourceContent, **kwargs) -> LoaderResult: + """Load and extract text from a PDF file. + + Args: + source: The source content containing the PDF file path + + Returns: + LoaderResult with extracted text content + + Raises: + FileNotFoundError: If the PDF file doesn't exist + ImportError: If required PDF libraries aren't installed + """ + try: + import pypdf + except ImportError: + try: + import PyPDF2 as pypdf + except ImportError: + raise ImportError( + "PDF support requires pypdf or PyPDF2. 
" + "Install with: uv add pypdf" + ) + + file_path = source.source + + if not os.path.isfile(file_path): + raise FileNotFoundError(f"PDF file not found: {file_path}") + + text_content = [] + metadata: dict[str, Any] = { + "source": str(file_path), + "file_name": Path(file_path).name, + "file_type": "pdf" + } + + try: + with open(file_path, 'rb') as file: + pdf_reader = pypdf.PdfReader(file) + metadata["num_pages"] = len(pdf_reader.pages) + + for page_num, page in enumerate(pdf_reader.pages, 1): + page_text = page.extract_text() + if page_text.strip(): + text_content.append(f"Page {page_num}:\n{page_text}") + except Exception as e: + raise ValueError(f"Error reading PDF file {file_path}: {str(e)}") + + if not text_content: + content = f"[PDF file with no extractable text: {Path(file_path).name}]" + else: + content = "\n\n".join(text_content) + + return LoaderResult( + content=content, + source=str(file_path), + metadata=metadata, + doc_id=self.generate_doc_id(source_ref=str(file_path), content=content) + ) \ No newline at end of file diff --git a/src/crewai_tools/rag/loaders/postgres_loader.py b/src/crewai_tools/rag/loaders/postgres_loader.py new file mode 100644 index 000000000..131dbdc3f --- /dev/null +++ b/src/crewai_tools/rag/loaders/postgres_loader.py @@ -0,0 +1,99 @@ +"""PostgreSQL database loader.""" + +from typing import Any +from urllib.parse import urlparse + +import psycopg2 +from psycopg2.extras import RealDictCursor + +from crewai_tools.rag.base_loader import BaseLoader, LoaderResult +from crewai_tools.rag.source_content import SourceContent + + +class PostgresLoader(BaseLoader): + """Loader for PostgreSQL database content.""" + + def load(self, source: SourceContent, **kwargs) -> LoaderResult: + """Load content from a PostgreSQL database table. 
+ + Args: + source: SQL query (e.g., "SELECT * FROM table_name") + **kwargs: Additional arguments including db_uri + + Returns: + LoaderResult with database content + """ + metadata = kwargs.get("metadata", {}) + db_uri = metadata.get("db_uri") + + if not db_uri: + raise ValueError("Database URI is required for PostgreSQL loader") + + query = source.source + + parsed = urlparse(db_uri) + if parsed.scheme not in ["postgresql", "postgres", "postgresql+psycopg2"]: + raise ValueError(f"Invalid PostgreSQL URI scheme: {parsed.scheme}") + + connection_params = { + "host": parsed.hostname or "localhost", + "port": parsed.port or 5432, + "user": parsed.username, + "password": parsed.password, + "database": parsed.path.lstrip("/") if parsed.path else None, + "cursor_factory": RealDictCursor + } + + if not connection_params["database"]: + raise ValueError("Database name is required in the URI") + + try: + connection = psycopg2.connect(**connection_params) + try: + with connection.cursor() as cursor: + cursor.execute(query) + rows = cursor.fetchall() + + if not rows: + content = "No data found in the table" + return LoaderResult( + content=content, + metadata={"source": query, "row_count": 0}, + doc_id=self.generate_doc_id(source_ref=query, content=content) + ) + + text_parts = [] + + columns = list(rows[0].keys()) + text_parts.append(f"Columns: {', '.join(columns)}") + text_parts.append(f"Total rows: {len(rows)}") + text_parts.append("") + + for i, row in enumerate(rows, 1): + text_parts.append(f"Row {i}:") + for col, val in row.items(): + if val is not None: + text_parts.append(f" {col}: {val}") + text_parts.append("") + + content = "\n".join(text_parts) + + if len(content) > 100000: + content = content[:100000] + "\n\n[Content truncated...]" + + return LoaderResult( + content=content, + metadata={ + "source": query, + "database": connection_params["database"], + "row_count": len(rows), + "columns": columns + }, + doc_id=self.generate_doc_id(source_ref=query, content=content) + ) + finally: + connection.close() + except psycopg2.Error as e: + raise ValueError(f"PostgreSQL database error: {e}") + except Exception as e: + raise ValueError(f"Failed to load data from PostgreSQL: {e}") \ No newline at end of file diff --git a/src/crewai_tools/rag/loaders/xml_loader.py b/src/crewai_tools/rag/loaders/xml_loader.py index ffafdb9d9..30c949932 100644 --- a/src/crewai_tools/rag/loaders/xml_loader.py +++ b/src/crewai_tools/rag/loaders/xml_loader.py @@ -11,7 +11,7 @@ class XMLLoader(BaseLoader): if source_content.is_url(): content = self._load_from_url(source_ref, kwargs) - elif os.path.exists(source_ref): + elif source_content.path_exists(): content = self._load_from_file(source_ref) return self._parse_xml(content, source_ref) diff --git a/src/crewai_tools/rag/loaders/youtube_channel_loader.py b/src/crewai_tools/rag/loaders/youtube_channel_loader.py new file mode 100644 index 000000000..3a62c5146 --- /dev/null +++ b/src/crewai_tools/rag/loaders/youtube_channel_loader.py @@ -0,0 +1,141 @@ +"""YouTube channel loader for extracting content from YouTube channels.""" + +import re +from typing import Any + +from crewai_tools.rag.base_loader import BaseLoader, LoaderResult +from crewai_tools.rag.source_content import SourceContent + + +class YoutubeChannelLoader(BaseLoader): + """Loader for YouTube channels.""" + + def load(self, source: SourceContent, **kwargs) -> LoaderResult: + """Load and extract content from a YouTube channel. 
+ + Args: + source: The source content containing the YouTube channel URL + + Returns: + LoaderResult with channel content + + Raises: + ImportError: If required YouTube libraries aren't installed + ValueError: If the URL is not a valid YouTube channel URL + """ + try: + from pytube import Channel + except ImportError: + raise ImportError( + "YouTube channel support requires pytube. " + "Install with: uv add pytube" + ) + + channel_url = source.source + + if not any(pattern in channel_url for pattern in ['youtube.com/channel/', 'youtube.com/c/', 'youtube.com/@', 'youtube.com/user/']): + raise ValueError(f"Invalid YouTube channel URL: {channel_url}") + + metadata: dict[str, Any] = { + "source": channel_url, + "data_type": "youtube_channel" + } + + try: + channel = Channel(channel_url) + + metadata["channel_name"] = channel.channel_name + metadata["channel_id"] = channel.channel_id + + max_videos = kwargs.get('max_videos', 10) + video_urls = list(channel.video_urls)[:max_videos] + metadata["num_videos_loaded"] = len(video_urls) + metadata["total_videos"] = len(list(channel.video_urls)) + + content_parts = [ + f"YouTube Channel: {channel.channel_name}", + f"Channel ID: {channel.channel_id}", + f"Total Videos: {metadata['total_videos']}", + f"Videos Loaded: {metadata['num_videos_loaded']}", + "\n--- Video Summaries ---\n" + ] + + try: + from youtube_transcript_api import YouTubeTranscriptApi + from pytube import YouTube + + for i, video_url in enumerate(video_urls, 1): + try: + video_id = self._extract_video_id(video_url) + if not video_id: + continue + yt = YouTube(video_url) + title = yt.title or f"Video {i}" + description = yt.description[:200] if yt.description else "No description" + + content_parts.append(f"\n{i}. {title}") + content_parts.append(f" URL: {video_url}") + content_parts.append(f" Description: {description}...") + + try: + api = YouTubeTranscriptApi() + transcript_list = api.list(video_id) + transcript = None + + try: + transcript = transcript_list.find_transcript(['en']) + except: + try: + transcript = transcript_list.find_generated_transcript(['en']) + except: + transcript = next(iter(transcript_list), None) + + if transcript: + transcript_data = transcript.fetch() + text_parts = [] + char_count = 0 + for entry in transcript_data: + text = entry.text.strip() if hasattr(entry, 'text') else '' + if text: + text_parts.append(text) + char_count += len(text) + if char_count > 500: + break + + if text_parts: + preview = ' '.join(text_parts)[:500] + content_parts.append(f" Transcript Preview: {preview}...") + except: + content_parts.append(" Transcript: Not available") + + except Exception as e: + content_parts.append(f"\n{i}. Error loading video: {str(e)}") + + except ImportError: + for i, video_url in enumerate(video_urls, 1): + content_parts.append(f"\n{i}. 
{video_url}") + + content = '\n'.join(content_parts) + + except Exception as e: + raise ValueError(f"Unable to load YouTube channel {channel_url}: {str(e)}") from e + + return LoaderResult( + content=content, + source=channel_url, + metadata=metadata, + doc_id=self.generate_doc_id(source_ref=channel_url, content=content) + ) + + def _extract_video_id(self, url: str) -> str | None: + """Extract video ID from YouTube URL.""" + patterns = [ + r'(?:youtube\.com\/watch\?v=|youtu\.be\/|youtube\.com\/embed\/|youtube\.com\/v\/)([^&\n?#]+)', + ] + + for pattern in patterns: + match = re.search(pattern, url) + if match: + return match.group(1) + + return None \ No newline at end of file diff --git a/src/crewai_tools/rag/loaders/youtube_video_loader.py b/src/crewai_tools/rag/loaders/youtube_video_loader.py new file mode 100644 index 000000000..6e0fd39e8 --- /dev/null +++ b/src/crewai_tools/rag/loaders/youtube_video_loader.py @@ -0,0 +1,123 @@ +"""YouTube video loader for extracting transcripts from YouTube videos.""" + +import re +from typing import Any +from urllib.parse import urlparse, parse_qs + +from crewai_tools.rag.base_loader import BaseLoader, LoaderResult +from crewai_tools.rag.source_content import SourceContent + + +class YoutubeVideoLoader(BaseLoader): + """Loader for YouTube videos.""" + + def load(self, source: SourceContent, **kwargs) -> LoaderResult: + """Load and extract transcript from a YouTube video. + + Args: + source: The source content containing the YouTube URL + + Returns: + LoaderResult with transcript content + + Raises: + ImportError: If required YouTube libraries aren't installed + ValueError: If the URL is not a valid YouTube video URL + """ + try: + from youtube_transcript_api import YouTubeTranscriptApi + except ImportError: + raise ImportError( + "YouTube support requires youtube-transcript-api. 
" + "Install with: uv add youtube-transcript-api" + ) + + video_url = source.source + video_id = self._extract_video_id(video_url) + + if not video_id: + raise ValueError(f"Invalid YouTube URL: {video_url}") + + metadata: dict[str, Any] = { + "source": video_url, + "video_id": video_id, + "data_type": "youtube_video" + } + + try: + api = YouTubeTranscriptApi() + transcript_list = api.list(video_id) + + transcript = None + try: + transcript = transcript_list.find_transcript(['en']) + except: + try: + transcript = transcript_list.find_generated_transcript(['en']) + except: + transcript = next(iter(transcript_list)) + + if transcript: + metadata["language"] = transcript.language + metadata["is_generated"] = transcript.is_generated + + transcript_data = transcript.fetch() + + text_content = [] + for entry in transcript_data: + text = entry.text.strip() if hasattr(entry, 'text') else '' + if text: + text_content.append(text) + + content = ' '.join(text_content) + + try: + from pytube import YouTube + yt = YouTube(video_url) + metadata["title"] = yt.title + metadata["author"] = yt.author + metadata["length_seconds"] = yt.length + metadata["description"] = yt.description[:500] if yt.description else None + + if yt.title: + content = f"Title: {yt.title}\n\nAuthor: {yt.author or 'Unknown'}\n\nTranscript:\n{content}" + except: + pass + else: + raise ValueError(f"No transcript available for YouTube video: {video_id}") + + except Exception as e: + raise ValueError(f"Unable to extract transcript from YouTube video {video_id}: {str(e)}") from e + + return LoaderResult( + content=content, + source=video_url, + metadata=metadata, + doc_id=self.generate_doc_id(source_ref=video_url, content=content) + ) + + def _extract_video_id(self, url: str) -> str | None: + """Extract video ID from various YouTube URL formats.""" + patterns = [ + r'(?:youtube\.com\/watch\?v=|youtu\.be\/|youtube\.com\/embed\/|youtube\.com\/v\/)([^&\n?#]+)', + ] + + for pattern in patterns: + match = re.search(pattern, url) + if match: + return match.group(1) + + try: + parsed = urlparse(url) + hostname = parsed.hostname + if hostname: + hostname_lower = hostname.lower() + # Allow youtube.com and any subdomain of youtube.com, plus youtu.be shortener + if hostname_lower == 'youtube.com' or hostname_lower.endswith('.youtube.com') or hostname_lower == 'youtu.be': + query_params = parse_qs(parsed.query) + if 'v' in query_params: + return query_params['v'][0] + except: + pass + + return None \ No newline at end of file diff --git a/src/crewai_tools/rag/misc.py b/src/crewai_tools/rag/misc.py index 5b95f804e..edec22f80 100644 --- a/src/crewai_tools/rag/misc.py +++ b/src/crewai_tools/rag/misc.py @@ -1,4 +1,29 @@ import hashlib +from typing import Any def compute_sha256(content: str) -> str: return hashlib.sha256(content.encode("utf-8")).hexdigest() + +def sanitize_metadata_for_chromadb(metadata: dict[str, Any]) -> dict[str, Any]: + """Sanitize metadata to ensure ChromaDB compatibility. + + ChromaDB only accepts str, int, float, or bool values in metadata. + This function converts other types to strings. 
+ + Args: + metadata: Dictionary of metadata to sanitize + + Returns: + Sanitized metadata dictionary with only ChromaDB-compatible types + """ + sanitized = {} + for key, value in metadata.items(): + if isinstance(value, (str, int, float, bool)) or value is None: + sanitized[key] = value + elif isinstance(value, (list, tuple)): + # Convert lists/tuples to pipe-separated strings + sanitized[key] = " | ".join(str(v) for v in value) + else: + # Convert other types to string + sanitized[key] = str(value) + return sanitized diff --git a/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py b/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py index 155b4390d..85be97894 100644 --- a/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py +++ b/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py @@ -1,14 +1,10 @@ from typing import Any, Optional, Type -try: - from embedchain.models.data_type import DataType - EMBEDCHAIN_AVAILABLE = True -except ImportError: - EMBEDCHAIN_AVAILABLE = False from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool +from crewai_tools.rag.data_types import DataType class FixedCodeDocsSearchToolSchema(BaseModel): @@ -42,15 +38,15 @@ class CodeDocsSearchTool(RagTool): self._generate_description() def add(self, docs_url: str) -> None: - if not EMBEDCHAIN_AVAILABLE: - raise ImportError("embedchain is not installed. Please install it with `pip install crewai-tools[embedchain]`") super().add(docs_url, data_type=DataType.DOCS_SITE) def _run( self, search_query: str, docs_url: Optional[str] = None, + similarity_threshold: float | None = None, + limit: int | None = None, ) -> str: if docs_url is not None: self.add(docs_url) - return super()._run(query=search_query) + return super()._run(query=search_query, similarity_threshold=similarity_threshold, limit=limit) diff --git a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py index 4be84efdd..ac95b1df5 100644 --- a/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py +++ b/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py @@ -1,14 +1,10 @@ from typing import Optional, Type -try: - from embedchain.models.data_type import DataType - EMBEDCHAIN_AVAILABLE = True -except ImportError: - EMBEDCHAIN_AVAILABLE = False from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool +from crewai_tools.rag.data_types import DataType class FixedCSVSearchToolSchema(BaseModel): @@ -42,15 +38,16 @@ class CSVSearchTool(RagTool): self._generate_description() def add(self, csv: str) -> None: - if not EMBEDCHAIN_AVAILABLE: - raise ImportError("embedchain is not installed. 
Please install it with `pip install crewai-tools[embedchain]`") super().add(csv, data_type=DataType.CSV) def _run( self, search_query: str, csv: Optional[str] = None, + similarity_threshold: float | None = None, + limit: int | None = None, ) -> str: if csv is not None: self.add(csv) - return super()._run(query=search_query) + return super()._run(query=search_query, similarity_threshold=similarity_threshold, limit=limit) + diff --git a/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py b/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py index 30fdd52cc..9f0765f2d 100644 --- a/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py +++ b/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py @@ -1,14 +1,9 @@ from typing import Optional, Type -try: - from embedchain.loaders.directory_loader import DirectoryLoader - EMBEDCHAIN_AVAILABLE = True -except ImportError: - EMBEDCHAIN_AVAILABLE = False - from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool +from crewai_tools.rag.data_types import DataType class FixedDirectorySearchToolSchema(BaseModel): @@ -34,8 +29,6 @@ class DirectorySearchTool(RagTool): args_schema: Type[BaseModel] = DirectorySearchToolSchema def __init__(self, directory: Optional[str] = None, **kwargs): - if not EMBEDCHAIN_AVAILABLE: - raise ImportError("embedchain is not installed. Please install it with `pip install crewai-tools[embedchain]`") super().__init__(**kwargs) if directory is not None: self.add(directory) @@ -44,16 +37,15 @@ class DirectorySearchTool(RagTool): self._generate_description() def add(self, directory: str) -> None: - super().add( - directory, - loader=DirectoryLoader(config=dict(recursive=True)), - ) + super().add(directory, data_type=DataType.DIRECTORY) def _run( self, search_query: str, directory: Optional[str] = None, + similarity_threshold: float | None = None, + limit: int | None = None, ) -> str: if directory is not None: self.add(directory) - return super()._run(query=search_query) + return super()._run(query=search_query, similarity_threshold=similarity_threshold, limit=limit) diff --git a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py index 97dab02cd..9a33bade9 100644 --- a/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py +++ b/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py @@ -1,14 +1,10 @@ from typing import Any, Optional, Type -try: - from embedchain.models.data_type import DataType - EMBEDCHAIN_AVAILABLE = True -except ImportError: - EMBEDCHAIN_AVAILABLE = False from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool +from crewai_tools.rag.data_types import DataType class FixedDOCXSearchToolSchema(BaseModel): @@ -48,15 +44,15 @@ class DOCXSearchTool(RagTool): self._generate_description() def add(self, docx: str) -> None: - if not EMBEDCHAIN_AVAILABLE: - raise ImportError("embedchain is not installed. 
Please install it with `pip install crewai-tools[embedchain]`") super().add(docx, data_type=DataType.DOCX) def _run( self, search_query: str, docx: Optional[str] = None, + similarity_threshold: float | None = None, + limit: int | None = None, ) -> Any: if docx is not None: self.add(docx) - return super()._run(query=search_query) + return super()._run(query=search_query, similarity_threshold=similarity_threshold, limit=limit) diff --git a/src/crewai_tools/tools/github_search_tool/github_search_tool.py b/src/crewai_tools/tools/github_search_tool/github_search_tool.py index afde4fe92..3a0fe42b6 100644 --- a/src/crewai_tools/tools/github_search_tool/github_search_tool.py +++ b/src/crewai_tools/tools/github_search_tool/github_search_tool.py @@ -1,14 +1,9 @@ -from typing import List, Optional, Type, Any +from typing import List, Optional, Type -try: - from embedchain.loaders.github import GithubLoader - EMBEDCHAIN_AVAILABLE = True -except ImportError: - EMBEDCHAIN_AVAILABLE = False - -from pydantic import BaseModel, Field, PrivateAttr +from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool +from crewai_tools.rag.data_types import DataType class FixedGithubSearchToolSchema(BaseModel): @@ -42,7 +37,6 @@ class GithubSearchTool(RagTool): default_factory=lambda: ["code", "repo", "pr", "issue"], description="Content types you want to be included search, options: [code, repo, pr, issue]", ) - _loader: Any | None = PrivateAttr(default=None) def __init__( self, @@ -50,10 +44,7 @@ class GithubSearchTool(RagTool): content_types: Optional[List[str]] = None, **kwargs, ): - if not EMBEDCHAIN_AVAILABLE: - raise ImportError("embedchain is not installed. Please install it with `pip install crewai-tools[embedchain]`") super().__init__(**kwargs) - self._loader = GithubLoader(config={"token": self.gh_token}) if github_repo and content_types: self.add(repo=github_repo, content_types=content_types) @@ -67,11 +58,10 @@ class GithubSearchTool(RagTool): content_types: Optional[List[str]] = None, ) -> None: content_types = content_types or self.content_types - super().add( - f"repo:{repo} type:{','.join(content_types)}", - data_type="github", - loader=self._loader, + f"https://github.com/{repo}", + data_type=DataType.GITHUB, + metadata={"content_types": content_types, "gh_token": self.gh_token} ) def _run( @@ -79,10 +69,12 @@ class GithubSearchTool(RagTool): search_query: str, github_repo: Optional[str] = None, content_types: Optional[List[str]] = None, + similarity_threshold: float | None = None, + limit: int | None = None, ) -> str: if github_repo: self.add( repo=github_repo, content_types=content_types, ) - return super()._run(query=search_query) + return super()._run(query=search_query, similarity_threshold=similarity_threshold, limit=limit) diff --git a/src/crewai_tools/tools/json_search_tool/json_search_tool.py b/src/crewai_tools/tools/json_search_tool/json_search_tool.py index 820323eec..49dad0ac7 100644 --- a/src/crewai_tools/tools/json_search_tool/json_search_tool.py +++ b/src/crewai_tools/tools/json_search_tool/json_search_tool.py @@ -41,7 +41,9 @@ class JSONSearchTool(RagTool): self, search_query: str, json_path: Optional[str] = None, + similarity_threshold: float | None = None, + limit: int | None = None, ) -> str: if json_path is not None: self.add(json_path) - return super()._run(query=search_query) + return super()._run(query=search_query, similarity_threshold=similarity_threshold, limit=limit) diff --git a/src/crewai_tools/tools/mdx_search_tool/mdx_search_tool.py 
b/src/crewai_tools/tools/mdx_search_tool/mdx_search_tool.py index 807da62fe..3390b8dba 100644 --- a/src/crewai_tools/tools/mdx_search_tool/mdx_search_tool.py +++ b/src/crewai_tools/tools/mdx_search_tool/mdx_search_tool.py @@ -2,13 +2,9 @@ from typing import Optional, Type from pydantic import BaseModel, Field -try: - from embedchain.models.data_type import DataType - EMBEDCHAIN_AVAILABLE = True -except ImportError: - EMBEDCHAIN_AVAILABLE = False from ..rag.rag_tool import RagTool +from crewai_tools.rag.data_types import DataType class FixedMDXSearchToolSchema(BaseModel): @@ -42,15 +38,15 @@ class MDXSearchTool(RagTool): self._generate_description() def add(self, mdx: str) -> None: - if not EMBEDCHAIN_AVAILABLE: - raise ImportError("embedchain is not installed. Please install it with `pip install crewai-tools[embedchain]`") super().add(mdx, data_type=DataType.MDX) def _run( self, search_query: str, mdx: Optional[str] = None, + similarity_threshold: float | None = None, + limit: int | None = None, ) -> str: if mdx is not None: self.add(mdx) - return super()._run(query=search_query) + return super()._run(query=search_query, similarity_threshold=similarity_threshold, limit=limit) diff --git a/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py b/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py index 8c2c5ef5d..c97585b4e 100644 --- a/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py +++ b/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py @@ -1,14 +1,9 @@ from typing import Any, Type -try: - from embedchain.loaders.mysql import MySQLLoader - EMBEDCHAIN_AVAILABLE = True -except ImportError: - EMBEDCHAIN_AVAILABLE = False - from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool +from crewai_tools.rag.data_types import DataType class MySQLSearchToolSchema(BaseModel): @@ -27,12 +22,8 @@ class MySQLSearchTool(RagTool): db_uri: str = Field(..., description="Mandatory database URI") def __init__(self, table_name: str, **kwargs): - if not EMBEDCHAIN_AVAILABLE: - raise ImportError("embedchain is not installed. Please install it with `pip install crewai-tools[embedchain]`") super().__init__(**kwargs) - kwargs["data_type"] = "mysql" - kwargs["loader"] = MySQLLoader(config=dict(url=self.db_uri)) - self.add(table_name) + self.add(table_name, data_type=DataType.MYSQL, metadata={"db_uri": self.db_uri}) self.description = f"A tool that can be used to semantic search a query the {table_name} database table's content." 
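A hedged usage sketch for the reworked `MySQLSearchTool` (the URI and table name are placeholders). One caveat worth checking: `MySQLLoader` above executes its source string as SQL, while the tool forwards a bare table name, so the adapter presumably expands it into a `SELECT` statement; that step is not visible in this patch.

```python
from crewai_tools.tools import MySQLSearchTool

tool = MySQLSearchTool(
    table_name="employees",                              # placeholder table
    db_uri="mysql://user:password@localhost:3306/mydb",  # placeholder URI
)
# Ingestion now goes through DataType.MYSQL with the URI carried as
# metadata, instead of an embedchain MySQLLoader instance.
print(tool._run(search_query="who works in sales?", limit=3))
```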
self._generate_description() @@ -46,6 +37,8 @@ class MySQLSearchTool(RagTool): def _run( self, search_query: str, + similarity_threshold: float | None = None, + limit: int | None = None, **kwargs: Any, ) -> Any: - return super()._run(query=search_query) + return super()._run(query=search_query, similarity_threshold=similarity_threshold, limit=limit) diff --git a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py index 96f141c17..9ab1f29ea 100644 --- a/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py +++ b/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py @@ -2,13 +2,8 @@ from typing import Optional, Type from pydantic import BaseModel, Field -try: - from embedchain.models.data_type import DataType - EMBEDCHAIN_AVAILABLE = True -except ImportError: - EMBEDCHAIN_AVAILABLE = False - from ..rag.rag_tool import RagTool +from crewai_tools.rag.data_types import DataType class FixedPDFSearchToolSchema(BaseModel): @@ -41,15 +36,15 @@ class PDFSearchTool(RagTool): self._generate_description() def add(self, pdf: str) -> None: - if not EMBEDCHAIN_AVAILABLE: - raise ImportError("embedchain is not installed. Please install it with `pip install crewai-tools[embedchain]`") super().add(pdf, data_type=DataType.PDF_FILE) def _run( self, query: str, pdf: Optional[str] = None, + similarity_threshold: float | None = None, + limit: int | None = None, ) -> str: if pdf is not None: self.add(pdf) - return super()._run(query=query) + return super()._run(query=query, similarity_threshold=similarity_threshold, limit=limit) diff --git a/src/crewai_tools/tools/pg_search_tool/pg_search_tool.py b/src/crewai_tools/tools/pg_search_tool/pg_search_tool.py index 30e294944..31f2e697c 100644 --- a/src/crewai_tools/tools/pg_search_tool/pg_search_tool.py +++ b/src/crewai_tools/tools/pg_search_tool/pg_search_tool.py @@ -1,14 +1,9 @@ from typing import Any, Type -try: - from embedchain.loaders.postgres import PostgresLoader - EMBEDCHAIN_AVAILABLE = True -except ImportError: - EMBEDCHAIN_AVAILABLE = False - from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool +from crewai_tools.rag.data_types import DataType class PGSearchToolSchema(BaseModel): @@ -27,12 +22,8 @@ class PGSearchTool(RagTool): db_uri: str = Field(..., description="Mandatory database URI") def __init__(self, table_name: str, **kwargs): - if not EMBEDCHAIN_AVAILABLE: - raise ImportError("embedchain is not installed. Please install it with `pip install crewai-tools[embedchain]`") super().__init__(**kwargs) - kwargs["data_type"] = "postgres" - kwargs["loader"] = PostgresLoader(config=dict(url=self.db_uri)) - self.add(table_name) + self.add(table_name, data_type=DataType.POSTGRES, metadata={"db_uri": self.db_uri}) self.description = f"A tool that can be used to semantic search a query the {table_name} database table's content." 
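`PostgresLoader` can also be driven directly, which is handy for checking the row formatting. A minimal sketch, assuming a reachable database, that `SourceContent` wraps a raw string, and that the loader needs no constructor arguments:

```python
from crewai_tools.rag.loaders.postgres_loader import PostgresLoader
from crewai_tools.rag.source_content import SourceContent

loader = PostgresLoader()
result = loader.load(
    SourceContent("SELECT * FROM requirements LIMIT 10"),  # the query is the source
    metadata={"db_uri": "postgresql://user:password@localhost:5432/mydb"},
)
# Rows come back flattened as "Row N:" blocks with one line per column.
print(result.metadata["row_count"])
print(result.content[:200])
```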
self._generate_description() @@ -46,6 +37,8 @@ class PGSearchTool(RagTool): def _run( self, search_query: str, + similarity_threshold: float | None = None, + limit: int | None = None, **kwargs: Any, ) -> Any: - return super()._run(query=search_query, **kwargs) + return super()._run(query=search_query, similarity_threshold=similarity_threshold, limit=limit, **kwargs) diff --git a/src/crewai_tools/tools/rag/rag_tool.py b/src/crewai_tools/tools/rag/rag_tool.py index 1a9fad8b8..2397eac6f 100644 --- a/src/crewai_tools/tools/rag/rag_tool.py +++ b/src/crewai_tools/tools/rag/rag_tool.py @@ -1,17 +1,22 @@ -import portalocker - +import os from abc import ABC, abstractmethod -from typing import Any -from pydantic import BaseModel, ConfigDict, Field, model_validator +from typing import Any, cast +from crewai.rag.embeddings.factory import get_embedding_function from crewai.tools import BaseTool +from pydantic import BaseModel, ConfigDict, Field, model_validator class Adapter(BaseModel, ABC): model_config = ConfigDict(arbitrary_types_allowed=True) @abstractmethod - def query(self, question: str) -> str: + def query( + self, + question: str, + similarity_threshold: float | None = None, + limit: int | None = None, + ) -> str: """Query the knowledge base with a question and return the answer.""" @abstractmethod @@ -25,7 +30,12 @@ class Adapter(BaseModel, ABC): class RagTool(BaseTool): class _AdapterPlaceholder(Adapter): - def query(self, question: str) -> str: + def query( + self, + question: str, + similarity_threshold: float | None = None, + limit: int | None = None, + ) -> str: raise NotImplementedError def add(self, *args: Any, **kwargs: Any) -> None: @@ -34,28 +44,149 @@ class RagTool(BaseTool): name: str = "Knowledge base" description: str = "A knowledge base that can be used to answer questions." summarize: bool = False + similarity_threshold: float = 0.6 + limit: int = 5 adapter: Adapter = Field(default_factory=_AdapterPlaceholder) - config: dict[str, Any] | None = None + config: Any | None = None @model_validator(mode="after") def _set_default_adapter(self): if isinstance(self.adapter, RagTool._AdapterPlaceholder): - try: - from embedchain import App - except ImportError: - raise ImportError("embedchain is not installed. Please install it with `pip install crewai-tools[embedchain]`") + from crewai_tools.adapters.crewai_rag_adapter import CrewAIRagAdapter - from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter + parsed_config = self._parse_config(self.config) - with portalocker.Lock("crewai-rag-tool.lock", timeout=10): - app = App.from_config(config=self.config) if self.config else App() - - self.adapter = EmbedchainAdapter( - embedchain_app=app, summarize=self.summarize + self.adapter = CrewAIRagAdapter( + collection_name="rag_tool_collection", + summarize=self.summarize, + similarity_threshold=self.similarity_threshold, + limit=self.limit, + config=parsed_config, ) return self + def _parse_config(self, config: Any) -> Any: + """Parse complex config format to extract provider-specific config. + + Raises: + ValueError: If the config format is invalid or uses unsupported providers. 
+ """ + if config is None: + return None + + if isinstance(config, dict) and "provider" in config: + return config + + if isinstance(config, dict): + if "vectordb" in config: + vectordb_config = config["vectordb"] + if isinstance(vectordb_config, dict) and "provider" in vectordb_config: + provider = vectordb_config["provider"] + provider_config = vectordb_config.get("config", {}) + + supported_providers = ["chromadb", "qdrant"] + if provider not in supported_providers: + raise ValueError( + f"Unsupported vector database provider: '{provider}'. " + f"CrewAI RAG currently supports: {', '.join(supported_providers)}." + ) + + embedding_config = config.get("embedding_model") + embedding_function = None + if embedding_config and isinstance(embedding_config, dict): + embedding_function = self._create_embedding_function( + embedding_config, provider + ) + + return self._create_provider_config( + provider, provider_config, embedding_function + ) + else: + return None + else: + embedding_config = config.get("embedding_model") + embedding_function = None + if embedding_config and isinstance(embedding_config, dict): + embedding_function = self._create_embedding_function( + embedding_config, "chromadb" + ) + + return self._create_provider_config("chromadb", {}, embedding_function) + return config + + @staticmethod + def _create_embedding_function(embedding_config: dict, provider: str) -> Any: + """Create embedding function for the specified vector database provider.""" + embedding_provider = embedding_config.get("provider") + embedding_model_config = embedding_config.get("config", {}).copy() + + if "model" in embedding_model_config: + embedding_model_config["model_name"] = embedding_model_config.pop("model") + + factory_config = {"provider": embedding_provider, **embedding_model_config} + + if embedding_provider == "openai" and "api_key" not in factory_config: + api_key = os.getenv("OPENAI_API_KEY") + if api_key: + factory_config["api_key"] = api_key + + print(f"Creating embedding function with config: {factory_config}") + + if provider == "chromadb": + embedding_func = get_embedding_function(factory_config) + print(f"Created embedding function: {embedding_func}") + print(f"Embedding function type: {type(embedding_func)}") + return embedding_func + + elif provider == "qdrant": + chromadb_func = get_embedding_function(factory_config) + + def qdrant_embed_fn(text: str) -> list[float]: + """Embed text using ChromaDB function and convert to list of floats for Qdrant. + + Args: + text: The input text to embed. + + Returns: + A list of floats representing the embedding. 
+ """ + embeddings = chromadb_func([text]) + return embeddings[0] if embeddings and len(embeddings) > 0 else [] + + return cast(Any, qdrant_embed_fn) + + return None + + @staticmethod + def _create_provider_config( + provider: str, provider_config: dict, embedding_function: Any + ) -> Any: + """Create proper provider config object.""" + if provider == "chromadb": + from crewai.rag.chromadb.config import ChromaDBConfig + + config_kwargs = {} + if embedding_function: + config_kwargs["embedding_function"] = embedding_function + + config_kwargs.update(provider_config) + + return ChromaDBConfig(**config_kwargs) + + elif provider == "qdrant": + from crewai.rag.qdrant.config import QdrantConfig + + config_kwargs = {} + if embedding_function: + config_kwargs["embedding_function"] = embedding_function + + config_kwargs.update(provider_config) + + return QdrantConfig(**config_kwargs) + + return None + def add( self, *args: Any, @@ -66,5 +197,13 @@ class RagTool(BaseTool): def _run( self, query: str, + similarity_threshold: float | None = None, + limit: int | None = None, ) -> str: - return f"Relevant Content:\n{self.adapter.query(query)}" + threshold = ( + similarity_threshold + if similarity_threshold is not None + else self.similarity_threshold + ) + result_limit = limit if limit is not None else self.limit + return f"Relevant Content:\n{self.adapter.query(query, similarity_threshold=threshold, limit=result_limit)}" diff --git a/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py b/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py index 93d696ab1..2ccfa4eb2 100644 --- a/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py +++ b/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py @@ -39,7 +39,9 @@ class TXTSearchTool(RagTool): self, search_query: str, txt: Optional[str] = None, + similarity_threshold: float | None = None, + limit: int | None = None, ) -> str: if txt is not None: self.add(txt) - return super()._run(query=search_query) + return super()._run(query=search_query, similarity_threshold=similarity_threshold, limit=limit) diff --git a/src/crewai_tools/tools/website_search/website_search_tool.py b/src/crewai_tools/tools/website_search/website_search_tool.py index 9728b44db..ac8084d3f 100644 --- a/src/crewai_tools/tools/website_search/website_search_tool.py +++ b/src/crewai_tools/tools/website_search/website_search_tool.py @@ -1,14 +1,9 @@ from typing import Any, Optional, Type -try: - from embedchain.models.data_type import DataType - EMBEDCHAIN_AVAILABLE = True -except ImportError: - EMBEDCHAIN_AVAILABLE = False - from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool +from crewai_tools.rag.data_types import DataType class FixedWebsiteSearchToolSchema(BaseModel): @@ -44,15 +39,15 @@ class WebsiteSearchTool(RagTool): self._generate_description() def add(self, website: str) -> None: - if not EMBEDCHAIN_AVAILABLE: - raise ImportError("embedchain is not installed. 
Please install it with `pip install crewai-tools[embedchain]`") - super().add(website, data_type=DataType.WEB_PAGE) + super().add(website, data_type=DataType.WEBSITE) def _run( self, search_query: str, website: Optional[str] = None, + similarity_threshold: float | None = None, + limit: int | None = None, ) -> str: if website is not None: self.add(website) - return super()._run(query=search_query) + return super()._run(query=search_query, similarity_threshold=similarity_threshold, limit=limit) diff --git a/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py b/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py index 426b0ca38..8509c2d42 100644 --- a/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py +++ b/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py @@ -39,7 +39,9 @@ class XMLSearchTool(RagTool): self, search_query: str, xml: Optional[str] = None, + similarity_threshold: float | None = None, + limit: int | None = None, ) -> str: if xml is not None: self.add(xml) - return super()._run(query=search_query) + return super()._run(query=search_query, similarity_threshold=similarity_threshold, limit=limit) diff --git a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py index 6d16a708d..80f597665 100644 --- a/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py +++ b/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py @@ -1,14 +1,9 @@ from typing import Any, Optional, Type -try: - from embedchain.models.data_type import DataType - EMBEDCHAIN_AVAILABLE = True -except ImportError: - EMBEDCHAIN_AVAILABLE = False - from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool +from crewai_tools.rag.data_types import DataType class FixedYoutubeChannelSearchToolSchema(BaseModel): @@ -55,7 +50,9 @@ class YoutubeChannelSearchTool(RagTool): self, search_query: str, youtube_channel_handle: Optional[str] = None, + similarity_threshold: float | None = None, + limit: int | None = None, ) -> str: if youtube_channel_handle is not None: self.add(youtube_channel_handle) - return super()._run(query=search_query) + return super()._run(query=search_query, similarity_threshold=similarity_threshold, limit=limit) diff --git a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py index b93cc6c29..000c81cec 100644 --- a/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py +++ b/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py @@ -1,14 +1,10 @@ from typing import Any, Optional, Type -try: - from embedchain.models.data_type import DataType - EMBEDCHAIN_AVAILABLE = True -except ImportError: - EMBEDCHAIN_AVAILABLE = False from pydantic import BaseModel, Field from ..rag.rag_tool import RagTool +from crewai_tools.rag.data_types import DataType class FixedYoutubeVideoSearchToolSchema(BaseModel): @@ -44,15 +40,15 @@ class YoutubeVideoSearchTool(RagTool): self._generate_description() def add(self, youtube_video_url: str) -> None: - if not EMBEDCHAIN_AVAILABLE: - raise ImportError("embedchain is not installed. 
Please install it with `pip install crewai-tools[embedchain]`") super().add(youtube_video_url, data_type=DataType.YOUTUBE_VIDEO) def _run( self, search_query: str, youtube_video_url: Optional[str] = None, + similarity_threshold: float | None = None, + limit: int | None = None, ) -> str: if youtube_video_url is not None: self.add(youtube_video_url) - return super()._run(query=search_query) + return super()._run(query=search_query, similarity_threshold=similarity_threshold, limit=limit) diff --git a/tests/tools/rag/rag_tool_test.py b/tests/tools/rag/rag_tool_test.py index 42baccc2c..693cd120a 100644 --- a/tests/tools/rag/rag_tool_test.py +++ b/tests/tools/rag/rag_tool_test.py @@ -1,43 +1,54 @@ -import os -from tempfile import NamedTemporaryFile +from tempfile import TemporaryDirectory from typing import cast -from unittest import mock +from pathlib import Path -from pytest import fixture -from crewai_tools.adapters.embedchain_adapter import EmbedchainAdapter +from crewai_tools.adapters.crewai_rag_adapter import CrewAIRagAdapter from crewai_tools.tools.rag.rag_tool import RagTool -@fixture(autouse=True) -def mock_embedchain_db_uri(): - with NamedTemporaryFile() as tmp: - uri = f"sqlite:///{tmp.name}" - with mock.patch.dict(os.environ, {"EMBEDCHAIN_DB_URI": uri}): - yield - - -def test_custom_llm_and_embedder(): +def test_rag_tool_initialization(): + """Test that RagTool initializes with CrewAI adapter by default.""" class MyTool(RagTool): pass - tool = MyTool( - config=dict( - llm=dict( - provider="openai", - config=dict(model="gpt-3.5-custom"), - ), - embedder=dict( - provider="openai", - config=dict(model="text-embedding-3-custom"), - ), - ) - ) + tool = MyTool() assert tool.adapter is not None - assert isinstance(tool.adapter, EmbedchainAdapter) + assert isinstance(tool.adapter, CrewAIRagAdapter) + + adapter = cast(CrewAIRagAdapter, tool.adapter) + assert adapter.collection_name == "rag_tool_collection" + assert adapter._client is not None - adapter = cast(EmbedchainAdapter, tool.adapter) - assert adapter.embedchain_app.llm.config.model == "gpt-3.5-custom" - assert ( - adapter.embedchain_app.embedding_model.config.model == "text-embedding-3-custom" - ) + +def test_rag_tool_add_and_query(): + """Test adding content and querying with RagTool.""" + class MyTool(RagTool): + pass + + tool = MyTool() + + tool.add("The sky is blue on a clear day.") + tool.add("Machine learning is a subset of artificial intelligence.") + + result = tool._run(query="What color is the sky?") + assert "Relevant Content:" in result + + result = tool._run(query="Tell me about machine learning") + assert "Relevant Content:" in result + + +def test_rag_tool_with_file(): + """Test RagTool with file content.""" + with TemporaryDirectory() as tmpdir: + test_file = Path(tmpdir) / "test.txt" + test_file.write_text("Python is a programming language known for its simplicity.") + + class MyTool(RagTool): + pass + + tool = MyTool() + tool.add(str(test_file)) + + result = tool._run(query="What is Python?") + assert "Relevant Content:" in result diff --git a/tests/tools/test_search_tools.py b/tests/tools/test_search_tools.py index eaa0c591c..b912ef005 100644 --- a/tests/tools/test_search_tools.py +++ b/tests/tools/test_search_tools.py @@ -1,11 +1,11 @@ import os import tempfile from pathlib import Path -from unittest.mock import ANY, MagicMock +from unittest.mock import MagicMock import pytest -from embedchain.models.data_type import DataType +from crewai_tools.rag.data_types import DataType from crewai_tools.tools import ( 
CodeDocsSearchTool, CSVSearchTool, @@ -49,7 +49,7 @@ def test_pdf_search_tool(mock_adapter): result = tool._run(query="test content") assert "this is a test" in result.lower() mock_adapter.add.assert_called_once_with("test.pdf", data_type=DataType.PDF_FILE) - mock_adapter.query.assert_called_once_with("test content") + mock_adapter.query.assert_called_once_with("test content", similarity_threshold=0.6, limit=5) mock_adapter.query.reset_mock() mock_adapter.add.reset_mock() @@ -58,7 +58,7 @@ def test_pdf_search_tool(mock_adapter): result = tool._run(pdf="test.pdf", query="test content") assert "this is a test" in result.lower() mock_adapter.add.assert_called_once_with("test.pdf", data_type=DataType.PDF_FILE) - mock_adapter.query.assert_called_once_with("test content") + mock_adapter.query.assert_called_once_with("test content", similarity_threshold=0.6, limit=5) def test_txt_search_tool(): @@ -82,7 +82,7 @@ def test_docx_search_tool(mock_adapter): result = tool._run(search_query="test content") assert "this is a test" in result.lower() mock_adapter.add.assert_called_once_with("test.docx", data_type=DataType.DOCX) - mock_adapter.query.assert_called_once_with("test content") + mock_adapter.query.assert_called_once_with("test content", similarity_threshold=0.6, limit=5) mock_adapter.query.reset_mock() mock_adapter.add.reset_mock() @@ -91,7 +91,7 @@ def test_docx_search_tool(mock_adapter): result = tool._run(docx="test.docx", search_query="test content") assert "this is a test" in result.lower() mock_adapter.add.assert_called_once_with("test.docx", data_type=DataType.DOCX) - mock_adapter.query.assert_called_once_with("test content") + mock_adapter.query.assert_called_once_with("test content", similarity_threshold=0.6, limit=5) def test_json_search_tool(): @@ -114,7 +114,7 @@ def test_xml_search_tool(mock_adapter): result = tool._run(search_query="test XML", xml="test.xml") assert "this is a test" in result.lower() mock_adapter.add.assert_called_once_with("test.xml") - mock_adapter.query.assert_called_once_with("test XML") + mock_adapter.query.assert_called_once_with("test XML", similarity_threshold=0.6, limit=5) def test_csv_search_tool(): @@ -153,8 +153,8 @@ def test_website_search_tool(mock_adapter): tool = WebsiteSearchTool(website=website, adapter=mock_adapter) result = tool._run(search_query=search_query) - mock_adapter.query.assert_called_once_with("what is crewai?") - mock_adapter.add.assert_called_once_with(website, data_type=DataType.WEB_PAGE) + mock_adapter.query.assert_called_once_with("what is crewai?", similarity_threshold=0.6, limit=5) + mock_adapter.add.assert_called_once_with(website, data_type=DataType.WEBSITE) assert "this is a test" in result.lower() @@ -164,8 +164,8 @@ def test_website_search_tool(mock_adapter): tool = WebsiteSearchTool(adapter=mock_adapter) result = tool._run(website=website, search_query=search_query) - mock_adapter.query.assert_called_once_with("what is crewai?") - mock_adapter.add.assert_called_once_with(website, data_type=DataType.WEB_PAGE) + mock_adapter.query.assert_called_once_with("what is crewai?", similarity_threshold=0.6, limit=5) + mock_adapter.add.assert_called_once_with(website, data_type=DataType.WEBSITE) assert "this is a test" in result.lower() @@ -185,7 +185,7 @@ def test_youtube_video_search_tool(mock_adapter): mock_adapter.add.assert_called_once_with( youtube_video_url, data_type=DataType.YOUTUBE_VIDEO ) - mock_adapter.query.assert_called_once_with(search_query) + mock_adapter.query.assert_called_once_with(search_query, 
@@ -185,7 +185,7 @@ def test_youtube_video_search_tool(mock_adapter):
     mock_adapter.add.assert_called_once_with(
         youtube_video_url, data_type=DataType.YOUTUBE_VIDEO
     )
-    mock_adapter.query.assert_called_once_with(search_query)
+    mock_adapter.query.assert_called_once_with(search_query, similarity_threshold=0.6, limit=5)
 
     mock_adapter.query.reset_mock()
     mock_adapter.add.reset_mock()
@@ -197,7 +197,7 @@ def test_youtube_video_search_tool(mock_adapter):
     mock_adapter.add.assert_called_once_with(
         youtube_video_url, data_type=DataType.YOUTUBE_VIDEO
     )
-    mock_adapter.query.assert_called_once_with(search_query)
+    mock_adapter.query.assert_called_once_with(search_query, similarity_threshold=0.6, limit=5)
 
 
 def test_youtube_channel_search_tool(mock_adapter):
@@ -213,7 +213,7 @@ def test_youtube_channel_search_tool(mock_adapter):
     mock_adapter.add.assert_called_once_with(
         youtube_channel_handle, data_type=DataType.YOUTUBE_CHANNEL
     )
-    mock_adapter.query.assert_called_once_with(search_query)
+    mock_adapter.query.assert_called_once_with(search_query, similarity_threshold=0.6, limit=5)
 
     mock_adapter.query.reset_mock()
     mock_adapter.add.reset_mock()
@@ -227,7 +227,7 @@ def test_youtube_channel_search_tool(mock_adapter):
     mock_adapter.add.assert_called_once_with(
         youtube_channel_handle, data_type=DataType.YOUTUBE_CHANNEL
     )
-    mock_adapter.query.assert_called_once_with(search_query)
+    mock_adapter.query.assert_called_once_with(search_query, similarity_threshold=0.6, limit=5)
 
 
 def test_code_docs_search_tool(mock_adapter):
@@ -239,7 +239,7 @@ def test_code_docs_search_tool(mock_adapter):
     result = tool._run(search_query=search_query)
     assert "test documentation" in result
     mock_adapter.add.assert_called_once_with(docs_url, data_type=DataType.DOCS_SITE)
-    mock_adapter.query.assert_called_once_with(search_query)
+    mock_adapter.query.assert_called_once_with(search_query, similarity_threshold=0.6, limit=5)
 
     mock_adapter.query.reset_mock()
     mock_adapter.add.reset_mock()
@@ -248,7 +248,7 @@ def test_code_docs_search_tool(mock_adapter):
     result = tool._run(docs_url=docs_url, search_query=search_query)
     assert "test documentation" in result
     mock_adapter.add.assert_called_once_with(docs_url, data_type=DataType.DOCS_SITE)
-    mock_adapter.query.assert_called_once_with(search_query)
+    mock_adapter.query.assert_called_once_with(search_query, similarity_threshold=0.6, limit=5)
 
 
 def test_github_search_tool(mock_adapter):
@@ -264,9 +264,11 @@ def test_github_search_tool(mock_adapter):
     result = tool._run(search_query="tell me about crewai repo")
     assert "repo description" in result
     mock_adapter.add.assert_called_once_with(
-        "repo:crewai/crewai type:code", data_type="github", loader=ANY
+        "https://github.com/crewai/crewai",
+        data_type=DataType.GITHUB,
+        metadata={"content_types": ["code"], "gh_token": "test_token"}
     )
-    mock_adapter.query.assert_called_once_with("tell me about crewai repo")
+    mock_adapter.query.assert_called_once_with("tell me about crewai repo", similarity_threshold=0.6, limit=5)
 
     # ensure content types provided by run call is used
     mock_adapter.query.reset_mock()
@@ -280,9 +282,11 @@ def test_github_search_tool(mock_adapter):
     )
     assert "repo description" in result
     mock_adapter.add.assert_called_once_with(
-        "repo:crewai/crewai type:code,issue", data_type="github", loader=ANY
+        "https://github.com/crewai/crewai",
+        data_type=DataType.GITHUB,
+        metadata={"content_types": ["code", "issue"], "gh_token": "test_token"}
     )
-    mock_adapter.query.assert_called_once_with("tell me about crewai repo")
+    mock_adapter.query.assert_called_once_with("tell me about crewai repo", similarity_threshold=0.6, limit=5)
 
     # ensure default content types are used if not provided
     mock_adapter.query.reset_mock()
@@ -295,9 +299,11 @@ def test_github_search_tool(mock_adapter):
     result = tool._run(
         search_query="tell me about crewai repo",
     )
     assert "repo description" in result
     mock_adapter.add.assert_called_once_with(
-        "repo:crewai/crewai type:code,repo,pr,issue", data_type="github", loader=ANY
+        "https://github.com/crewai/crewai",
+        data_type=DataType.GITHUB,
+        metadata={"content_types": ["code", "repo", "pr", "issue"], "gh_token": "test_token"}
     )
-    mock_adapter.query.assert_called_once_with("tell me about crewai repo")
+    mock_adapter.query.assert_called_once_with("tell me about crewai repo", similarity_threshold=0.6, limit=5)
 
     # ensure nothing is added if no repo is provided
     mock_adapter.query.reset_mock()
@@ -306,4 +312,4 @@ def test_github_search_tool(mock_adapter):
     tool = GithubSearchTool(gh_token="test_token", adapter=mock_adapter)
     result = tool._run(search_query="tell me about crewai repo")
     mock_adapter.add.assert_not_called()
-    mock_adapter.query.assert_called_once_with("tell me about crewai repo")
+    mock_adapter.query.assert_called_once_with("tell me about crewai repo", similarity_threshold=0.6, limit=5)
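Taken together, the assertions above pin down the contract this patch introduces: every search tool forwards `similarity_threshold=0.6` and `limit=5` to the adapter unless the caller overrides them. A sketch of the forwarding these tests imply (the parameter names come from the diff; the default constants and the simplified class around them are assumptions, not the real `RagTool`):

```python
# Sketch of the _run -> adapter.query forwarding the assertions above
# exercise. The 0.6/5 fallbacks mirror the asserted defaults; the class
# is a simplified stand-in, not code from the repository.
from typing import Optional


class SearchToolSketch:
    DEFAULT_SIMILARITY_THRESHOLD = 0.6
    DEFAULT_LIMIT = 5

    def __init__(self, adapter):
        self.adapter = adapter

    def _run(
        self,
        query: str,
        similarity_threshold: Optional[float] = None,
        limit: Optional[int] = None,
    ) -> str:
        # None means "use the tool-level default", matching the
        # `similarity_threshold=0.6, limit=5` calls asserted in the tests.
        return self.adapter.query(
            query,
            similarity_threshold=similarity_threshold or self.DEFAULT_SIMILARITY_THRESHOLD,
            limit=limit or self.DEFAULT_LIMIT,
        )
```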
From 1bd10bb2546c63e63f32bf1811724c4ba63fb912 Mon Sep 17 00:00:00 2001
From: Ryan
Date: Tue, 23 Sep 2025 00:19:56 +0800
Subject: [PATCH 388/391] Add base_url param to exa search tool (#454)

---
 .../tools/exa_tools/exa_search_tool.py | 13 ++++-
 tests/tools/exa_search_tool_test.py    | 58 ++++++++++++++++---
 2 files changed, 61 insertions(+), 10 deletions(-)

diff --git a/src/crewai_tools/tools/exa_tools/exa_search_tool.py b/src/crewai_tools/tools/exa_tools/exa_search_tool.py
index 332576039..5dbd1df0b 100644
--- a/src/crewai_tools/tools/exa_tools/exa_search_tool.py
+++ b/src/crewai_tools/tools/exa_tools/exa_search_tool.py
@@ -43,10 +43,18 @@ class EXASearchTool(BaseTool):
         description="API key for Exa services",
         json_schema_extra={"required": False},
     )
+    base_url: Optional[str] = Field(
+        default_factory=lambda: os.getenv("EXA_BASE_URL"),
+        description="API server url",
+        json_schema_extra={"required": False},
+    )
     env_vars: List[EnvVar] = [
         EnvVar(
             name="EXA_API_KEY", description="API key for Exa services", required=False
         ),
+        EnvVar(
+            name="EXA_BASE_URL", description="API url for the Exa services", required=False
+        ),
     ]
 
     def __init__(
@@ -73,7 +81,10 @@ class EXASearchTool(BaseTool):
             raise ImportError(
                 "You are missing the 'exa_py' package. Would you like to install it?"
             )
-        self.client = Exa(api_key=self.api_key)
+        client_kwargs = {"api_key": self.api_key}
+        if self.base_url:
+            client_kwargs["base_url"] = self.base_url
+        self.client = Exa(**client_kwargs)
         self.content = content
         self.summary = summary
         self.type = type

diff --git a/tests/tools/exa_search_tool_test.py b/tests/tools/exa_search_tool_test.py
index 17c92e2f4..02e9b4cc2 100644
--- a/tests/tools/exa_search_tool_test.py
+++ b/tests/tools/exa_search_tool_test.py
@@ -15,18 +15,58 @@ def mock_exa_api_key():
     yield
 
 
 def test_exa_search_tool_initialization():
-    with patch("crewai_tools.tools.exa_tools.exa_search_tool.Exa") as mock_exa_class:
-        api_key = "test_api_key"
-        tool = EXASearchTool(api_key=api_key)
+    with patch.dict(os.environ, {}, clear=True):
+        with patch("crewai_tools.tools.exa_tools.exa_search_tool.Exa") as mock_exa_class:
+            api_key = "test_api_key"
+            tool = EXASearchTool(api_key=api_key)
 
-        assert tool.api_key == api_key
-        assert tool.content is False
-        assert tool.summary is False
-        assert tool.type == "auto"
-        mock_exa_class.assert_called_once_with(api_key=api_key)
+            assert tool.api_key == api_key
+            assert tool.content is False
+            assert tool.summary is False
+            assert tool.type == "auto"
+            mock_exa_class.assert_called_once_with(api_key=api_key)
 
 
 def test_exa_search_tool_initialization_with_env(mock_exa_api_key):
+    with patch.dict(os.environ, {"EXA_API_KEY": "test_key_from_env"}, clear=True):
+        with patch("crewai_tools.tools.exa_tools.exa_search_tool.Exa") as mock_exa_class:
+            EXASearchTool()
+            mock_exa_class.assert_called_once_with(api_key="test_key_from_env")
+
+
+def test_exa_search_tool_initialization_with_base_url():
+    with patch.dict(os.environ, {}, clear=True):
+        with patch("crewai_tools.tools.exa_tools.exa_search_tool.Exa") as mock_exa_class:
+            api_key = "test_api_key"
+            base_url = "https://custom.exa.api.com"
+            tool = EXASearchTool(api_key=api_key, base_url=base_url)
+
+            assert tool.api_key == api_key
+            assert tool.base_url == base_url
+            assert tool.content is False
+            assert tool.summary is False
+            assert tool.type == "auto"
+            mock_exa_class.assert_called_once_with(api_key=api_key, base_url=base_url)
+
+
+@pytest.fixture
+def mock_exa_base_url():
+    with patch.dict(os.environ, {"EXA_BASE_URL": "https://env.exa.api.com"}):
+        yield
+
+
+def test_exa_search_tool_initialization_with_env_base_url(mock_exa_api_key, mock_exa_base_url):
     with patch("crewai_tools.tools.exa_tools.exa_search_tool.Exa") as mock_exa_class:
         EXASearchTool()
-        mock_exa_class.assert_called_once_with(api_key="test_key_from_env")
+        mock_exa_class.assert_called_once_with(api_key="test_key_from_env", base_url="https://env.exa.api.com")
+
+
+def test_exa_search_tool_initialization_without_base_url():
+    with patch.dict(os.environ, {}, clear=True):
+        with patch("crewai_tools.tools.exa_tools.exa_search_tool.Exa") as mock_exa_class:
+            api_key = "test_api_key"
+            tool = EXASearchTool(api_key=api_key)
+
+            assert tool.api_key == api_key
+            assert tool.base_url is None
+            mock_exa_class.assert_called_once_with(api_key=api_key)
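With this patch the Exa client can be pointed at a non-default endpoint either per instance or via the environment, which the `Field` default factory reads. A usage sketch (the endpoint URL is a placeholder, and constructing the tool requires the `exa_py` package):

```python
# Two equivalent ways to target a custom Exa-compatible endpoint, per the
# patch above. The URL is a placeholder, not a real service.
import os

from crewai_tools.tools.exa_tools.exa_search_tool import EXASearchTool

# Explicit parameter wins when provided...
tool = EXASearchTool(api_key="sk-...", base_url="https://exa.internal.example.com")

# ...or rely on the environment variable the default_factory falls back to.
os.environ["EXA_BASE_URL"] = "https://exa.internal.example.com"
tool = EXASearchTool(api_key="sk-...")
```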
From 7c2aa2f9239e93e7986731a4f94bcd699ac47efa Mon Sep 17 00:00:00 2001
From: Greyson LaLonde
Date: Mon, 22 Sep 2025 19:35:08 -0400
Subject: [PATCH 389/391] fix: remove circular deps

- Remove embedchain and resolve circular deps with ChromaDB
- Adjust lockfile to match crewai requirements
- Mock embeddings and vector DB in RAG tool tests
---
 tests/tools/rag/rag_tool_test.py | 150 ++++++++++++++++++++++++++++---
 1 file changed, 136 insertions(+), 14 deletions(-)

diff --git a/tests/tools/rag/rag_tool_test.py b/tests/tools/rag/rag_tool_test.py
index 693cd120a..d50d49498 100644
--- a/tests/tools/rag/rag_tool_test.py
+++ b/tests/tools/rag/rag_tool_test.py
@@ -1,54 +1,176 @@
+"""Tests for RAG tool with mocked embeddings and vector database."""
-from tempfile import TemporaryDirectory
-from typing import cast
-from pathlib import Path
+from tempfile import TemporaryDirectory
+from typing import Any, cast
+from pathlib import Path
+from unittest.mock import Mock, patch, MagicMock
+
+import pytest
 
 from crewai_tools.adapters.crewai_rag_adapter import CrewAIRagAdapter
 from crewai_tools.tools.rag.rag_tool import RagTool
 
 
-def test_rag_tool_initialization():
+@patch('crewai_tools.adapters.crewai_rag_adapter.get_rag_client')
+@patch('crewai_tools.adapters.crewai_rag_adapter.create_client')
+def test_rag_tool_initialization(
+    mock_create_client: Mock,
+    mock_get_rag_client: Mock
+) -> None:
     """Test that RagTool initializes with CrewAI adapter by default."""
+    mock_client = MagicMock()
+    mock_client.get_or_create_collection = MagicMock(return_value=None)
+    mock_get_rag_client.return_value = mock_client
+    mock_create_client.return_value = mock_client
+
     class MyTool(RagTool):
         pass
 
     tool = MyTool()
     assert tool.adapter is not None
     assert isinstance(tool.adapter, CrewAIRagAdapter)
-
+
     adapter = cast(CrewAIRagAdapter, tool.adapter)
     assert adapter.collection_name == "rag_tool_collection"
     assert adapter._client is not None
 
 
-def test_rag_tool_add_and_query():
+@patch('crewai_tools.adapters.crewai_rag_adapter.get_rag_client')
+@patch('crewai_tools.adapters.crewai_rag_adapter.create_client')
+def test_rag_tool_add_and_query(
+    mock_create_client: Mock,
+    mock_get_rag_client: Mock
+) -> None:
     """Test adding content and querying with RagTool."""
+    mock_client = MagicMock()
+    mock_client.get_or_create_collection = MagicMock(return_value=None)
+    mock_client.add_documents = MagicMock(return_value=None)
+    mock_client.search = MagicMock(return_value=[
+        {"content": "The sky is blue on a clear day.", "metadata": {}, "score": 0.9}
+    ])
+    mock_get_rag_client.return_value = mock_client
+    mock_create_client.return_value = mock_client
+
     class MyTool(RagTool):
         pass
-
+
     tool = MyTool()
-
+
     tool.add("The sky is blue on a clear day.")
     tool.add("Machine learning is a subset of artificial intelligence.")
-
+
+    # Verify documents were added
+    assert mock_client.add_documents.call_count == 2
+
     result = tool._run(query="What color is the sky?")
     assert "Relevant Content:" in result
-
+    assert "The sky is blue" in result
+
+    mock_client.search.return_value = [
+        {"content": "Machine learning is a subset of artificial intelligence.", "metadata": {}, "score": 0.85}
+    ]
+
     result = tool._run(query="Tell me about machine learning")
     assert "Relevant Content:" in result
+    assert "Machine learning" in result
 
 
-def test_rag_tool_with_file():
+@patch('crewai_tools.adapters.crewai_rag_adapter.get_rag_client')
+@patch('crewai_tools.adapters.crewai_rag_adapter.create_client')
+def test_rag_tool_with_file(
+    mock_create_client: Mock,
+    mock_get_rag_client: Mock
+) -> None:
     """Test RagTool with file content."""
+    mock_client = MagicMock()
+    mock_client.get_or_create_collection = MagicMock(return_value=None)
+    mock_client.add_documents = MagicMock(return_value=None)
+    mock_client.search = MagicMock(return_value=[
+        {"content": "Python is a programming language known for its simplicity.", "metadata": {"file_path": "test.txt"}, "score": 0.95}
+    ])
+    mock_get_rag_client.return_value = mock_client
+    mock_create_client.return_value = mock_client
+
     with TemporaryDirectory() as tmpdir:
         test_file = Path(tmpdir) / "test.txt"
         test_file.write_text("Python is a programming language known for its simplicity.")
-
+
         class MyTool(RagTool):
             pass
-
+
         tool = MyTool()
         tool.add(str(test_file))
-
+
+        assert mock_client.add_documents.called
+
         result = tool._run(query="What is Python?")
         assert "Relevant Content:" in result
+        assert "Python is a programming language" in result
+
+
+@patch('crewai_tools.tools.rag.rag_tool.RagTool._create_embedding_function')
+@patch('crewai_tools.adapters.crewai_rag_adapter.create_client')
+def test_rag_tool_with_custom_embeddings(
+    mock_create_client: Mock,
+    mock_create_embedding: Mock
+) -> None:
+    """Test RagTool with custom embeddings configuration to ensure no API calls."""
+    mock_embedding_func = MagicMock()
+    mock_embedding_func.return_value = [[0.2] * 1536]
+    mock_create_embedding.return_value = mock_embedding_func
+
+    mock_client = MagicMock()
+    mock_client.get_or_create_collection = MagicMock(return_value=None)
+    mock_client.add_documents = MagicMock(return_value=None)
+    mock_client.search = MagicMock(return_value=[
+        {"content": "Test content", "metadata": {}, "score": 0.8}
+    ])
+    mock_create_client.return_value = mock_client
+
+    class MyTool(RagTool):
+        pass
+
+    config = {
+        "vectordb": {
+            "provider": "chromadb",
+            "config": {}
+        },
+        "embedding_model": {
+            "provider": "openai",
+            "config": {
+                "model": "text-embedding-3-small"
+            }
+        }
+    }
+
+    tool = MyTool(config=config)
+    tool.add("Test content")
+
+    result = tool._run(query="Test query")
+    assert "Relevant Content:" in result
+    assert "Test content" in result
+
+    mock_create_embedding.assert_called()
+
+
+@patch('crewai_tools.adapters.crewai_rag_adapter.get_rag_client')
+@patch('crewai_tools.adapters.crewai_rag_adapter.create_client')
+def test_rag_tool_no_results(
+    mock_create_client: Mock,
+    mock_get_rag_client: Mock
+) -> None:
+    """Test RagTool when no relevant content is found."""
+    mock_client = MagicMock()
+    mock_client.get_or_create_collection = MagicMock(return_value=None)
+    mock_client.search = MagicMock(return_value=[])
+    mock_get_rag_client.return_value = mock_client
+    mock_create_client.return_value = mock_client
+
+    class MyTool(RagTool):
+        pass
+
+    tool = MyTool()
+
+    result = tool._run(query="Non-existent content")
+    assert "Relevant Content:" in result
+    assert "No relevant content found" in result
\ No newline at end of file
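The rewritten tests above patch `create_client` and `get_rag_client` at the adapter module so that neither an embeddings API nor a vector database is ever touched. The same double patching repeats in every test; a consolidated fixture would look roughly like this (the module path and patched names are taken from the diff, but the fixture itself is a sketch, not code from the repository):

```python
# Sketch: one fixture consolidating the repeated client patching above.
# Patched names come from the diff; the fixture is a suggestion only.
from unittest.mock import MagicMock, patch

import pytest


@pytest.fixture
def mock_rag_client():
    mock_client = MagicMock()
    mock_client.get_or_create_collection = MagicMock(return_value=None)
    mock_client.add_documents = MagicMock(return_value=None)
    mock_client.search = MagicMock(return_value=[])
    with patch("crewai_tools.adapters.crewai_rag_adapter.create_client", return_value=mock_client), \
         patch("crewai_tools.adapters.crewai_rag_adapter.get_rag_client", return_value=mock_client):
        yield mock_client
```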
From 2a927933f2ed2d47fe08a9a2eb40ffc1d6c4e850 Mon Sep 17 00:00:00 2001
From: Thiago Moretto <168731+thiagomoretto@users.noreply.github.com>
Date: Wed, 24 Sep 2025 13:34:37 -0300
Subject: [PATCH 390/391] Add OPENAI_API_KEY as required by QdrantSearchTool
 (#461)

---
 .../tools/qdrant_vector_search_tool/qdrant_search_tool.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py
index 73e373ae8..61fd63c8c 100644
--- a/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py
+++ b/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py
@@ -15,7 +15,7 @@ except ImportError:
     FieldCondition = Any
     MatchValue = Any
 
-from crewai.tools import BaseTool
+from crewai.tools import BaseTool, EnvVar
 from pydantic import BaseModel, Field
 
 
@@ -75,6 +75,9 @@ class QdrantVectorSearchTool(BaseTool):
         description="A custom embedding function to use for vectorization. If not provided, the default model will be used.",
     )
     package_dependencies: List[str] = ["qdrant-client"]
+    env_vars: List[EnvVar] = [
+        EnvVar(name="OPENAI_API_KEY", description="API key for OpenAI", required=True)
+    ]
 
     def __init__(self, **kwargs):
         super().__init__(**kwargs)

From e350817b8d6cf3ebbe642c23d83e3b4d950daa11 Mon Sep 17 00:00:00 2001
From: Greyson LaLonde
Date: Thu, 25 Sep 2025 08:00:41 -0400
Subject: [PATCH 391/391] fix: remove debug logging (#462)

---
 src/crewai_tools/tools/rag/rag_tool.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/src/crewai_tools/tools/rag/rag_tool.py b/src/crewai_tools/tools/rag/rag_tool.py
index 2397eac6f..ff4bc6a10 100644
--- a/src/crewai_tools/tools/rag/rag_tool.py
+++ b/src/crewai_tools/tools/rag/rag_tool.py
@@ -131,12 +131,9 @@ class RagTool(BaseTool):
         if api_key:
             factory_config["api_key"] = api_key
 
-        print(f"Creating embedding function with config: {factory_config}")
 
         if provider == "chromadb":
             embedding_func = get_embedding_function(factory_config)
-            print(f"Created embedding function: {embedding_func}")
-            print(f"Embedding function type: {type(embedding_func)}")
             return embedding_func
 
         elif provider == "qdrant":
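The removed `print` calls fired unconditionally on every embedding setup. If diagnostic output is ever wanted again, the conventional replacement is a module-level logger that callers opt into; a general sketch of that pattern, not code from this repository:

```python
# General pattern: module-level logger instead of print-based debugging.
# A sketch of the convention only; not the repository's actual code.
import logging

logger = logging.getLogger(__name__)


def create_embedding_function(factory_config: dict):
    # Emitted only when the application enables DEBUG verbosity,
    # unlike the removed print() calls, which always fired.
    logger.debug("Creating embedding function with config: %s", factory_config)
    ...
```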