From e066b4dcb1130f9abfc908e6b0dfe089e4afe5f8 Mon Sep 17 00:00:00 2001 From: Selim Erhan <62727953+selimhanerhan@users.noreply.github.com> Date: Fri, 19 Apr 2024 01:39:33 -0400 Subject: [PATCH 1/6] Update LLM-Connections.md (#359) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Created a short documentation on how to use Llama2 locally with crewAI thanks to the help of Ollama. Co-authored-by: João Moura --- docs/how-to/LLM-Connections.md | 66 +++++++++++++++++++++++++++++++++- 1 file changed, 65 insertions(+), 1 deletion(-) diff --git a/docs/how-to/LLM-Connections.md b/docs/how-to/LLM-Connections.md index 54eedaa89..510f2405f 100644 --- a/docs/how-to/LLM-Connections.md +++ b/docs/how-to/LLM-Connections.md @@ -42,7 +42,7 @@ example_agent = Agent( ``` ## Ollama Integration -Ollama is preferred for local LLM integration, offering customization and privacy benefits. To integrate Ollama with CrewAI, set the appropriate environment variables as shown below. Note: Detailed Ollama setup is beyond this document's scope, but general guidance is provided. +Ollama is preferred for local LLM integration, offering customization and privacy benefits. To integrate Ollama with CrewAI, set the appropriate environment variables as shown below. ### Setting Up Ollama - **Environment Variables Configuration**: To integrate Ollama, set the following environment variables: ``` OPENAI_API_BASE='http://localhost:11434/v1' @@ -52,6 +52,70 @@ OPENAI_MODEL_NAME='openhermes' # Adjust based on available model OPENAI_API_KEY='' ``` +## Ollama Integration (ex. for using Llama 2 locally) +1. [Download Ollama](https://ollama.com/download). +2. After setting up Ollama, pull Llama2 by typing the following line into the terminal ```ollama pull Llama2```. +3. Create a ModelFile similar to the one below in your project directory. 
+``` +FROM llama2 + +# Set parameters + +PARAMETER temperature 0.8 +PARAMETER stop Result + +# Sets a custom system message to specify the behavior of the chat assistant + +# Leaving it blank for now. + +SYSTEM """""" +``` +4. Create a script to get the base model, which in our case is llama2, and create a model on top of that with the ModelFile above. PS: this will be a ".sh" file. +``` +#!/bin/zsh + +# variables +model_name="llama2" +custom_model_name="crewai-llama2" + +#get the base model +ollama pull $model_name + +#create the model file +ollama create $custom_model_name -f ./Llama2ModelFile +``` +5. Go into the directory where the script file and ModelFile are located and run the script. +6. Enjoy your free Llama2 model that is powered by excellent agents from crewai. +``` +from crewai import Agent, Task, Crew +from langchain_openai import ChatOpenAI +import os +os.environ["OPENAI_API_KEY"] = "NA" + +llm = ChatOpenAI( + model = "crewai-llama2", + base_url = "http://localhost:11434/v1") + +general_agent = Agent(role = "Math Professor", + goal = """Provide the solution to the students that are asking mathematical questions and give them the answer.""", + backstory = """You are an excellent math professor that likes to solve math questions in a way that everyone can understand your solution""", + allow_delegation = False, + verbose = True, + llm = llm) +task = Task (description="""what is 3 + 5""", + agent = general_agent) + +crew = Crew( + agents=[general_agent], + tasks=[task], + verbose=2 + ) + +result = crew.kickoff() + +print(result) +``` + ## HuggingFace Integration There are a couple of different ways you can use HuggingFace to host your LLM. 
From afc616d263590c4feac7f42b1b5fbc3289f6f540 Mon Sep 17 00:00:00 2001 From: Kaushal Powar <90775147+kaushalpowar@users.noreply.github.com> Date: Fri, 19 Apr 2024 11:10:38 +0530 Subject: [PATCH 2/6] Update GitHubSearchTool.md (#357) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GithubSearchTool was misspelled as GitHubSearchTool Co-authored-by: João Moura --- docs/tools/GitHubSearchTool.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/tools/GitHubSearchTool.md b/docs/tools/GitHubSearchTool.md index 707e4c86c..6751da598 100644 --- a/docs/tools/GitHubSearchTool.md +++ b/docs/tools/GitHubSearchTool.md @@ -22,15 +22,15 @@ from crewai_tools import GithubSearchTool # Initialize the tool for semantic searches within a specific GitHub repository tool = GithubSearchTool( - github_repo='https://github.com/example/repo', - content_types=['code', 'issue'] # Options: code, repo, pr, issue + github_repo='https://github.com/example/repo', + content_types=['code', 'issue'] # Options: code, repo, pr, issue ) # OR # Initialize the tool for semantic searches within a specific GitHub repository, so the agent can search any repository if it learns about during its execution tool = GithubSearchTool( - content_types=['code', 'issue'] # Options: code, repo, pr, issue + content_types=['code', 'issue'] # Options: code, repo, pr, issue ) ``` From b0acae81b0a56c9cef942fa52a674f66dd0c3e79 Mon Sep 17 00:00:00 2001 From: Sajal Sharma Date: Fri, 19 Apr 2024 13:41:36 +0800 Subject: [PATCH 3/6] Update LLM-Connections.md (#353) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: João Moura --- docs/how-to/LLM-Connections.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/how-to/LLM-Connections.md b/docs/how-to/LLM-Connections.md index 510f2405f..516388efc 100644 --- a/docs/how-to/LLM-Connections.md +++ b/docs/how-to/LLM-Connections.md @@ -16,8 
+16,8 @@ The `Agent` class is the cornerstone for implementing AI solutions in CrewAI. He - `role`: Defines the agent's role within the solution. - `goal`: Specifies the agent's objective. - `backstory`: Provides a background story to the agent. - - `llm`: The language model that will run the agent. By default, it uses the GPT-4 model defined in the environment variable "OPENAI_MODEL_NAME". - - `function_calling_llm`: The language model that will handle the tool calling for this agent, overriding the crew function_calling_llm. Optional. + - `llm`: Indicates the Large Language Model the agent uses. By default, it uses the GPT-4 model defined in the environment variable "OPENAI_MODEL_NAME". + - `function_calling_llm` *Optional*: Will turn the ReAct crewAI agent into a function calling agent. - `max_iter`: Maximum number of iterations for an agent to execute a task, default is 15. - `memory`: Enables the agent to retain information during and a across executions. Default is `False`. - `max_rpm`: Maximum number of requests per minute the agent's execution should respect. Optional. From cdb0a9c9536f29aeef9970292869319611d09235 Mon Sep 17 00:00:00 2001 From: Emmanuel Crown Date: Fri, 19 Apr 2024 01:42:04 -0400 Subject: [PATCH 4/6] Fixed a typo in the main readme on the llm selection , options for an agent (#349) --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 40ae9a0f0..09d01c889 100644 --- a/README.md +++ b/README.md @@ -82,7 +82,7 @@ researcher = Agent( verbose=True, allow_delegation=False, tools=[search_tool] - # You can pass an optional llm attribute specifying what mode you wanna use. + # You can pass an optional llm attribute specifying what model you wanna use. 
# It can be a local model through Ollama / LM Studio or a remote # model like OpenAI, Mistral, Antrophic or others (https://docs.crewai.com/how-to/LLM-Connections/) # @@ -253,6 +253,7 @@ CrewAI uses anonymous telemetry to collect usage data with the main purpose of h There is NO data being collected on the prompts, tasks descriptions agents backstories or goals nor tools usage, no API calls, nor responses nor any data that is being processed by the agents, nor any secrets and env vars. Data collected includes: + - Version of crewAI - So we can understand how many users are using the latest version - Version of Python From ff76715cd2a1d636410e0dba4852d0ab972930c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elijas=20Dap=C5=A1auskas?= <4084885+Elijas@users.noreply.github.com> Date: Fri, 19 Apr 2024 08:44:08 +0300 Subject: [PATCH 5/6] Allow minor version patches to `python-dotenv` (#339) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: João Moura --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index f4c3f915f..01436da07 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,7 +25,7 @@ instructor = "^0.5.2" regex = "^2023.12.25" crewai-tools = { version = "^0.1.7", optional = true } click = "^8.1.7" -python-dotenv = "1.0.0" +python-dotenv = "^1.0.0" embedchain = "^0.1.98" appdirs = "^1.4.4" From 3d5257592b440fca8c46b77471e1c34c901ee57f Mon Sep 17 00:00:00 2001 From: Braelyn Boynton Date: Sat, 20 Apr 2024 08:20:13 -0700 Subject: [PATCH 6/6] AgentOps Implementation (#411) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * implements agentops with a langchain handler, agent tracking and tool call recording * track tool usage * end session after completion * track tool usage time * better tool and llm tracking * code cleanup * make agentops optional * optional dependency usage * remove telemetry code * optional 
agentops * agentops version bump * remove org key * true dependency * add crew org key to agentops * cleanup * Update pyproject.toml * Revert "true dependency" This reverts commit e52e8e9568c2c00f29c1f800ea00d472413b6067. * Revert "cleanup" This reverts commit 7f5635fb9ef9aaafc4283fdf1a892d8b5c425220. * optional parent key * agentops 0.1.5 * Revert "Revert "cleanup"" This reverts commit cea33d9a5d37289b9e257046c1354057b27fbe7f. * Revert "Revert "true dependency"" This reverts commit 4d1b460b * cleanup * Forcing version 0.1.5 * Update pyproject.toml --------- Co-authored-by: João Moura --- pyproject.toml | 1 + src/crewai/agent.py | 5 +++++ src/crewai/crew.py | 3 +++ src/crewai/tools/tool_usage.py | 4 ++++ 4 files changed, 13 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 01436da07..179c48293 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,6 +28,7 @@ click = "^8.1.7" python-dotenv = "^1.0.0" embedchain = "^0.1.98" appdirs = "^1.4.4" +agentops = "0.1.6" [tool.poetry.extras] tools = ["crewai-tools"] diff --git a/src/crewai/agent.py b/src/crewai/agent.py index 12bc785ce..ac080f2d0 100644 --- a/src/crewai/agent.py +++ b/src/crewai/agent.py @@ -24,8 +24,10 @@ from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, Tool from crewai.memory.contextual.contextual_memory import ContextualMemory from crewai.utilities import I18N, Logger, Prompts, RPMController from crewai.utilities.token_counter_callback import TokenCalcHandler, TokenProcess +from agentops.agent import track_agent +@track_agent() class Agent(BaseModel): """Represents an agent in a system. 
@@ -55,6 +57,8 @@ class Agent(BaseModel): _rpm_controller: RPMController = PrivateAttr(default=None) _request_within_rpm_limit: Any = PrivateAttr(default=None) _token_process: TokenProcess = TokenProcess() + agent_ops_agent_name: str = None + agent_ops_agent_id: str = None formatting_errors: int = 0 model_config = ConfigDict(arbitrary_types_allowed=True) @@ -129,6 +133,7 @@ class Agent(BaseModel): def __init__(__pydantic_self__, **data): config = data.pop("config", {}) super().__init__(**config, **data) + __pydantic_self__.agent_ops_agent_name = __pydantic_self__.role @field_validator("id", mode="before") @classmethod diff --git a/src/crewai/crew.py b/src/crewai/crew.py index 0b88d5e4b..6ab6e61c9 100644 --- a/src/crewai/crew.py +++ b/src/crewai/crew.py @@ -26,6 +26,7 @@ from crewai.task import Task from crewai.telemetry import Telemetry from crewai.tools.agent_tools import AgentTools from crewai.utilities import I18N, FileHandler, Logger, RPMController +import agentops class Crew(BaseModel): @@ -239,6 +240,7 @@ class Crew(BaseModel): self._set_tasks_callbacks() i18n = I18N(language=self.language, language_file=self.language_file) + agentops.set_parent_key("daebe730-f54d-4af5-98df-e6946fb76d13") for agent in self.agents: agent.i18n = i18n @@ -374,6 +376,7 @@ class Crew(BaseModel): def _finish_execution(self, output) -> None: if self.max_rpm: self._rpm_controller.stop_rpm_counter() + agentops.end_session(end_state="Success", end_state_reason="Finished Execution") self._telemetry.end_crew(self, output) def __repr__(self): diff --git a/src/crewai/tools/tool_usage.py b/src/crewai/tools/tool_usage.py index 954572328..b11f4ec2a 100644 --- a/src/crewai/tools/tool_usage.py +++ b/src/crewai/tools/tool_usage.py @@ -9,6 +9,7 @@ from crewai.agents.tools_handler import ToolsHandler from crewai.telemetry import Telemetry from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling from crewai.utilities import I18N, Converter, ConverterError, Printer +import agentops 
OPENAI_BIGGER_MODELS = ["gpt-4"] @@ -96,6 +97,7 @@ class ToolUsage: tool: BaseTool, calling: Union[ToolCalling, InstructorToolCalling], ) -> None: + tool_event = agentops.ToolEvent(name=calling.tool_name) if self._check_tool_repeated_usage(calling=calling): try: result = self._i18n.errors("task_repeated_usage").format( @@ -159,6 +161,7 @@ class ToolUsage: self._printer.print(content=f"\n\n{error_message}\n", color="red") return error self.task.increment_tools_errors() + agentops.record(agentops.ErrorEvent(details=e, trigger_event=tool_event)) return self.use(calling=calling, tool_string=tool_string) if self.tools_handler: @@ -179,6 +182,7 @@ class ToolUsage: ) self._printer.print(content=f"\n\n{result}\n", color="purple") + agentops.record(tool_event) self._telemetry.tool_usage( llm=self.function_calling_llm, tool_name=tool.name,