mirror of https://github.com/crewAIInc/crewAI.git
synced 2025-12-25 16:58:29 +00:00

Compare commits: feat/slidi...add-codeql (8 commits)
Commits:
- 65053e3b4a
- 498e96a419
- c0c59dc932
- f3b3d321e5
- 67e4433dc2
- 4a7ae8df71
- 09f92122d5
- 8118b7b7d6

.github/ISSUE_TEMPLATE/bug_report.md (vendored, new file, 35 lines)
@@ -0,0 +1,35 @@
---
name: Bug report
about: Create a report to help us improve CrewAI
title: "[BUG]"
labels: bug
assignees: ''

---

**Description**
Provide a clear and concise description of what the bug is.

**Steps to Reproduce**
Provide a step-by-step process to reproduce the behavior:

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots/Code snippets**
If applicable, add screenshots or code snippets to help explain your problem.

**Environment Details:**
- **Operating System**: [e.g., Ubuntu 20.04, macOS Catalina, Windows 10]
- **Python Version**: [e.g., 3.8, 3.9, 3.10]
- **crewAI Version**: [e.g., 0.30.11]
- **crewAI Tools Version**: [e.g., 0.2.6]

**Logs**
Include relevant logs or error messages if applicable.

**Possible Solution**
Have a solution in mind? Please suggest it here, or write "None".

**Additional context**
Add any other context about the problem here.

.github/ISSUE_TEMPLATE/feature_request.md (vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
---
name: Feature request
about: Suggest a Feature to improve CrewAI
title: "[FEAT]"
labels: feature-request, improvement
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
If possible attach the Issue related to it

**Describe the solution you'd like / Use-case**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.

.github/workflows/codeql.yml (vendored, new file, 93 lines)
@@ -0,0 +1,93 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
  push:
    branches: [ "main" ]
  pull_request:
    branches: [ "main" ]
  schedule:
    - cron: '28 20 * * 1'

jobs:
  analyze:
    name: Analyze (${{ matrix.language }})
    # Runner size impacts CodeQL analysis time. To learn more, please see:
    #   - https://gh.io/recommended-hardware-resources-for-running-codeql
    #   - https://gh.io/supported-runners-and-hardware-resources
    #   - https://gh.io/using-larger-runners (GitHub.com only)
    # Consider using larger runners or machines with greater resources for possible analysis time improvements.
    runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
    timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }}
    permissions:
      # required for all workflows
      security-events: write

      # required to fetch internal or private CodeQL packs
      packages: read

      # only required for workflows in private repositories
      actions: read
      contents: read

    strategy:
      fail-fast: false
      matrix:
        include:
          - language: python
            build-mode: none
        # CodeQL supports the following values keywords for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift'
        # Use `c-cpp` to analyze code written in C, C++ or both
        # Use 'java-kotlin' to analyze code written in Java, Kotlin or both
        # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
        # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
        # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
        # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
        # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        with:
          languages: ${{ matrix.language }}
          build-mode: ${{ matrix.build-mode }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.

          # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
          # queries: security-extended,security-and-quality

      # If the analyze step fails for one of the languages you are analyzing with
      # "We were unable to automatically build your code", modify the matrix above
      # to set the build mode to "manual" for that language. Then modify this step
      # to build your code.
      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
      - if: matrix.build-mode == 'manual'
        shell: bash
        run: |
          echo 'If you are using a "manual" build mode for one or more of the' \
            'languages you are analyzing, replace this with the commands to build' \
            'your code, for example:'
          echo '  make bootstrap'
          echo '  make release'
          exit 1

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3
        with:
          category: "/language:${{matrix.language}}"

@@ -20,7 +20,7 @@ Before getting started with CrewAI, make sure that you have installed it via pip
$ pip install crewai crewai-tools
```

-### Virtual Environemnts
+### Virtual Environments
It is highly recommended that you use virtual environments to ensure that your CrewAI project is isolated from other projects and dependencies. Virtual environments provide a clean, separate workspace for each project, preventing conflicts between different versions of packages and libraries. This isolation is crucial for maintaining consistency and reproducibility in your development process. You have multiple options for setting up virtual environments depending on your operating system and Python version:

1. Use venv (Python's built-in virtual environment tool):
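A quick sketch of option 1 (illustrative, not part of the diff): the standard-library `venv` module can create the environment programmatically, equivalent to `python -m venv .venv` on the command line.

```python
# Illustrative only; equivalent to `python -m venv .venv` in a shell.
import venv

venv.create(".venv", with_pip=True)
```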

@@ -7,6 +7,7 @@ description: Comprehensive guide on crafting, using, and managing custom tools w
This guide provides detailed instructions on creating custom tools for the crewAI framework and how to efficiently manage and utilize these tools, incorporating the latest functionalities such as tool delegation, error handling, and dynamic tool calling. It also highlights the importance of collaboration tools, enabling agents to perform a wide range of actions.

### Prerequisites

Before creating your own tools, ensure you have the crewAI extra tools package installed:

```bash

@@ -31,7 +32,7 @@ class MyCustomTool(BaseTool):

### Using the `tool` Decorator

-Alternatively, use the `tool` decorator for a direct approach to create tools. This requires specifying attributes and the tool's logic within a function.
+Alternatively, you can use the tool decorator `@tool`. This approach allows you to define the tool's attributes and functionality directly within a function, offering a concise and efficient way to create specialized tools tailored to your needs.

```python
from crewai_tools import tool
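# Illustrative sketch, not part of the diff: a complete tool built with the
# decorator. The tool name and function body are hypothetical; the docstring
# becomes the description the agent uses when deciding whether to call the tool.
@tool("Word counter")
def count_words(text: str) -> int:
    """Count the whitespace-separated words in the given text."""
    return len(text.split())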

@@ -16,7 +16,7 @@ Here's an example of how to force the tool output as the result of an agent's ta
# Define a custom tool that returns the result as the answer
coding_agent =Agent(
    role="Data Scientist",
-    goal="Product amazing resports on AI",
+    goal="Product amazing reports on AI",
    backstory="You work with data and AI",
    tools=[MyCustomTool(result_as_answer=True)],
)
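# Illustrative continuation, not from the diff: with result_as_answer=True the
# tool's raw output becomes the task's final result instead of the agent's
# rewording of it. A task wired to this agent might look like:
writing_task = Task(
    description="Write a short report on AI",
    expected_output="A short report on AI",
    agent=coding_agent,
)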

@@ -6,33 +6,25 @@ description: Comprehensive guide on integrating CrewAI with various Large Langua
## Connect CrewAI to LLMs

!!! note "Default LLM"
-    By default, CrewAI uses OpenAI's GPT-4 model (specifically, the model specified by the OPENAI_MODEL_NAME environment variable, defaulting to "gpt-4o") for language processing. You can configure your agents to use a different model or API as described in this guide.
+    By default, CrewAI uses OpenAI's GPT-4o model (specifically, the model specified by the OPENAI_MODEL_NAME environment variable, defaulting to "gpt-4o") for language processing. You can configure your agents to use a different model or API as described in this guide.
-    By default, CrewAI uses OpenAI's GPT-4 model (specifically, the model specified by the OPENAI_MODEL_NAME environment variable, defaulting to "gpt-4") for language processing. You can configure your agents to use a different model or API as described in this guide.

-CrewAI offers flexibility in connecting to various LLMs, including local models via [Ollama](https://ollama.ai) and different APIs like Azure. It's compatible with all [LangChain LLM](https://python.langchain.com/docs/integrations/llms/) components, enabling diverse integrations for tailored AI solutions.
+CrewAI provides extensive versatility in integrating with various Language Models (LLMs), including local options through Ollama such as Llama and Mixtral to cloud-based solutions like Azure. Its compatibility extends to all [LangChain LLM components](https://python.langchain.com/v0.2/docs/integrations/llms/), offering a wide range of integration possibilities for customized AI applications.

-## CrewAI Agent Overview
+The platform supports connections to an array of Generative AI models, including:

-The `Agent` class is the cornerstone for implementing AI solutions in CrewAI. Here's a comprehensive overview of the Agent class attributes and methods:
+- OpenAI's suite of advanced language models
+- Anthropic's cutting-edge AI offerings
+- Ollama's diverse range of locally-hosted generative model & embeddings
+- LM Studio's diverse range of locally hosted generative models & embeddings
+- Groq's Super Fast LLM offerings
+- Azures' generative AI offerings
+- HuggingFace's generative AI offerings

-- **Attributes**:
-  - `role`: Defines the agent's role within the solution.
-  - `goal`: Specifies the agent's objective.
-  - `backstory`: Provides a background story to the agent.
-  - `cache` *Optional*: Determines whether the agent should use a cache for tool usage. Default is `True`.
-  - `max_rpm` *Optional*: Maximum number of requests per minute the agent's execution should respect. Optional.
-  - `verbose` *Optional*: Enables detailed logging of the agent's execution. Default is `False`.
-  - `allow_delegation` *Optional*: Allows the agent to delegate tasks to other agents, default is `True`.
-  - `tools`: Specifies the tools available to the agent for task execution. Optional.
-  - `max_iter` *Optional*: Maximum number of iterations for an agent to execute a task, default is 25.
-  - `max_execution_time` *Optional*: Maximum execution time for an agent to execute a task. Optional.
-  - `step_callback` *Optional*: Provides a callback function to be executed after each step. Optional.
-  - `llm` *Optional*: Indicates the Large Language Model the agent uses. By default, it uses the GPT-4 model defined in the environment variable "OPENAI_MODEL_NAME".
-  - `function_calling_llm` *Optional*: Will turn the ReAct CrewAI agent into a function-calling agent.
-  - `callbacks` *Optional*: A list of callback functions from the LangChain library that are triggered during the agent's execution process.
-  - `system_template` *Optional*: Optional string to define the system format for the agent.
-  - `prompt_template` *Optional*: Optional string to define the prompt format for the agent.
-  - `response_template` *Optional*: Optional string to define the response format for the agent.
+This broad spectrum of LLM options enables users to select the most suitable model for their specific needs, whether prioritizing local deployment, specialized capabilities, or cloud-based scalability.

## Changing the default LLM
The default LLM is provided through the `langchain openai` package, which is installed by default when you install CrewAI. You can change this default LLM to a different model or API by setting the `OPENAI_MODEL_NAME` environment variable. This straightforward process allows you to harness the power of different OpenAI models, enhancing the flexibility and capabilities of your CrewAI implementation.
```python
# Required
os.environ["OPENAI_MODEL_NAME"]="gpt-4-0125-preview"

@@ -45,30 +37,27 @@ example_agent = Agent(
    verbose=True
)
```
-## Ollama Local Integration
-Ollama is preferred for local LLM integration, offering customization and privacy benefits. To integrate Ollama with CrewAI, you will need the `langchain-ollama` package. You can then set the following environment variables to connect to your Ollama instance running locally on port 11434.

+## Ollama Integration
+Ollama is preferred for local LLM integration, offering customization and privacy benefits. To integrate Ollama with CrewAI, set the appropriate environment variables as shown below.

### Setting Up Ollama
- **Environment Variables Configuration**: To integrate Ollama, set the following environment variables:
```sh
-OPENAI_API_BASE='http://localhost:11434'
-OPENAI_MODEL_NAME='llama2' # Adjust based on available model
-OPENAI_API_KEY=''
+os.environ[OPENAI_API_BASE]='http://localhost:11434'
+os.environ[OPENAI_MODEL_NAME]='llama2' # Adjust based on available model
+os.environ[OPENAI_API_KEY]='' # No API Key required for Ollama
```

-## Ollama Integration (ex. for using Llama 2 locally)
-1. [Download Ollama](https://ollama.com/download).
-2. After setting up the Ollama, Pull the Llama2 by typing following lines into the terminal ```ollama pull llama2```.
-3. Enjoy your free Llama2 model that powered up by excellent agents from crewai.
+## Ollama Integration Step by Step (ex. for using Llama 3.1 8B locally)
+1. [Download and install Ollama](https://ollama.com/download).
+2. After setting up the Ollama, Pull the Llama3.1 8B model by typing following lines into your terminal ```ollama run llama3.1```.
+3. Llama3.1 should now be served locally on `http://localhost:11434`
```
from crewai import Agent, Task, Crew
-from langchain.llms import Ollama
+from langchain_ollama import ChatOllama
import os
os.environ["OPENAI_API_KEY"] = "NA"

llm = Ollama(
-    model = "llama2",
+    model = "llama3.1",
    base_url = "http://localhost:11434")

general_agent = Agent(role = "Math Professor",

@@ -98,13 +87,14 @@ There are a couple of different ways you can use HuggingFace to host your LLM.

### Your own HuggingFace endpoint
```python
-from langchain_community.llms import HuggingFaceEndpoint
+from langchain_huggingface import HuggingFaceEndpoint

llm = HuggingFaceEndpoint(
    endpoint_url="<YOUR_ENDPOINT_URL_HERE>",
    huggingfacehub_api_token="<HF_TOKEN_HERE>",
-    repo_id="microsoft/Phi-3-mini-4k-instruct",
    task="text-generation",
-    max_new_tokens=512
+    max_new_tokens=512,
+    do_sample=False,
+    repetition_penalty=1.03,
)

agent = Agent(

@@ -115,66 +105,50 @@ agent = Agent(
)
```

-### From HuggingFaceHub endpoint
-```python
-from langchain_community.llms import HuggingFaceHub
-
-llm = HuggingFaceHub(
-    repo_id="HuggingFaceH4/zephyr-7b-beta",
-    huggingfacehub_api_token="<HF_TOKEN_HERE>",
-    task="text-generation",
-)
-```
-
## OpenAI Compatible API Endpoints
Switch between APIs and models seamlessly using environment variables, supporting platforms like FastChat, LM Studio, Groq, and Mistral AI.

### Configuration Examples
#### FastChat
```sh
-OPENAI_API_BASE="http://localhost:8001/v1"
-OPENAI_MODEL_NAME='oh-2.5m7b-q51'
-OPENAI_API_KEY=NA
+os.environ[OPENAI_API_BASE]="http://localhost:8001/v1"
+os.environ[OPENAI_MODEL_NAME]='oh-2.5m7b-q51'
+os.environ[OPENAI_API_KEY]=NA
```

#### LM Studio
Launch [LM Studio](https://lmstudio.ai) and go to the Server tab. Then select a model from the dropdown menu and wait for it to load. Once it's loaded, click the green Start Server button and use the URL, port, and API key that's shown (you can modify them). Below is an example of the default settings as of LM Studio 0.2.19:
```sh
-OPENAI_API_BASE="http://localhost:1234/v1"
-OPENAI_API_KEY="lm-studio"
+os.environ[OPENAI_API_BASE]="http://localhost:1234/v1"
+os.environ[OPENAI_API_KEY]="lm-studio"
```

#### Groq API
```sh
-OPENAI_API_KEY=your-groq-api-key
-OPENAI_MODEL_NAME='llama3-8b-8192'
-OPENAI_API_BASE=https://api.groq.com/openai/v1
+os.environ[OPENAI_API_KEY]=your-groq-api-key
+os.environ[OPENAI_MODEL_NAME]='llama3-8b-8192'
+os.environ[OPENAI_API_BASE]=https://api.groq.com/openai/v1
```

#### Mistral API
```sh
-OPENAI_API_KEY=your-mistral-api-key
-OPENAI_API_BASE=https://api.mistral.ai/v1
-OPENAI_MODEL_NAME="mistral-small"
+os.environ[OPENAI_API_KEY]=your-mistral-api-key
+os.environ[OPENAI_API_BASE]=https://api.mistral.ai/v1
+os.environ[OPENAI_MODEL_NAME]="mistral-small"
```

### Solar
-```python
+```sh
from langchain_community.chat_models.solar import SolarChat
# Initialize language model
os.environ["SOLAR_API_KEY"] = "your-solar-api-key"
llm = SolarChat(max_tokens=1024)
```
+```sh
+os.environ[SOLAR_API_BASE]="https://api.upstage.ai/v1/solar"
+os.environ[SOLAR_API_KEY]="your-solar-api-key"
+```

# Free developer API key available here: https://console.upstage.ai/services/solar
# Langchain Example: https://github.com/langchain-ai/langchain/pull/18556
```

### text-gen-web-ui
```sh
OPENAI_API_BASE=http://localhost:5000/v1
OPENAI_MODEL_NAME=NA
OPENAI_API_KEY=NA
```

### Cohere
```python

@@ -190,10 +164,11 @@ llm = ChatCohere()
### Azure Open AI Configuration
For Azure OpenAI API integration, set the following environment variables:
```sh
-AZURE_OPENAI_VERSION="2022-12-01"
-AZURE_OPENAI_DEPLOYMENT=""
-AZURE_OPENAI_ENDPOINT=""
-AZURE_OPENAI_KEY=""

+os.environ[AZURE_OPENAI_DEPLOYMENT] = "You deployment"
+os.environ["OPENAI_API_VERSION"] = "2023-12-01-preview"
+os.environ["AZURE_OPENAI_ENDPOINT"] = "Your Endpoint"
+os.environ["AZURE_OPENAI_API_KEY"] = "<Your API Key>"
```

### Example Agent with Azure LLM

@@ -216,6 +191,5 @@ azure_agent = Agent(
    llm=azure_llm
)
```

## Conclusion
Integrating CrewAI with different LLMs expands the framework's versatility, allowing for customized, efficient AI solutions across various domains and platforms.

@@ -1,9 +1,9 @@
# CodeInterpreterTool

## Description
-This tool is used to give the Agent the ability to run code (Python3) from the code generated by the Agent itself. The code is executed in a sandboxed environment, so it is safe to run any code.
+This tool enables the Agent to execute Python 3 code that it has generated autonomously. The code is run in a secure, isolated environment, ensuring safety regardless of the content.

-It is incredible useful since it allows the Agent to generate code, run it in the same environment, get the result and use it to make decisions.
+This functionality is particularly valuable as it allows the Agent to create code, execute it within the same ecosystem, obtain the results, and utilize that information to inform subsequent decisions and actions.

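A minimal sketch of handing this capability to an agent (the agent fields are illustrative; only `CodeInterpreterTool` and its default constructor come from these docs):

```python
from crewai import Agent
from crewai_tools import CodeInterpreterTool

# The interpreter tool lets the agent run the Python code it generates.
analyst = Agent(
    role="Data Analyst",
    goal="Answer questions by writing and running small Python scripts",
    backstory="You verify every answer by executing code.",
    tools=[CodeInterpreterTool()],
)
```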
## Requirements


@@ -2,7 +2,7 @@

## Description

-This tools is a wrapper around the composio toolset and gives your agent access to a wide variety of tools from the composio SDK.
+This tools is a wrapper around the composio set of tools and gives your agent access to a wide variety of tools from the composio SDK.

## Installation

@@ -19,7 +19,7 @@ after the installation is complete, either run `composio login` or export your c

The following example demonstrates how to initialize the tool and execute a github action:

-1. Initialize toolset
+1. Initialize Composio tools

```python
from composio import App

@@ -40,10 +40,9 @@ The `SerperDevTool` comes with several parameters that will be passed to the API
- **locale**: Optional. Specify the locale for the search results.
- **n_results**: Number of search results to return. Default is `10`.

-The values for `country`, `location`, `lovale` and `search_url` can be found on the [Serper Playground](https://serper.dev/playground).
+The values for `country`, `location`, `locale` and `search_url` can be found on the [Serper Playground](https://serper.dev/playground).

## Example with Parameters

Here is an example demonstrating how to use the tool with additional parameters:

```python

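# Illustrative sketch, not from the diff: the parameter values below are
# examples only; valid values can be found on the Serper Playground.
from crewai_tools import SerperDevTool

search_tool = SerperDevTool(
    country="us",
    locale="en",
    n_results=5,
)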

@@ -10,24 +10,24 @@ from crewai.agents.agent_builder.utilities.base_token_process import TokenProces
class TokenCalcHandler(BaseCallbackHandler):
    model_name: str = ""
    token_cost_process: TokenProcess
+    encoding: tiktoken.Encoding

    def __init__(self, model_name, token_cost_process):
        self.model_name = model_name
        self.token_cost_process = token_cost_process
+        try:
+            self.encoding = tiktoken.encoding_for_model(self.model_name)
+        except KeyError:
+            self.encoding = tiktoken.get_encoding("cl100k_base")

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
-        try:
-            encoding = tiktoken.encoding_for_model(self.model_name)
-        except KeyError:
-            encoding = tiktoken.get_encoding("cl100k_base")
-
        if self.token_cost_process is None:
            return

        for prompt in prompts:
-            self.token_cost_process.sum_prompt_tokens(len(encoding.encode(prompt)))
+            self.token_cost_process.sum_prompt_tokens(len(self.encoding.encode(prompt)))

    async def on_llm_new_token(self, token: str, **kwargs) -> None:
        self.token_cost_process.sum_completion_tokens(1)
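The change caches the tiktoken encoding once in `__init__` rather than re-resolving it on every LLM start. A rough sketch of wiring the handler up (hypothetical beyond the names shown in the diff):

```python
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess

# Shared accumulator for prompt and completion token counts.
process = TokenProcess()
handler = TokenCalcHandler(model_name="gpt-4o", token_cost_process=process)

# Hypothetical wiring: pass the handler as a LangChain callback so each
# prompt is encoded and counted, e.g. ChatOpenAI(callbacks=[handler]).
```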