Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-12 17:48:30 +00:00)

Commit: Merge remote-tracking branch 'refs/remotes/upstream/main' into undo-agentops-api-check
@@ -1,6 +1,7 @@
from crewai.agent import Agent
from crewai.crew import Crew
from crewai.pipeline import Pipeline
from crewai.process import Process
from crewai.task import Task

__all__ = ["Agent", "Crew", "Process", "Task"]
__all__ = ["Agent", "Crew", "Process", "Task", "Pipeline"]

@@ -19,18 +19,28 @@ from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_F
|
||||
from crewai.utilities.token_counter_callback import TokenCalcHandler
|
||||
from crewai.utilities.training_handler import CrewTrainingHandler
|
||||
|
||||
agentops = None
|
||||
try:
|
||||
import agentops # type: ignore # Name "agentops" already defined on line 21
|
||||
from agentops import track_agent
|
||||
except ImportError:
|
||||
|
||||
def track_agent():
|
||||
def mock_agent_ops_provider():
|
||||
def track_agent(*args, **kwargs):
|
||||
def noop(f):
|
||||
return f
|
||||
|
||||
return noop
|
||||
|
||||
return track_agent
|
||||
|
||||
|
||||
agentops = None
|
||||
|
||||
if os.environ.get("AGENTOPS_API_KEY"):
|
||||
try:
|
||||
import agentops # type: ignore # Name "agentops" already defined on line 21
|
||||
from agentops import track_agent
|
||||
except ImportError:
|
||||
track_agent = mock_agent_ops_provider()
|
||||
else:
|
||||
track_agent = mock_agent_ops_provider()
|
||||
|
||||
|
||||
@track_agent()
|
||||
class Agent(BaseAgent):
|
||||
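The hunk above replaces the unconditional `import agentops` with a check on the `AGENTOPS_API_KEY` environment variable plus a no-op decorator factory for when the package is missing or the key is unset. A minimal, self-contained sketch of that fallback pattern (the real module applies `@track_agent()` to the `Agent` class further down):

```python
import os


def mock_agent_ops_provider():
    """Return a track_agent stand-in that decorates classes without changing them."""
    def track_agent(*args, **kwargs):
        def noop(f):
            return f
        return noop
    return track_agent


agentops = None

if os.environ.get("AGENTOPS_API_KEY"):
    try:
        import agentops  # optional dependency; only used when the key is set
        from agentops import track_agent
    except ImportError:
        track_agent = mock_agent_ops_provider()
else:
    track_agent = mock_agent_ops_provider()


@track_agent()
class Example:
    pass


print(Example)  # without agentops, the class is returned unchanged
```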
@@ -103,40 +113,46 @@ class Agent(BaseAgent):
|
||||
description="Maximum number of retries for an agent to execute a task when an error occurs.",
|
||||
)
|
||||
|
||||
def __init__(__pydantic_self__, **data):
|
||||
config = data.pop("config", {})
|
||||
super().__init__(**config, **data)
|
||||
__pydantic_self__.agent_ops_agent_name = __pydantic_self__.role
|
||||
|
||||
@model_validator(mode="after")
|
||||
def set_agent_executor(self) -> "Agent":
|
||||
"""Ensure agent executor and token process are set."""
|
||||
if hasattr(self.llm, "model_name"):
|
||||
token_handler = TokenCalcHandler(self.llm.model_name, self._token_process)
|
||||
def post_init_setup(self):
|
||||
self.agent_ops_agent_name = self.role
|
||||
|
||||
# Ensure self.llm.callbacks is a list
|
||||
if not isinstance(self.llm.callbacks, list):
|
||||
self.llm.callbacks = []
|
||||
# Different llms store the model name in different attributes
|
||||
model_name = getattr(self.llm, "model_name", None) or getattr(
|
||||
self.llm, "deployment_name", None
|
||||
)
|
||||
|
||||
# Check if an instance of TokenCalcHandler already exists in the list
|
||||
if not any(
|
||||
isinstance(handler, TokenCalcHandler) for handler in self.llm.callbacks
|
||||
):
|
||||
self.llm.callbacks.append(token_handler)
|
||||
|
||||
if agentops and not any(
|
||||
isinstance(handler, agentops.LangchainCallbackHandler)
|
||||
for handler in self.llm.callbacks
|
||||
):
|
||||
agentops.stop_instrumenting()
|
||||
self.llm.callbacks.append(agentops.LangchainCallbackHandler())
|
||||
if model_name:
|
||||
self._setup_llm_callbacks(model_name)
|
||||
|
||||
if not self.agent_executor:
|
||||
if not self.cache_handler:
|
||||
self.cache_handler = CacheHandler()
|
||||
self.set_cache_handler(self.cache_handler)
|
||||
self._setup_agent_executor()
|
||||
|
||||
return self
|
||||
|
||||
def _setup_llm_callbacks(self, model_name: str):
|
||||
token_handler = TokenCalcHandler(model_name, self._token_process)
|
||||
|
||||
if not isinstance(self.llm.callbacks, list):
|
||||
self.llm.callbacks = []
|
||||
|
||||
if not any(
|
||||
isinstance(handler, TokenCalcHandler) for handler in self.llm.callbacks
|
||||
):
|
||||
self.llm.callbacks.append(token_handler)
|
||||
|
||||
if agentops and not any(
|
||||
isinstance(handler, agentops.LangchainCallbackHandler)
|
||||
for handler in self.llm.callbacks
|
||||
):
|
||||
agentops.stop_instrumenting()
|
||||
self.llm.callbacks.append(agentops.LangchainCallbackHandler())
|
||||
|
||||
def _setup_agent_executor(self):
|
||||
if not self.cache_handler:
|
||||
self.cache_handler = CacheHandler()
|
||||
self.set_cache_handler(self.cache_handler)
|
||||
|
||||
def execute_task(
|
||||
self,
|
||||
task: Any,
|
||||
@@ -203,7 +219,7 @@ class Agent(BaseAgent):
|
||||
raise e
|
||||
result = self.execute_task(task, context, tools)
|
||||
|
||||
if self.max_rpm:
|
||||
if self.max_rpm and self._rpm_controller:
|
||||
self._rpm_controller.stop_rpm_counter()
|
||||
|
||||
# If there was any tool in self.tools_results that had result_as_answer
|
||||
|
||||
@@ -2,3 +2,5 @@ from .cache.cache_handler import CacheHandler
from .executor import CrewAgentExecutor
from .parser import CrewAgentParser
from .tools_handler import ToolsHandler

__all__ = ["CacheHandler", "CrewAgentExecutor", "CrewAgentParser", "ToolsHandler"]

@@ -7,7 +7,6 @@ from typing import Any, Dict, List, Optional, TypeVar
|
||||
from pydantic import (
|
||||
UUID4,
|
||||
BaseModel,
|
||||
ConfigDict,
|
||||
Field,
|
||||
InstanceOf,
|
||||
PrivateAttr,
|
||||
@@ -20,6 +19,7 @@ from crewai.agents.agent_builder.utilities.base_token_process import TokenProces
|
||||
from crewai.agents.cache.cache_handler import CacheHandler
|
||||
from crewai.agents.tools_handler import ToolsHandler
|
||||
from crewai.utilities import I18N, Logger, RPMController
|
||||
from crewai.utilities.config import process_config
|
||||
|
||||
T = TypeVar("T", bound="BaseAgent")
|
||||
|
||||
@@ -74,21 +74,26 @@ class BaseAgent(ABC, BaseModel):
|
||||
"""
|
||||
|
||||
__hash__ = object.__hash__ # type: ignore
|
||||
_logger: Logger = PrivateAttr()
|
||||
_rpm_controller: RPMController = PrivateAttr(default=None)
|
||||
_logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=False))
|
||||
_rpm_controller: Optional[RPMController] = PrivateAttr(default=None)
|
||||
_request_within_rpm_limit: Any = PrivateAttr(default=None)
|
||||
formatting_errors: int = 0
|
||||
model_config = ConfigDict(arbitrary_types_allowed=True)
|
||||
_original_role: Optional[str] = PrivateAttr(default=None)
|
||||
_original_goal: Optional[str] = PrivateAttr(default=None)
|
||||
_original_backstory: Optional[str] = PrivateAttr(default=None)
|
||||
_token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
|
||||
id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
|
||||
formatting_errors: int = Field(
|
||||
default=0, description="Number of formatting errors."
|
||||
)
|
||||
role: str = Field(description="Role of the agent")
|
||||
goal: str = Field(description="Objective of the agent")
|
||||
backstory: str = Field(description="Backstory of the agent")
|
||||
config: Optional[Dict[str, Any]] = Field(
|
||||
description="Configuration for the agent", default=None, exclude=True
|
||||
)
|
||||
cache: bool = Field(
|
||||
default=True, description="Whether the agent should use a cache for tool usage."
|
||||
)
|
||||
config: Optional[Dict[str, Any]] = Field(
|
||||
description="Configuration for the agent", default=None
|
||||
)
|
||||
verbose: bool = Field(
|
||||
default=False, description="Verbose mode for the Agent Execution"
|
||||
)
|
||||
@@ -123,20 +128,29 @@ class BaseAgent(ABC, BaseModel):
|
||||
default=None, description="Maximum number of tokens for the agent's execution."
|
||||
)
|
||||
|
||||
_original_role: str | None = None
|
||||
_original_goal: str | None = None
|
||||
_original_backstory: str | None = None
|
||||
_token_process: TokenProcess = TokenProcess()
|
||||
|
||||
def __init__(__pydantic_self__, **data):
|
||||
config = data.pop("config", {})
|
||||
super().__init__(**config, **data)
|
||||
@model_validator(mode="before")
|
||||
@classmethod
|
||||
def process_model_config(cls, values):
|
||||
return process_config(values, cls)
|
||||
|
||||
@model_validator(mode="after")
|
||||
def set_config_attributes(self):
|
||||
if self.config:
|
||||
for key, value in self.config.items():
|
||||
setattr(self, key, value)
|
||||
def validate_and_set_attributes(self):
|
||||
# Validate required fields
|
||||
for field in ["role", "goal", "backstory"]:
|
||||
if getattr(self, field) is None:
|
||||
raise ValueError(
|
||||
f"{field} must be provided either directly or through config"
|
||||
)
|
||||
|
||||
# Set private attributes
|
||||
self._logger = Logger(verbose=self.verbose)
|
||||
if self.max_rpm and not self._rpm_controller:
|
||||
self._rpm_controller = RPMController(
|
||||
max_rpm=self.max_rpm, logger=self._logger
|
||||
)
|
||||
if not self._token_process:
|
||||
self._token_process = TokenProcess()
|
||||
|
||||
return self
|
||||
|
||||
@field_validator("id", mode="before")
|
||||
@@ -147,14 +161,6 @@ class BaseAgent(ABC, BaseModel):
|
||||
"may_not_set_field", "This field is not to be set by the user.", {}
|
||||
)
|
||||
|
||||
@model_validator(mode="after")
|
||||
def set_attributes_based_on_config(self) -> "BaseAgent":
|
||||
"""Set attributes based on the agent configuration."""
|
||||
if self.config:
|
||||
for key, value in self.config.items():
|
||||
setattr(self, key, value)
|
||||
return self
|
||||
|
||||
@model_validator(mode="after")
|
||||
def set_private_attrs(self):
|
||||
"""Set private attributes."""
|
||||
@@ -170,7 +176,7 @@ class BaseAgent(ABC, BaseModel):
|
||||
@property
|
||||
def key(self):
|
||||
source = [self.role, self.goal, self.backstory]
|
||||
return md5("|".join(source).encode()).hexdigest()
|
||||
return md5("|".join(source).encode(), usedforsecurity=False).hexdigest()
|
||||
|
||||
@abstractmethod
|
||||
def execute_task(
|
||||
|
||||
@@ -1,4 +1,4 @@
from typing import Any, Dict
from crewai.types.usage_metrics import UsageMetrics


class TokenProcess:
@@ -18,10 +18,10 @@ class TokenProcess:
|
||||
def sum_successful_requests(self, requests: int):
|
||||
self.successful_requests = self.successful_requests + requests
|
||||
|
||||
def get_summary(self) -> Dict[str, Any]:
|
||||
return {
|
||||
"total_tokens": self.total_tokens,
|
||||
"prompt_tokens": self.prompt_tokens,
|
||||
"completion_tokens": self.completion_tokens,
|
||||
"successful_requests": self.successful_requests,
|
||||
}
|
||||
def get_summary(self) -> UsageMetrics:
|
||||
return UsageMetrics(
|
||||
total_tokens=self.total_tokens,
|
||||
prompt_tokens=self.prompt_tokens,
|
||||
completion_tokens=self.completion_tokens,
|
||||
successful_requests=self.successful_requests,
|
||||
)
|
||||
|
||||
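`get_summary` now returns a `UsageMetrics` model instead of a plain dict. A small sketch of what that means for callers, using a stand-in pydantic model with the four fields the diff passes (the real `crewai.types.usage_metrics.UsageMetrics` may differ in details):

```python
from pydantic import BaseModel


class UsageMetrics(BaseModel):
    # Stand-in for crewai.types.usage_metrics.UsageMetrics; field names follow the diff.
    total_tokens: int = 0
    prompt_tokens: int = 0
    completion_tokens: int = 0
    successful_requests: int = 0


summary = UsageMetrics(
    total_tokens=1200, prompt_tokens=900, completion_tokens=300, successful_requests=3
)
print(summary.total_tokens)   # attribute access replaces summary["total_tokens"]
print(summary.model_dump())   # still available as a dict when a mapping is needed
```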
2  src/crewai/agents/cache/__init__.py  (vendored)
@@ -1 +1,3 @@
from .cache_handler import CacheHandler

__all__ = ["CacheHandler"]

11  src/crewai/agents/cache/cache_handler.py  (vendored)
@@ -1,13 +1,12 @@
|
||||
from typing import Optional
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
from pydantic import BaseModel, PrivateAttr
|
||||
|
||||
|
||||
class CacheHandler:
|
||||
class CacheHandler(BaseModel):
|
||||
"""Callback handler for tool usage."""
|
||||
|
||||
_cache: dict = {}
|
||||
|
||||
def __init__(self):
|
||||
self._cache = {}
|
||||
_cache: Dict[str, Any] = PrivateAttr(default_factory=dict)
|
||||
|
||||
def add(self, tool, input, output):
|
||||
self._cache[f"{tool}-{input}"] = output
|
||||
|
||||
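The `CacheHandler` rewrite swaps the hand-rolled `__init__` for a pydantic `BaseModel` with a `PrivateAttr` cache. A stripped-down sketch of that pattern, reproducing only the `add` method visible in the diff:

```python
from typing import Any, Dict

from pydantic import BaseModel, PrivateAttr


class CacheHandler(BaseModel):
    """Callback handler for tool usage (sketch of the pydantic-based version)."""

    _cache: Dict[str, Any] = PrivateAttr(default_factory=dict)

    def add(self, tool, input, output):
        self._cache[f"{tool}-{input}"] = output


handler = CacheHandler()
handler.add("search", "crewAI", "some cached output")
print(handler._cache)  # {'search-crewAI': 'some cached output'}
```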
@@ -1,33 +1,29 @@
|
||||
import threading
|
||||
import time
|
||||
from typing import Any, Dict, Iterator, List, Literal, Optional, Tuple, Union
|
||||
|
||||
import click
|
||||
|
||||
|
||||
from langchain.agents import AgentExecutor
|
||||
from langchain.agents.agent import ExceptionTool
|
||||
from langchain.callbacks.manager import CallbackManagerForChainRun
|
||||
from langchain.chains.summarize import load_summarize_chain
|
||||
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
||||
from langchain_core.agents import AgentAction, AgentFinish, AgentStep
|
||||
from langchain_core.exceptions import OutputParserException
|
||||
from langchain_core.tools import BaseTool
|
||||
from langchain_core.utils.input import get_color_mapping
|
||||
from pydantic import InstanceOf
|
||||
|
||||
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
||||
from langchain.chains.summarize import load_summarize_chain
|
||||
|
||||
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
|
||||
from crewai.agents.tools_handler import ToolsHandler
|
||||
|
||||
|
||||
from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
|
||||
from crewai.utilities import I18N
|
||||
from crewai.utilities.constants import TRAINING_DATA_FILE
|
||||
from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
||||
LLMContextLengthExceededException,
|
||||
)
|
||||
from crewai.utilities.training_handler import CrewTrainingHandler
|
||||
from crewai.utilities.logger import Logger
|
||||
from crewai.utilities.training_handler import CrewTrainingHandler
|
||||
|
||||
|
||||
class CrewAgentExecutor(AgentExecutor, CrewAgentExecutorMixin):
|
||||
@@ -69,7 +65,7 @@ class CrewAgentExecutor(AgentExecutor, CrewAgentExecutorMixin):
|
||||
)
|
||||
intermediate_steps: List[Tuple[AgentAction, str]] = []
|
||||
# Allowing human input given task setting
|
||||
if self.task.human_input:
|
||||
if self.task and self.task.human_input:
|
||||
self.should_ask_for_human_input = True
|
||||
|
||||
# Let's start tracking the number of iterations and time elapsed
|
||||
@@ -213,11 +209,7 @@ class CrewAgentExecutor(AgentExecutor, CrewAgentExecutorMixin):
|
||||
yield step
|
||||
return
|
||||
|
||||
yield AgentStep(
|
||||
action=AgentAction("_Exception", str(e), str(e)),
|
||||
observation=str(e),
|
||||
)
|
||||
return
|
||||
raise e
|
||||
|
||||
# If the tool chosen is the finishing tool, then we end and return.
|
||||
if isinstance(output, AgentFinish):
|
||||
|
||||
3  src/crewai/cli/authentication/__init__.py  (Normal file)
@@ -0,0 +1,3 @@
from .main import AuthenticationCommand

__all__ = ["AuthenticationCommand"]

4  src/crewai/cli/authentication/constants.py  (Normal file)
@@ -0,0 +1,4 @@
ALGORITHMS = ["RS256"]
AUTH0_DOMAIN = "dev-jzsr0j8zs0atl5ha.us.auth0.com"
AUTH0_CLIENT_ID = "CZtyRHuVW80HbLSjk4ggXNzjg4KAt7Oe"
AUTH0_AUDIENCE = "https://dev-jzsr0j8zs0atl5ha.us.auth0.com/api/v2/"

75  src/crewai/cli/authentication/main.py  (Normal file)
@@ -0,0 +1,75 @@
|
||||
import time
|
||||
import webbrowser
|
||||
from typing import Any, Dict
|
||||
|
||||
import requests
|
||||
from rich.console import Console
|
||||
|
||||
from .constants import AUTH0_AUDIENCE, AUTH0_CLIENT_ID, AUTH0_DOMAIN
|
||||
from .utils import TokenManager, validate_token
|
||||
|
||||
console = Console()
|
||||
|
||||
|
||||
class AuthenticationCommand:
|
||||
DEVICE_CODE_URL = f"https://{AUTH0_DOMAIN}/oauth/device/code"
|
||||
TOKEN_URL = f"https://{AUTH0_DOMAIN}/oauth/token"
|
||||
|
||||
def __init__(self):
|
||||
self.token_manager = TokenManager()
|
||||
|
||||
def signup(self) -> None:
|
||||
"""Sign up to CrewAI+"""
|
||||
console.print("Signing Up to CrewAI+ \n", style="bold blue")
|
||||
device_code_data = self._get_device_code()
|
||||
self._display_auth_instructions(device_code_data)
|
||||
|
||||
return self._poll_for_token(device_code_data)
|
||||
|
||||
def _get_device_code(self) -> Dict[str, Any]:
|
||||
"""Get the device code to authenticate the user."""
|
||||
|
||||
device_code_payload = {
|
||||
"client_id": AUTH0_CLIENT_ID,
|
||||
"scope": "openid",
|
||||
"audience": AUTH0_AUDIENCE,
|
||||
}
|
||||
response = requests.post(url=self.DEVICE_CODE_URL, data=device_code_payload)
|
||||
response.raise_for_status()
|
||||
return response.json()
|
||||
|
||||
def _display_auth_instructions(self, device_code_data: Dict[str, str]) -> None:
|
||||
"""Display the authentication instructions to the user."""
|
||||
console.print("1. Navigate to: ", device_code_data["verification_uri_complete"])
|
||||
console.print("2. Enter the following code: ", device_code_data["user_code"])
|
||||
webbrowser.open(device_code_data["verification_uri_complete"])
|
||||
|
||||
def _poll_for_token(self, device_code_data: Dict[str, Any]) -> None:
|
||||
"""Poll the server for the token."""
|
||||
token_payload = {
|
||||
"grant_type": "urn:ietf:params:oauth:grant-type:device_code",
|
||||
"device_code": device_code_data["device_code"],
|
||||
"client_id": AUTH0_CLIENT_ID,
|
||||
}
|
||||
|
||||
attempts = 0
|
||||
while True and attempts < 5:
|
||||
response = requests.post(self.TOKEN_URL, data=token_payload)
|
||||
token_data = response.json()
|
||||
|
||||
if response.status_code == 200:
|
||||
validate_token(token_data["id_token"])
|
||||
expires_in = 360000 # Token expiration time in seconds
|
||||
self.token_manager.save_tokens(token_data["access_token"], expires_in)
|
||||
console.print("\nWelcome to CrewAI+ !!", style="green")
|
||||
return
|
||||
|
||||
if token_data["error"] not in ("authorization_pending", "slow_down"):
|
||||
raise requests.HTTPError(token_data["error_description"])
|
||||
|
||||
time.sleep(device_code_data["interval"])
|
||||
attempts += 1
|
||||
|
||||
console.print(
|
||||
"Timeout: Failed to get the token. Please try again.", style="bold red"
|
||||
)
|
||||
144  src/crewai/cli/authentication/utils.py  (Normal file)
@@ -0,0 +1,144 @@
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from auth0.authentication.token_verifier import (
|
||||
AsymmetricSignatureVerifier,
|
||||
TokenVerifier,
|
||||
)
|
||||
from cryptography.fernet import Fernet
|
||||
|
||||
from .constants import AUTH0_CLIENT_ID, AUTH0_DOMAIN
|
||||
|
||||
|
||||
def validate_token(id_token: str) -> None:
|
||||
"""
|
||||
Verify the token and its precedence
|
||||
|
||||
:param id_token:
|
||||
"""
|
||||
jwks_url = f"https://{AUTH0_DOMAIN}/.well-known/jwks.json"
|
||||
issuer = f"https://{AUTH0_DOMAIN}/"
|
||||
signature_verifier = AsymmetricSignatureVerifier(jwks_url)
|
||||
token_verifier = TokenVerifier(
|
||||
signature_verifier=signature_verifier, issuer=issuer, audience=AUTH0_CLIENT_ID
|
||||
)
|
||||
token_verifier.verify(id_token)
|
||||
|
||||
|
||||
class TokenManager:
|
||||
def __init__(self, file_path: str = "tokens.enc") -> None:
|
||||
"""
|
||||
Initialize the TokenManager class.
|
||||
|
||||
:param file_path: The file path to store the encrypted tokens. Default is "tokens.enc".
|
||||
"""
|
||||
self.file_path = file_path
|
||||
self.key = self._get_or_create_key()
|
||||
self.fernet = Fernet(self.key)
|
||||
|
||||
def _get_or_create_key(self) -> bytes:
|
||||
"""
|
||||
Get or create the encryption key.
|
||||
|
||||
:return: The encryption key.
|
||||
"""
|
||||
key_filename = "secret.key"
|
||||
key = self.read_secure_file(key_filename)
|
||||
|
||||
if key is not None:
|
||||
return key
|
||||
|
||||
new_key = Fernet.generate_key()
|
||||
self.save_secure_file(key_filename, new_key)
|
||||
return new_key
|
||||
|
||||
def save_tokens(self, access_token: str, expires_in: int) -> None:
|
||||
"""
|
||||
Save the access token and its expiration time.
|
||||
|
||||
:param access_token: The access token to save.
|
||||
:param expires_in: The expiration time of the access token in seconds.
|
||||
"""
|
||||
expiration_time = datetime.now() + timedelta(seconds=expires_in)
|
||||
data = {
|
||||
"access_token": access_token,
|
||||
"expiration": expiration_time.isoformat(),
|
||||
}
|
||||
encrypted_data = self.fernet.encrypt(json.dumps(data).encode())
|
||||
self.save_secure_file(self.file_path, encrypted_data)
|
||||
|
||||
def get_token(self) -> Optional[str]:
|
||||
"""
|
||||
Get the access token if it is valid and not expired.
|
||||
|
||||
:return: The access token if valid and not expired, otherwise None.
|
||||
"""
|
||||
encrypted_data = self.read_secure_file(self.file_path)
|
||||
|
||||
decrypted_data = self.fernet.decrypt(encrypted_data)
|
||||
data = json.loads(decrypted_data)
|
||||
|
||||
expiration = datetime.fromisoformat(data["expiration"])
|
||||
if expiration <= datetime.now():
|
||||
return None
|
||||
|
||||
return data["access_token"]
|
||||
|
||||
def get_secure_storage_path(self) -> Path:
|
||||
"""
|
||||
Get the secure storage path based on the operating system.
|
||||
|
||||
:return: The secure storage path.
|
||||
"""
|
||||
if sys.platform == "win32":
|
||||
# Windows: Use %LOCALAPPDATA%
|
||||
base_path = os.environ.get("LOCALAPPDATA")
|
||||
elif sys.platform == "darwin":
|
||||
# macOS: Use ~/Library/Application Support
|
||||
base_path = os.path.expanduser("~/Library/Application Support")
|
||||
else:
|
||||
# Linux and other Unix-like: Use ~/.local/share
|
||||
base_path = os.path.expanduser("~/.local/share")
|
||||
|
||||
app_name = "crewai/credentials"
|
||||
storage_path = Path(base_path) / app_name
|
||||
|
||||
storage_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
return storage_path
|
||||
|
||||
def save_secure_file(self, filename: str, content: bytes) -> None:
|
||||
"""
|
||||
Save the content to a secure file.
|
||||
|
||||
:param filename: The name of the file.
|
||||
:param content: The content to save.
|
||||
"""
|
||||
storage_path = self.get_secure_storage_path()
|
||||
file_path = storage_path / filename
|
||||
|
||||
with open(file_path, "wb") as f:
|
||||
f.write(content)
|
||||
|
||||
# Set appropriate permissions (read/write for owner only)
|
||||
os.chmod(file_path, 0o600)
|
||||
|
||||
def read_secure_file(self, filename: str) -> Optional[bytes]:
|
||||
"""
|
||||
Read the content of a secure file.
|
||||
|
||||
:param filename: The name of the file.
|
||||
:return: The content of the file if it exists, otherwise None.
|
||||
"""
|
||||
storage_path = self.get_secure_storage_path()
|
||||
file_path = storage_path / filename
|
||||
|
||||
if not file_path.exists():
|
||||
return None
|
||||
|
||||
with open(file_path, "rb") as f:
|
||||
return f.read()
|
||||
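For context, a hedged usage sketch of the `TokenManager` defined above. The import path simply mirrors the file's location in the diff and is an assumption; note that this writes real files under the platform-specific credentials directory (for example `~/.local/share/crewai/credentials` on Linux):

```python
from crewai.cli.authentication.utils import TokenManager  # assumed import path, per the diff

manager = TokenManager()

# Encrypts and stores the token together with its expiration timestamp.
manager.save_tokens("example-access-token", expires_in=3600)

# Returns the token while it is still valid, or None once it has expired.
print(manager.get_token())
```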
@@ -1,12 +1,18 @@
|
||||
from typing import Optional
|
||||
|
||||
import click
|
||||
import pkg_resources
|
||||
|
||||
from crewai.cli.create_crew import create_crew
|
||||
from crewai.cli.create_pipeline import create_pipeline
|
||||
from crewai.memory.storage.kickoff_task_outputs_storage import (
|
||||
KickoffTaskOutputsSQLiteStorage,
|
||||
)
|
||||
|
||||
from .create_crew import create_crew
|
||||
from .authentication.main import AuthenticationCommand
|
||||
from .deploy.main import DeployCommand
|
||||
from .evaluate_crew import evaluate_crew
|
||||
from .install_crew import install_crew
|
||||
from .replay_from_task import replay_task_command
|
||||
from .reset_memories_command import reset_memories_command
|
||||
from .run_crew import run_crew
|
||||
@@ -19,10 +25,19 @@ def crewai():
|
||||
|
||||
|
||||
@crewai.command()
|
||||
@click.argument("project_name")
|
||||
def create(project_name):
|
||||
"""Create a new crew."""
|
||||
create_crew(project_name)
|
||||
@click.argument("type", type=click.Choice(["crew", "pipeline"]))
|
||||
@click.argument("name")
|
||||
@click.option(
|
||||
"--router", is_flag=True, help="Create a pipeline with router functionality"
|
||||
)
|
||||
def create(type, name, router):
|
||||
"""Create a new crew or pipeline."""
|
||||
if type == "crew":
|
||||
create_crew(name)
|
||||
elif type == "pipeline":
|
||||
create_pipeline(name, router)
|
||||
else:
|
||||
click.secho("Error: Invalid type. Must be 'crew' or 'pipeline'.", fg="red")
|
||||
|
||||
|
||||
@crewai.command()
|
||||
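The `create` command now takes a `type` argument (`crew` or `pipeline`) plus a `--router` flag instead of a single `project_name`. A quick way to exercise it without installing the CLI globally is click's test runner; the `crewai.cli.cli` module path is an assumption, since the diff does not show the file header for this hunk:

```python
from click.testing import CliRunner

from crewai.cli.cli import crewai  # assumed location of the `crewai` click group

runner = CliRunner()
# Note: invoking the command scaffolds a my_pipeline/ folder in the current directory.
result = runner.invoke(crewai, ["create", "pipeline", "my_pipeline", "--router"])
print(result.exit_code)
print(result.output)
```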
@@ -50,10 +65,17 @@ def version(tools):
|
||||
default=5,
|
||||
help="Number of iterations to train the crew",
|
||||
)
|
||||
def train(n_iterations: int):
|
||||
@click.option(
|
||||
"-f",
|
||||
"--filename",
|
||||
type=str,
|
||||
default="trained_agents_data.pkl",
|
||||
help="Path to a custom file for training",
|
||||
)
|
||||
def train(n_iterations: int, filename: str):
|
||||
"""Train the crew."""
|
||||
click.echo(f"Training the crew for {n_iterations} iterations")
|
||||
train_crew(n_iterations)
|
||||
click.echo(f"Training the Crew for {n_iterations} iterations")
|
||||
train_crew(n_iterations, filename)
|
||||
|
||||
|
||||
@crewai.command()
|
||||
@@ -148,12 +170,83 @@ def test(n_iterations: int, model: str):
|
||||
evaluate_crew(n_iterations, model)
|
||||
|
||||
|
||||
@crewai.command()
|
||||
def install():
|
||||
"""Install the Crew."""
|
||||
install_crew()
|
||||
|
||||
|
||||
@crewai.command()
|
||||
def run():
|
||||
"""Run the crew."""
|
||||
click.echo("Running the crew")
|
||||
"""Run the Crew."""
|
||||
click.echo("Running the Crew")
|
||||
run_crew()
|
||||
|
||||
|
||||
@crewai.command()
|
||||
def signup():
|
||||
"""Sign Up/Login to CrewAI+."""
|
||||
AuthenticationCommand().signup()
|
||||
|
||||
|
||||
@crewai.command()
|
||||
def login():
|
||||
"""Sign Up/Login to CrewAI+."""
|
||||
AuthenticationCommand().signup()
|
||||
|
||||
|
||||
# DEPLOY CREWAI+ COMMANDS
|
||||
@crewai.group()
|
||||
def deploy():
|
||||
"""Deploy the Crew CLI group."""
|
||||
pass
|
||||
|
||||
|
||||
@deploy.command(name="create")
|
||||
def deploy_create():
|
||||
"""Create a Crew deployment."""
|
||||
deploy_cmd = DeployCommand()
|
||||
deploy_cmd.create_crew()
|
||||
|
||||
|
||||
@deploy.command(name="list")
|
||||
def deploy_list():
|
||||
"""List all deployments."""
|
||||
deploy_cmd = DeployCommand()
|
||||
deploy_cmd.list_crews()
|
||||
|
||||
|
||||
@deploy.command(name="push")
|
||||
@click.option("-u", "--uuid", type=str, help="Crew UUID parameter")
|
||||
def deploy_push(uuid: Optional[str]):
|
||||
"""Deploy the Crew."""
|
||||
deploy_cmd = DeployCommand()
|
||||
deploy_cmd.deploy(uuid=uuid)
|
||||
|
||||
|
||||
@deploy.command(name="status")
|
||||
@click.option("-u", "--uuid", type=str, help="Crew UUID parameter")
|
||||
def deploy_status(uuid: Optional[str]):
|
||||
"""Get the status of a deployment."""
|
||||
deploy_cmd = DeployCommand()
|
||||
deploy_cmd.get_crew_status(uuid=uuid)
|
||||
|
||||
|
||||
@deploy.command(name="logs")
|
||||
@click.option("-u", "--uuid", type=str, help="Crew UUID parameter")
|
||||
def deploy_logs(uuid: Optional[str]):
|
||||
"""Get the logs of a deployment."""
|
||||
deploy_cmd = DeployCommand()
|
||||
deploy_cmd.get_crew_logs(uuid=uuid)
|
||||
|
||||
|
||||
@deploy.command(name="remove")
|
||||
@click.option("-u", "--uuid", type=str, help="Crew UUID parameter")
|
||||
def deploy_remove(uuid: Optional[str]):
|
||||
"""Remove a deployment."""
|
||||
deploy_cmd = DeployCommand()
|
||||
deploy_cmd.remove_crew(uuid=uuid)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
crewai()
|
||||
|
||||
@@ -1,25 +1,35 @@
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import click
|
||||
|
||||
from crewai.cli.utils import copy_template
|
||||
|
||||
def create_crew(name):
|
||||
|
||||
def create_crew(name, parent_folder=None):
|
||||
"""Create a new crew."""
|
||||
folder_name = name.replace(" ", "_").replace("-", "_").lower()
|
||||
class_name = name.replace("_", " ").replace("-", " ").title().replace(" ", "")
|
||||
|
||||
click.secho(f"Creating folder {folder_name}...", fg="green", bold=True)
|
||||
if parent_folder:
|
||||
folder_path = Path(parent_folder) / folder_name
|
||||
else:
|
||||
folder_path = Path(folder_name)
|
||||
|
||||
if not os.path.exists(folder_name):
|
||||
os.mkdir(folder_name)
|
||||
os.mkdir(folder_name + "/tests")
|
||||
os.mkdir(folder_name + "/src")
|
||||
os.mkdir(folder_name + f"/src/{folder_name}")
|
||||
os.mkdir(folder_name + f"/src/{folder_name}/tools")
|
||||
os.mkdir(folder_name + f"/src/{folder_name}/config")
|
||||
with open(folder_name + "/.env", "w") as file:
|
||||
file.write("OPENAI_API_KEY=YOUR_API_KEY")
|
||||
click.secho(
|
||||
f"Creating {'crew' if parent_folder else 'folder'} {folder_name}...",
|
||||
fg="green",
|
||||
bold=True,
|
||||
)
|
||||
|
||||
if not folder_path.exists():
|
||||
folder_path.mkdir(parents=True)
|
||||
(folder_path / "tests").mkdir(exist_ok=True)
|
||||
if not parent_folder:
|
||||
(folder_path / "src" / folder_name).mkdir(parents=True)
|
||||
(folder_path / "src" / folder_name / "tools").mkdir(parents=True)
|
||||
(folder_path / "src" / folder_name / "config").mkdir(parents=True)
|
||||
with open(folder_path / ".env", "w") as file:
|
||||
file.write("OPENAI_API_KEY=YOUR_API_KEY")
|
||||
else:
|
||||
click.secho(
|
||||
f"\tFolder {folder_name} already exists. Please choose a different name.",
|
||||
@@ -28,53 +38,34 @@ def create_crew(name):
|
||||
return
|
||||
|
||||
package_dir = Path(__file__).parent
|
||||
templates_dir = package_dir / "templates"
|
||||
templates_dir = package_dir / "templates" / "crew"
|
||||
|
||||
# List of template files to copy
|
||||
root_template_files = [
|
||||
".gitignore",
|
||||
"pyproject.toml",
|
||||
"README.md",
|
||||
]
|
||||
root_template_files = (
|
||||
[".gitignore", "pyproject.toml", "README.md"] if not parent_folder else []
|
||||
)
|
||||
tools_template_files = ["tools/custom_tool.py", "tools/__init__.py"]
|
||||
config_template_files = ["config/agents.yaml", "config/tasks.yaml"]
|
||||
src_template_files = ["__init__.py", "main.py", "crew.py"]
|
||||
src_template_files = (
|
||||
["__init__.py", "main.py", "crew.py"] if not parent_folder else ["crew.py"]
|
||||
)
|
||||
|
||||
for file_name in root_template_files:
|
||||
src_file = templates_dir / file_name
|
||||
dst_file = Path(folder_name) / file_name
|
||||
dst_file = folder_path / file_name
|
||||
copy_template(src_file, dst_file, name, class_name, folder_name)
|
||||
|
||||
src_folder = folder_path / "src" / folder_name if not parent_folder else folder_path
|
||||
|
||||
for file_name in src_template_files:
|
||||
src_file = templates_dir / file_name
|
||||
dst_file = Path(folder_name) / "src" / folder_name / file_name
|
||||
dst_file = src_folder / file_name
|
||||
copy_template(src_file, dst_file, name, class_name, folder_name)
|
||||
|
||||
for file_name in tools_template_files:
|
||||
src_file = templates_dir / file_name
|
||||
dst_file = Path(folder_name) / "src" / folder_name / file_name
|
||||
copy_template(src_file, dst_file, name, class_name, folder_name)
|
||||
|
||||
for file_name in config_template_files:
|
||||
src_file = templates_dir / file_name
|
||||
dst_file = Path(folder_name) / "src" / folder_name / file_name
|
||||
copy_template(src_file, dst_file, name, class_name, folder_name)
|
||||
if not parent_folder:
|
||||
for file_name in tools_template_files + config_template_files:
|
||||
src_file = templates_dir / file_name
|
||||
dst_file = src_folder / file_name
|
||||
copy_template(src_file, dst_file, name, class_name, folder_name)
|
||||
|
||||
click.secho(f"Crew {name} created successfully!", fg="green", bold=True)
|
||||
|
||||
|
||||
def copy_template(src, dst, name, class_name, folder_name):
|
||||
"""Copy a file from src to dst."""
|
||||
with open(src, "r") as file:
|
||||
content = file.read()
|
||||
|
||||
# Interpolate the content
|
||||
content = content.replace("{{name}}", name)
|
||||
content = content.replace("{{crew_name}}", class_name)
|
||||
content = content.replace("{{folder_name}}", folder_name)
|
||||
|
||||
# Write the interpolated content to the new file
|
||||
with open(dst, "w") as file:
|
||||
file.write(content)
|
||||
|
||||
click.secho(f" - Created {dst}", fg="green")
|
||||
|
||||
107  src/crewai/cli/create_pipeline.py  (Normal file)
@@ -0,0 +1,107 @@
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
|
||||
import click
|
||||
|
||||
|
||||
def create_pipeline(name, router=False):
|
||||
"""Create a new pipeline project."""
|
||||
folder_name = name.replace(" ", "_").replace("-", "_").lower()
|
||||
class_name = name.replace("_", " ").replace("-", " ").title().replace(" ", "")
|
||||
|
||||
click.secho(f"Creating pipeline {folder_name}...", fg="green", bold=True)
|
||||
|
||||
project_root = Path(folder_name)
|
||||
if project_root.exists():
|
||||
click.secho(f"Error: Folder {folder_name} already exists.", fg="red")
|
||||
return
|
||||
|
||||
# Create directory structure
|
||||
(project_root / "src" / folder_name).mkdir(parents=True)
|
||||
(project_root / "src" / folder_name / "pipelines").mkdir(parents=True)
|
||||
(project_root / "src" / folder_name / "crews").mkdir(parents=True)
|
||||
(project_root / "src" / folder_name / "tools").mkdir(parents=True)
|
||||
(project_root / "tests").mkdir(exist_ok=True)
|
||||
|
||||
# Create .env file
|
||||
with open(project_root / ".env", "w") as file:
|
||||
file.write("OPENAI_API_KEY=YOUR_API_KEY")
|
||||
|
||||
package_dir = Path(__file__).parent
|
||||
template_folder = "pipeline_router" if router else "pipeline"
|
||||
templates_dir = package_dir / "templates" / template_folder
|
||||
|
||||
# List of template files to copy
|
||||
root_template_files = [".gitignore", "pyproject.toml", "README.md"]
|
||||
src_template_files = ["__init__.py", "main.py"]
|
||||
tools_template_files = ["tools/__init__.py", "tools/custom_tool.py"]
|
||||
|
||||
if router:
|
||||
crew_folders = [
|
||||
"classifier_crew",
|
||||
"normal_crew",
|
||||
"urgent_crew",
|
||||
]
|
||||
pipelines_folders = [
|
||||
"pipelines/__init__.py",
|
||||
"pipelines/pipeline_classifier.py",
|
||||
"pipelines/pipeline_normal.py",
|
||||
"pipelines/pipeline_urgent.py",
|
||||
]
|
||||
else:
|
||||
crew_folders = [
|
||||
"research_crew",
|
||||
"write_linkedin_crew",
|
||||
"write_x_crew",
|
||||
]
|
||||
pipelines_folders = ["pipelines/__init__.py", "pipelines/pipeline.py"]
|
||||
|
||||
def process_file(src_file, dst_file):
|
||||
with open(src_file, "r") as file:
|
||||
content = file.read()
|
||||
|
||||
content = content.replace("{{name}}", name)
|
||||
content = content.replace("{{crew_name}}", class_name)
|
||||
content = content.replace("{{folder_name}}", folder_name)
|
||||
content = content.replace("{{pipeline_name}}", class_name)
|
||||
|
||||
with open(dst_file, "w") as file:
|
||||
file.write(content)
|
||||
|
||||
# Copy and process root template files
|
||||
for file_name in root_template_files:
|
||||
src_file = templates_dir / file_name
|
||||
dst_file = project_root / file_name
|
||||
process_file(src_file, dst_file)
|
||||
|
||||
# Copy and process src template files
|
||||
for file_name in src_template_files:
|
||||
src_file = templates_dir / file_name
|
||||
dst_file = project_root / "src" / folder_name / file_name
|
||||
process_file(src_file, dst_file)
|
||||
|
||||
# Copy tools files
|
||||
for file_name in tools_template_files:
|
||||
src_file = templates_dir / file_name
|
||||
dst_file = project_root / "src" / folder_name / file_name
|
||||
shutil.copy(src_file, dst_file)
|
||||
|
||||
# Copy pipelines folders
|
||||
for file_name in pipelines_folders:
|
||||
src_file = templates_dir / file_name
|
||||
dst_file = project_root / "src" / folder_name / file_name
|
||||
process_file(src_file, dst_file)
|
||||
|
||||
# Copy crew folders
|
||||
for crew_folder in crew_folders:
|
||||
src_crew_folder = templates_dir / "crews" / crew_folder
|
||||
dst_crew_folder = project_root / "src" / folder_name / "crews" / crew_folder
|
||||
if src_crew_folder.exists():
|
||||
shutil.copytree(src_crew_folder, dst_crew_folder)
|
||||
else:
|
||||
click.secho(
|
||||
f"Warning: Crew folder {crew_folder} not found in template.",
|
||||
fg="yellow",
|
||||
)
|
||||
|
||||
click.secho(f"Pipeline {name} created successfully!", fg="green", bold=True)
|
||||
66  src/crewai/cli/deploy/api.py  (Normal file)
@@ -0,0 +1,66 @@
|
||||
from os import getenv
|
||||
|
||||
import requests
|
||||
|
||||
from crewai.cli.deploy.utils import get_crewai_version
|
||||
|
||||
|
||||
class CrewAPI:
|
||||
"""
|
||||
CrewAPI class to interact with the crewAI+ API.
|
||||
"""
|
||||
|
||||
def __init__(self, api_key: str) -> None:
|
||||
self.api_key = api_key
|
||||
self.headers = {
|
||||
"Authorization": f"Bearer {api_key}",
|
||||
"Content-Type": "application/json",
|
||||
"User-Agent": f"CrewAI-CLI/{get_crewai_version()}",
|
||||
}
|
||||
self.base_url = getenv(
|
||||
"CREWAI_BASE_URL", "https://dev.crewai.com/crewai_plus/api/v1/crews"
|
||||
)
|
||||
|
||||
def _make_request(self, method: str, endpoint: str, **kwargs) -> requests.Response:
|
||||
url = f"{self.base_url}/{endpoint}"
|
||||
return requests.request(method, url, headers=self.headers, **kwargs)
|
||||
|
||||
# Deploy
|
||||
def deploy_by_name(self, project_name: str) -> requests.Response:
|
||||
return self._make_request("POST", f"by-name/{project_name}/deploy")
|
||||
|
||||
def deploy_by_uuid(self, uuid: str) -> requests.Response:
|
||||
return self._make_request("POST", f"{uuid}/deploy")
|
||||
|
||||
# Status
|
||||
def status_by_name(self, project_name: str) -> requests.Response:
|
||||
return self._make_request("GET", f"by-name/{project_name}/status")
|
||||
|
||||
def status_by_uuid(self, uuid: str) -> requests.Response:
|
||||
return self._make_request("GET", f"{uuid}/status")
|
||||
|
||||
# Logs
|
||||
def logs_by_name(
|
||||
self, project_name: str, log_type: str = "deployment"
|
||||
) -> requests.Response:
|
||||
return self._make_request("GET", f"by-name/{project_name}/logs/{log_type}")
|
||||
|
||||
def logs_by_uuid(
|
||||
self, uuid: str, log_type: str = "deployment"
|
||||
) -> requests.Response:
|
||||
return self._make_request("GET", f"{uuid}/logs/{log_type}")
|
||||
|
||||
# Delete
|
||||
def delete_by_name(self, project_name: str) -> requests.Response:
|
||||
return self._make_request("DELETE", f"by-name/{project_name}")
|
||||
|
||||
def delete_by_uuid(self, uuid: str) -> requests.Response:
|
||||
return self._make_request("DELETE", f"{uuid}")
|
||||
|
||||
# List
|
||||
def list_crews(self) -> requests.Response:
|
||||
return self._make_request("GET", "")
|
||||
|
||||
# Create
|
||||
def create_crew(self, payload) -> requests.Response:
|
||||
return self._make_request("POST", "", json=payload)
|
||||
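A short sketch of calling the new `CrewAPI` client directly. In the CLI the bearer token comes from `get_auth_token()`; here it is a placeholder, and the import path mirrors the file's location in the diff:

```python
from crewai.cli.deploy.api import CrewAPI  # path per the diff

client = CrewAPI(api_key="example-bearer-token")

# Every helper returns the raw requests.Response; callers check status codes themselves.
response = client.status_by_name("my_project")
if response.status_code == 200:
    print(response.json())
else:
    print("Request failed with status", response.status_code)
```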
312  src/crewai/cli/deploy/main.py  (Normal file)
@@ -0,0 +1,312 @@
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from rich.console import Console
|
||||
|
||||
from crewai.telemetry import Telemetry
|
||||
from .api import CrewAPI
|
||||
from .utils import (
|
||||
fetch_and_json_env_file,
|
||||
get_auth_token,
|
||||
get_git_remote_url,
|
||||
get_project_name,
|
||||
)
|
||||
|
||||
console = Console()
|
||||
|
||||
|
||||
class DeployCommand:
|
||||
"""
|
||||
A class to handle deployment-related operations for CrewAI projects.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
Initialize the DeployCommand with project name and API client.
|
||||
"""
|
||||
try:
|
||||
self._telemetry = Telemetry()
|
||||
self._telemetry.set_tracer()
|
||||
access_token = get_auth_token()
|
||||
except Exception:
|
||||
self._deploy_signup_error_span = self._telemetry.deploy_signup_error_span(
|
||||
self
|
||||
)
|
||||
console.print(
|
||||
"Please sign up/login to CrewAI+ before using the CLI.",
|
||||
style="bold red",
|
||||
)
|
||||
console.print("Run 'crewai signup' to sign up/login.", style="bold green")
|
||||
raise SystemExit
|
||||
|
||||
self.project_name = get_project_name()
|
||||
if self.project_name is None:
|
||||
console.print("No project name found. Please ensure your project has a valid pyproject.toml file.", style="bold red")
|
||||
raise SystemExit
|
||||
|
||||
self.client = CrewAPI(api_key=access_token)
|
||||
|
||||
def _handle_error(self, json_response: Dict[str, Any]) -> None:
|
||||
"""
|
||||
Handle and display error messages from API responses.
|
||||
|
||||
Args:
|
||||
json_response (Dict[str, Any]): The JSON response containing error information.
|
||||
"""
|
||||
error = json_response.get("error", "Unknown error")
|
||||
message = json_response.get("message", "No message provided")
|
||||
console.print(f"Error: {error}", style="bold red")
|
||||
console.print(f"Message: {message}", style="bold red")
|
||||
|
||||
def _standard_no_param_error_message(self) -> None:
|
||||
"""
|
||||
Display a standard error message when no UUID or project name is available.
|
||||
"""
|
||||
console.print(
|
||||
"No UUID provided, project pyproject.toml not found or with error.",
|
||||
style="bold red",
|
||||
)
|
||||
|
||||
def _display_deployment_info(self, json_response: Dict[str, Any]) -> None:
|
||||
"""
|
||||
Display deployment information.
|
||||
|
||||
Args:
|
||||
json_response (Dict[str, Any]): The deployment information to display.
|
||||
"""
|
||||
console.print("Deploying the crew...\n", style="bold blue")
|
||||
for key, value in json_response.items():
|
||||
console.print(f"{key.title()}: [green]{value}[/green]")
|
||||
console.print("\nTo check the status of the deployment, run:")
|
||||
console.print("crewai deploy status")
|
||||
console.print(" or")
|
||||
console.print(f"crewai deploy status --uuid \"{json_response['uuid']}\"")
|
||||
|
||||
def _display_logs(self, log_messages: List[Dict[str, Any]]) -> None:
|
||||
"""
|
||||
Display log messages.
|
||||
|
||||
Args:
|
||||
log_messages (List[Dict[str, Any]]): The log messages to display.
|
||||
"""
|
||||
for log_message in log_messages:
|
||||
console.print(
|
||||
f"{log_message['timestamp']} - {log_message['level']}: {log_message['message']}"
|
||||
)
|
||||
|
||||
def deploy(self, uuid: Optional[str] = None) -> None:
|
||||
"""
|
||||
Deploy a crew using either UUID or project name.
|
||||
|
||||
Args:
|
||||
uuid (Optional[str]): The UUID of the crew to deploy.
|
||||
"""
|
||||
self._start_deployment_span = self._telemetry.start_deployment_span(self, uuid)
|
||||
console.print("Starting deployment...", style="bold blue")
|
||||
if uuid:
|
||||
response = self.client.deploy_by_uuid(uuid)
|
||||
elif self.project_name:
|
||||
response = self.client.deploy_by_name(self.project_name)
|
||||
else:
|
||||
self._standard_no_param_error_message()
|
||||
return
|
||||
|
||||
json_response = response.json()
|
||||
if response.status_code == 200:
|
||||
self._display_deployment_info(json_response)
|
||||
else:
|
||||
self._handle_error(json_response)
|
||||
|
||||
def create_crew(self) -> None:
|
||||
"""
|
||||
Create a new crew deployment.
|
||||
"""
|
||||
self._create_crew_deployment_span = self._telemetry.create_crew_deployment_span(
|
||||
self
|
||||
)
|
||||
console.print("Creating deployment...", style="bold blue")
|
||||
env_vars = fetch_and_json_env_file()
|
||||
remote_repo_url = get_git_remote_url()
|
||||
|
||||
if remote_repo_url is None:
|
||||
console.print("No remote repository URL found.", style="bold red")
|
||||
console.print("Please ensure your project has a valid remote repository.", style="yellow")
|
||||
return
|
||||
|
||||
self._confirm_input(env_vars, remote_repo_url)
|
||||
payload = self._create_payload(env_vars, remote_repo_url)
|
||||
|
||||
response = self.client.create_crew(payload)
|
||||
if response.status_code == 201:
|
||||
self._display_creation_success(response.json())
|
||||
else:
|
||||
self._handle_error(response.json())
|
||||
|
||||
def _confirm_input(self, env_vars: Dict[str, str], remote_repo_url: str) -> None:
|
||||
"""
|
||||
Confirm input parameters with the user.
|
||||
|
||||
Args:
|
||||
env_vars (Dict[str, str]): Environment variables.
|
||||
remote_repo_url (str): Remote repository URL.
|
||||
"""
|
||||
input(f"Press Enter to continue with the following Env vars: {env_vars}")
|
||||
input(
|
||||
f"Press Enter to continue with the following remote repository: {remote_repo_url}\n"
|
||||
)
|
||||
|
||||
def _create_payload(
|
||||
self,
|
||||
env_vars: Dict[str, str],
|
||||
remote_repo_url: str,
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Create the payload for crew creation.
|
||||
|
||||
Args:
|
||||
remote_repo_url (str): Remote repository URL.
|
||||
env_vars (Dict[str, str]): Environment variables.
|
||||
|
||||
Returns:
|
||||
Dict[str, Any]: The payload for crew creation.
|
||||
"""
|
||||
return {
|
||||
"deploy": {
|
||||
"name": self.project_name,
|
||||
"repo_clone_url": remote_repo_url,
|
||||
"env": env_vars,
|
||||
}
|
||||
}
|
||||
|
||||
def _display_creation_success(self, json_response: Dict[str, Any]) -> None:
|
||||
"""
|
||||
Display success message after crew creation.
|
||||
|
||||
Args:
|
||||
json_response (Dict[str, Any]): The response containing crew information.
|
||||
"""
|
||||
console.print("Deployment created successfully!\n", style="bold green")
|
||||
console.print(
|
||||
f"Name: {self.project_name} ({json_response['uuid']})", style="bold green"
|
||||
)
|
||||
console.print(f"Status: {json_response['status']}", style="bold green")
|
||||
console.print("\nTo (re)deploy the crew, run:")
|
||||
console.print("crewai deploy push")
|
||||
console.print(" or")
|
||||
console.print(f"crewai deploy push --uuid {json_response['uuid']}")
|
||||
|
||||
def list_crews(self) -> None:
|
||||
"""
|
||||
List all available crews.
|
||||
"""
|
||||
console.print("Listing all Crews\n", style="bold blue")
|
||||
|
||||
response = self.client.list_crews()
|
||||
json_response = response.json()
|
||||
if response.status_code == 200:
|
||||
self._display_crews(json_response)
|
||||
else:
|
||||
self._display_no_crews_message()
|
||||
|
||||
def _display_crews(self, crews_data: List[Dict[str, Any]]) -> None:
|
||||
"""
|
||||
Display the list of crews.
|
||||
|
||||
Args:
|
||||
crews_data (List[Dict[str, Any]]): List of crew data to display.
|
||||
"""
|
||||
for crew_data in crews_data:
|
||||
console.print(
|
||||
f"- {crew_data['name']} ({crew_data['uuid']}) [blue]{crew_data['status']}[/blue]"
|
||||
)
|
||||
|
||||
def _display_no_crews_message(self) -> None:
|
||||
"""
|
||||
Display a message when no crews are available.
|
||||
"""
|
||||
console.print("You don't have any Crews yet. Let's create one!", style="yellow")
|
||||
console.print(" crewai create crew <crew_name>", style="green")
|
||||
|
||||
def get_crew_status(self, uuid: Optional[str] = None) -> None:
|
||||
"""
|
||||
Get the status of a crew.
|
||||
|
||||
Args:
|
||||
uuid (Optional[str]): The UUID of the crew to check.
|
||||
"""
|
||||
console.print("Fetching deployment status...", style="bold blue")
|
||||
if uuid:
|
||||
response = self.client.status_by_uuid(uuid)
|
||||
elif self.project_name:
|
||||
response = self.client.status_by_name(self.project_name)
|
||||
else:
|
||||
self._standard_no_param_error_message()
|
||||
return
|
||||
|
||||
json_response = response.json()
|
||||
if response.status_code == 200:
|
||||
self._display_crew_status(json_response)
|
||||
else:
|
||||
self._handle_error(json_response)
|
||||
|
||||
def _display_crew_status(self, status_data: Dict[str, str]) -> None:
|
||||
"""
|
||||
Display the status of a crew.
|
||||
|
||||
Args:
|
||||
status_data (Dict[str, str]): The status data to display.
|
||||
"""
|
||||
console.print(f"Name:\t {status_data['name']}")
|
||||
console.print(f"Status:\t {status_data['status']}")
|
||||
|
||||
def get_crew_logs(self, uuid: Optional[str], log_type: str = "deployment") -> None:
|
||||
"""
|
||||
Get logs for a crew.
|
||||
|
||||
Args:
|
||||
uuid (Optional[str]): The UUID of the crew to get logs for.
|
||||
log_type (str): The type of logs to retrieve (default: "deployment").
|
||||
"""
|
||||
self._get_crew_logs_span = self._telemetry.get_crew_logs_span(
|
||||
self, uuid, log_type
|
||||
)
|
||||
console.print(f"Fetching {log_type} logs...", style="bold blue")
|
||||
|
||||
if uuid:
|
||||
response = self.client.logs_by_uuid(uuid, log_type)
|
||||
elif self.project_name:
|
||||
response = self.client.logs_by_name(self.project_name, log_type)
|
||||
else:
|
||||
self._standard_no_param_error_message()
|
||||
return
|
||||
|
||||
if response.status_code == 200:
|
||||
self._display_logs(response.json())
|
||||
else:
|
||||
self._handle_error(response.json())
|
||||
|
||||
def remove_crew(self, uuid: Optional[str]) -> None:
|
||||
"""
|
||||
Remove a crew deployment.
|
||||
|
||||
Args:
|
||||
uuid (Optional[str]): The UUID of the crew to remove.
|
||||
"""
|
||||
self._remove_crew_span = self._telemetry.remove_crew_span(self, uuid)
|
||||
console.print("Removing deployment...", style="bold blue")
|
||||
|
||||
if uuid:
|
||||
response = self.client.delete_by_uuid(uuid)
|
||||
elif self.project_name:
|
||||
response = self.client.delete_by_name(self.project_name)
|
||||
else:
|
||||
self._standard_no_param_error_message()
|
||||
return
|
||||
|
||||
if response.status_code == 204:
|
||||
console.print(
|
||||
f"Crew '{self.project_name}' removed successfully.", style="green"
|
||||
)
|
||||
else:
|
||||
console.print(
|
||||
f"Failed to remove crew '{self.project_name}'", style="bold red"
|
||||
)
|
||||
155  src/crewai/cli/deploy/utils.py  (Normal file)
@@ -0,0 +1,155 @@
|
||||
import sys
|
||||
import re
|
||||
import subprocess
|
||||
|
||||
from rich.console import Console
|
||||
|
||||
from ..authentication.utils import TokenManager
|
||||
|
||||
console = Console()
|
||||
|
||||
|
||||
if sys.version_info >= (3, 11):
|
||||
import tomllib
|
||||
|
||||
|
||||
# Drop the simple_toml_parser when we move to python3.11
|
||||
def simple_toml_parser(content):
|
||||
result = {}
|
||||
current_section = result
|
||||
for line in content.split('\n'):
|
||||
line = line.strip()
|
||||
if line.startswith('[') and line.endswith(']'):
|
||||
# New section
|
||||
section = line[1:-1].split('.')
|
||||
current_section = result
|
||||
for key in section:
|
||||
current_section = current_section.setdefault(key, {})
|
||||
elif '=' in line:
|
||||
key, value = line.split('=', 1)
|
||||
key = key.strip()
|
||||
value = value.strip().strip('"')
|
||||
current_section[key] = value
|
||||
return result
|
||||
|
||||
|
||||
def parse_toml(content):
|
||||
if sys.version_info >= (3, 11):
|
||||
return tomllib.loads(content)
|
||||
else:
|
||||
return simple_toml_parser(content)
|
||||
|
||||
|
||||
def get_git_remote_url() -> str | None:
|
||||
"""Get the Git repository's remote URL."""
|
||||
try:
|
||||
# Run the git remote -v command
|
||||
result = subprocess.run(
|
||||
["git", "remote", "-v"], capture_output=True, text=True, check=True
|
||||
)
|
||||
|
||||
# Get the output
|
||||
output = result.stdout
|
||||
|
||||
# Parse the output to find the origin URL
|
||||
matches = re.findall(r"origin\s+(.*?)\s+\(fetch\)", output)
|
||||
|
||||
if matches:
|
||||
return matches[0] # Return the first match (origin URL)
|
||||
else:
|
||||
console.print("No origin remote found.", style="bold red")
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
console.print(f"Error running trying to fetch the Git Repository: {e}", style="bold red")
|
||||
except FileNotFoundError:
|
||||
console.print("Git command not found. Make sure Git is installed and in your PATH.", style="bold red")
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def get_project_name(pyproject_path: str = "pyproject.toml") -> str | None:
|
||||
"""Get the project name from the pyproject.toml file."""
|
||||
try:
|
||||
# Read the pyproject.toml file
|
||||
with open(pyproject_path, "r") as f:
|
||||
pyproject_content = parse_toml(f.read())
|
||||
|
||||
# Extract the project name
|
||||
project_name = pyproject_content["tool"]["poetry"]["name"]
|
||||
|
||||
if "crewai" not in pyproject_content["tool"]["poetry"]["dependencies"]:
|
||||
raise Exception("crewai is not in the dependencies.")
|
||||
|
||||
return project_name
|
||||
|
||||
except FileNotFoundError:
|
||||
print(f"Error: {pyproject_path} not found.")
|
||||
except KeyError:
|
||||
print(f"Error: {pyproject_path} is not a valid pyproject.toml file.")
|
||||
except tomllib.TOMLDecodeError if sys.version_info >= (3, 11) else Exception as e: # type: ignore
|
||||
print(
|
||||
f"Error: {pyproject_path} is not a valid TOML file."
|
||||
if sys.version_info >= (3, 11)
|
||||
else f"Error reading the pyproject.toml file: {e}"
|
||||
)
|
||||
except Exception as e:
|
||||
print(f"Error reading the pyproject.toml file: {e}")
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def get_crewai_version(poetry_lock_path: str = "poetry.lock") -> str:
|
||||
"""Get the version number of crewai from the poetry.lock file."""
|
||||
try:
|
||||
with open(poetry_lock_path, "r") as f:
|
||||
lock_content = f.read()
|
||||
|
||||
match = re.search(
|
||||
r'\[\[package\]\]\s*name\s*=\s*"crewai"\s*version\s*=\s*"([^"]+)"',
|
||||
lock_content,
|
||||
re.DOTALL,
|
||||
)
|
||||
if match:
|
||||
return match.group(1)
|
||||
else:
|
||||
print("crewai package not found in poetry.lock")
|
||||
return "no-version-found"
|
||||
|
||||
except FileNotFoundError:
|
||||
print(f"Error: {poetry_lock_path} not found.")
|
||||
except Exception as e:
|
||||
print(f"Error reading the poetry.lock file: {e}")
|
||||
|
||||
return "no-version-found"
|
||||
|
||||
|
||||
def fetch_and_json_env_file(env_file_path: str = ".env") -> dict:
|
||||
"""Fetch the environment variables from a .env file and return them as a dictionary."""
|
||||
try:
|
||||
# Read the .env file
|
||||
with open(env_file_path, "r") as f:
|
||||
env_content = f.read()
|
||||
|
||||
# Parse the .env file content to a dictionary
|
||||
env_dict = {}
|
||||
for line in env_content.splitlines():
|
||||
if line.strip() and not line.strip().startswith("#"):
|
||||
key, value = line.split("=", 1)
|
||||
env_dict[key.strip()] = value.strip()
|
||||
|
||||
return env_dict
|
||||
|
||||
except FileNotFoundError:
|
||||
print(f"Error: {env_file_path} not found.")
|
||||
except Exception as e:
|
||||
print(f"Error reading the .env file: {e}")
|
||||
|
||||
return {}
|
||||
|
||||
|
||||
def get_auth_token() -> str:
|
||||
"""Get the authentication token."""
|
||||
access_token = TokenManager().get_token()
|
||||
if not access_token:
|
||||
raise Exception()
|
||||
return access_token
|
||||
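A small check of the TOML handling in this module: `parse_toml` uses the standard-library `tomllib` on Python 3.11+ and falls back to `simple_toml_parser` on older interpreters, so both paths should agree on a minimal `pyproject.toml` like the one below (import path per the diff):

```python
from crewai.cli.deploy.utils import parse_toml  # path per the diff

content = '''
[tool.poetry]
name = "my_project"

[tool.poetry.dependencies]
crewai = "0.54.0"
'''

data = parse_toml(content)
print(data["tool"]["poetry"]["name"])  # my_project
```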
21  src/crewai/cli/install_crew.py  (Normal file)
@@ -0,0 +1,21 @@
|
||||
import subprocess
|
||||
|
||||
import click
|
||||
|
||||
|
||||
def install_crew() -> None:
|
||||
"""
|
||||
Install the crew by running the Poetry command to lock and install.
|
||||
"""
|
||||
try:
|
||||
subprocess.run(["poetry", "lock"], check=True, capture_output=False, text=True)
|
||||
subprocess.run(
|
||||
["poetry", "install"], check=True, capture_output=False, text=True
|
||||
)
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
click.echo(f"An error occurred while running the crew: {e}", err=True)
|
||||
click.echo(e.output, err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"An unexpected error occurred: {e}", err=True)
|
||||
54  src/crewai/cli/templates/crew/README.md  (Normal file)
@@ -0,0 +1,54 @@
|
||||
# {{crew_name}} Crew
|
||||
|
||||
Welcome to the {{crew_name}} Crew project, powered by [crewAI](https://crewai.com). This template is designed to help you set up a multi-agent AI system with ease, leveraging the powerful and flexible framework provided by crewAI. Our goal is to enable your agents to collaborate effectively on complex tasks, maximizing their collective intelligence and capabilities.
|
||||
|
||||
## Installation
|
||||
|
||||
Ensure you have Python >=3.10 <=3.13 installed on your system. This project uses [Poetry](https://python-poetry.org/) for dependency management and package handling, offering a seamless setup and execution experience.
|
||||
|
||||
First, if you haven't already, install Poetry:
|
||||
|
||||
```bash
|
||||
pip install poetry
|
||||
```
|
||||
|
||||
Next, navigate to your project directory and install the dependencies:
|
||||
|
||||
1. First lock the dependencies and install them by using the CLI command:
|
||||
```bash
|
||||
crewai install
|
||||
```
|
||||
### Customizing
|
||||
|
||||
**Add your `OPENAI_API_KEY` into the `.env` file**
|
||||
|
||||
- Modify `src/{{folder_name}}/config/agents.yaml` to define your agents
|
||||
- Modify `src/{{folder_name}}/config/tasks.yaml` to define your tasks
|
||||
- Modify `src/{{folder_name}}/crew.py` to add your own logic, tools and specific args
|
||||
- Modify `src/{{folder_name}}/main.py` to add custom inputs for your agents and tasks
|
||||
|
||||
## Running the Project
|
||||
|
||||
To kickstart your crew of AI agents and begin task execution, run this from the root folder of your project:
|
||||
|
||||
```bash
|
||||
$ crewai run
|
||||
```
|
||||
|
||||
This command initializes the {{name}} Crew, assembling the agents and assigning them tasks as defined in your configuration.
|
||||
|
||||
This example, unmodified, will create a `report.md` file in the root folder with the output of a research on LLMs.
|
||||
|
||||
## Understanding Your Crew
|
||||
|
||||
The {{name}} Crew is composed of multiple AI agents, each with unique roles, goals, and tools. These agents collaborate on a series of tasks, defined in `config/tasks.yaml`, leveraging their collective skills to achieve complex objectives. The `config/agents.yaml` file outlines the capabilities and configurations of each agent in your crew.
|
||||
|
||||
## Support
|
||||
|
||||
For support, questions, or feedback regarding the {{crew_name}} Crew or crewAI:
|
||||
- Visit our [documentation](https://docs.crewai.com)
|
||||
- Reach out to us through our [GitHub repository](https://github.com/joaomdmoura/crewai)
|
||||
- [Join our Discord](https://discord.com/invite/X4JWnZnxPb)
|
||||
- [Chat with our docs](https://chatg.pt/DWjSBZn)
|
||||
|
||||
Let's create wonders together with the power and simplicity of crewAI.
|
||||
src/crewai/cli/templates/crew/__init__.py (new empty file)
@@ -25,7 +25,7 @@ def train():
|
||||
"topic": "AI LLMs"
|
||||
}
|
||||
try:
|
||||
{{crew_name}}Crew().crew().train(n_iterations=int(sys.argv[1]), inputs=inputs)
|
||||
{{crew_name}}Crew().crew().train(n_iterations=int(sys.argv[1]), filename=sys.argv[2], inputs=inputs)
|
||||
|
||||
except Exception as e:
|
||||
raise Exception(f"An error occurred while training the crew: {e}")
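# With the extra `filename` argument, training is typically launched through the
# Poetry script entry point, e.g. `poetry run train 5 trained_data.pkl`
# (illustrative filename; the CLI-side `train_crew` helper expects a ".pkl" name).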
|
||||
@@ -6,7 +6,8 @@ authors = ["Your Name <you@example.com>"]
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = ">=3.10,<=3.13"
|
||||
crewai = { extras = ["tools"], version = "^0.46.0" }
|
||||
crewai = { extras = ["tools"], version = ">=0.54.0,<1.0.0" }
|
||||
|
||||
|
||||
[tool.poetry.scripts]
|
||||
{{folder_name}} = "{{folder_name}}.main:run"
|
||||
src/crewai/cli/templates/crew/tools/__init__.py (new empty file)
src/crewai/cli/templates/pipeline/.gitignore (new file, vendored, 2 lines)
@@ -0,0 +1,2 @@
|
||||
.env
|
||||
__pycache__/
|
||||
@@ -15,12 +15,11 @@ pip install poetry
|
||||
Next, navigate to your project directory and install the dependencies:
|
||||
|
||||
1. First lock the dependencies and then install them:
|
||||
|
||||
```bash
|
||||
poetry lock
|
||||
```
|
||||
```bash
|
||||
poetry install
|
||||
crewai install
|
||||
```
|
||||
|
||||
### Customizing
|
||||
|
||||
**Add your `OPENAI_API_KEY` into the `.env` file**
|
||||
@@ -35,11 +34,7 @@ poetry install
|
||||
To kickstart your crew of AI agents and begin task execution, run this from the root folder of your project:
|
||||
|
||||
```bash
|
||||
$ crewai run
|
||||
```
|
||||
or
|
||||
```bash
|
||||
poetry run {{folder_name}}
|
||||
crewai run
|
||||
```
|
||||
|
||||
This command initializes the {{name}} Crew, assembling the agents and assigning them tasks as defined in your configuration.
|
||||
@@ -53,6 +48,7 @@ The {{name}} Crew is composed of multiple AI agents, each with unique roles, goa
|
||||
## Support
|
||||
|
||||
For support, questions, or feedback regarding the {{crew_name}} Crew or crewAI:
|
||||
|
||||
- Visit our [documentation](https://docs.crewai.com)
|
||||
- Reach out to us through our [GitHub repository](https://github.com/joaomdmoura/crewai)
|
||||
- [Join our Discord](https://discord.com/invite/X4JWnZnxPb)
|
||||
src/crewai/cli/templates/pipeline/__init__.py (new empty file)
@@ -0,0 +1,19 @@
|
||||
researcher:
|
||||
role: >
|
||||
{topic} Senior Data Researcher
|
||||
goal: >
|
||||
Uncover cutting-edge developments in {topic}
|
||||
backstory: >
|
||||
You're a seasoned researcher with a knack for uncovering the latest
|
||||
developments in {topic}. Known for your ability to find the most relevant
|
||||
information and present it in a clear and concise manner.
|
||||
|
||||
reporting_analyst:
|
||||
role: >
|
||||
{topic} Reporting Analyst
|
||||
goal: >
|
||||
Create detailed reports based on {topic} data analysis and research findings
|
||||
backstory: >
|
||||
You're a meticulous analyst with a keen eye for detail. You're known for
|
||||
your ability to turn complex data into clear and concise reports, making
|
||||
it easy for others to understand and act on the information you provide.
|
||||
@@ -0,0 +1,16 @@
|
||||
research_task:
|
||||
description: >
|
||||
Conduct a thorough research about {topic}
|
||||
Make sure you find any interesting and relevant information given
|
||||
the current year is 2024.
|
||||
expected_output: >
|
||||
A list with 10 bullet points of the most relevant information about {topic}
|
||||
agent: researcher
|
||||
|
||||
reporting_task:
|
||||
description: >
|
||||
Review the context you got and expand each topic into a full section for a report.
|
||||
Make sure the report is detailed and contains any and all relevant information.
|
||||
expected_output: >
|
||||
A fully fledged report with a title and main topics, each with a full section of information.
|
||||
agent: reporting_analyst
|
||||
@@ -0,0 +1,58 @@
|
||||
from pydantic import BaseModel
|
||||
from crewai import Agent, Crew, Process, Task
|
||||
from crewai.project import CrewBase, agent, crew, task
|
||||
|
||||
# Uncomment the following line to use an example of a custom tool
|
||||
# from demo_pipeline.tools.custom_tool import MyCustomTool
|
||||
|
||||
# Check our tools documentations for more information on how to use them
|
||||
# from crewai_tools import SerperDevTool
|
||||
|
||||
|
||||
class ResearchReport(BaseModel):
|
||||
"""Research Report"""
|
||||
title: str
|
||||
body: str
|
||||
|
||||
@CrewBase
|
||||
class ResearchCrew():
|
||||
"""Research Crew"""
|
||||
agents_config = 'config/agents.yaml'
|
||||
tasks_config = 'config/tasks.yaml'
|
||||
|
||||
@agent
|
||||
def researcher(self) -> Agent:
|
||||
return Agent(
|
||||
config=self.agents_config['researcher'],
|
||||
verbose=True
|
||||
)
|
||||
|
||||
@agent
|
||||
def reporting_analyst(self) -> Agent:
|
||||
return Agent(
|
||||
config=self.agents_config['reporting_analyst'],
|
||||
verbose=True
|
||||
)
|
||||
|
||||
@task
|
||||
def research_task(self) -> Task:
|
||||
return Task(
|
||||
config=self.tasks_config['research_task'],
|
||||
)
|
||||
|
||||
@task
|
||||
def reporting_task(self) -> Task:
|
||||
return Task(
|
||||
config=self.tasks_config['reporting_task'],
|
||||
output_pydantic=ResearchReport
|
||||
)
|
||||
|
||||
@crew
|
||||
def crew(self) -> Crew:
|
||||
"""Creates the Research Crew"""
|
||||
return Crew(
|
||||
agents=self.agents, # Automatically created by the @agent decorator
|
||||
tasks=self.tasks, # Automatically created by the @task decorator
|
||||
process=Process.sequential,
|
||||
verbose=True,
|
||||
)
|
||||
@@ -0,0 +1,51 @@
|
||||
from crewai import Agent, Crew, Process, Task
|
||||
from crewai.project import CrewBase, agent, crew, task
|
||||
|
||||
# Uncomment the following line to use an example of a custom tool
|
||||
# from {{folder_name}}.tools.custom_tool import MyCustomTool
|
||||
|
||||
# Check our tools documentations for more information on how to use them
|
||||
# from crewai_tools import SerperDevTool
|
||||
|
||||
@CrewBase
|
||||
class WriteLinkedInCrew():
    """Write LinkedIn Crew"""
|
||||
agents_config = 'config/agents.yaml'
|
||||
tasks_config = 'config/tasks.yaml'
|
||||
|
||||
@agent
|
||||
def researcher(self) -> Agent:
|
||||
return Agent(
|
||||
config=self.agents_config['researcher'],
|
||||
verbose=True
|
||||
)
|
||||
|
||||
@agent
|
||||
def reporting_analyst(self) -> Agent:
|
||||
return Agent(
|
||||
config=self.agents_config['reporting_analyst'],
|
||||
verbose=True
|
||||
)
|
||||
|
||||
@task
|
||||
def research_task(self) -> Task:
|
||||
return Task(
|
||||
config=self.tasks_config['research_task'],
|
||||
)
|
||||
|
||||
@task
|
||||
def reporting_task(self) -> Task:
|
||||
return Task(
|
||||
config=self.tasks_config['reporting_task'],
|
||||
output_file='report.md'
|
||||
)
|
||||
|
||||
@crew
|
||||
def crew(self) -> Crew:
|
||||
"""Creates the {{crew_name}} crew"""
|
||||
return Crew(
|
||||
agents=self.agents, # Automatically created by the @agent decorator
|
||||
tasks=self.tasks, # Automatically created by the @task decorator
|
||||
process=Process.sequential,
|
||||
verbose=True,
|
||||
)
|
||||
@@ -0,0 +1,14 @@
|
||||
x_writer_agent:
|
||||
role: >
|
||||
Expert Social Media Content Creator specializing in short form written content
|
||||
goal: >
|
||||
Create viral-worthy, engaging short form posts that distill complex {topic} information
|
||||
into compelling 280-character messages
|
||||
backstory: >
|
||||
You're a social media virtuoso with a particular talent for short form content. Your posts
|
||||
consistently go viral due to your ability to craft hooks that stop users mid-scroll.
|
||||
You've studied the techniques of social media masters like Justin Welsh, Dickie Bush,
|
||||
Nicolas Cole, and Shaan Puri, incorporating their best practices into your own unique style.
|
||||
Your superpower is taking intricate {topic} concepts and transforming them into
|
||||
bite-sized, shareable content that resonates with a wide audience. You know exactly
|
||||
how to structure a post for maximum impact and engagement.
|
||||
@@ -0,0 +1,22 @@
|
||||
write_x_task:
|
||||
description: >
|
||||
Using the research report provided, create an engaging short form post about {topic}.
|
||||
Your post should have a great hook, summarize key points, and be structured for easy
|
||||
consumption on a digital platform. The post must be under 280 characters.
|
||||
Follow these guidelines:
|
||||
1. Start with an attention-grabbing hook
|
||||
2. Condense the main insights from the research
|
||||
3. Use clear, concise language
|
||||
4. Include a call-to-action or thought-provoking question if space allows
|
||||
5. Ensure the post flows well and is easy to read quickly
|
||||
|
||||
Here is the research report you will be using:
|
||||
|
||||
Title: {title}
|
||||
Research:
|
||||
{body}
|
||||
|
||||
expected_output: >
|
||||
A compelling X post under 280 characters that effectively summarizes the key findings
|
||||
about {topic}, starts with a strong hook, and is optimized for engagement on the platform.
|
||||
agent: x_writer_agent
|
||||
@@ -0,0 +1,36 @@
|
||||
from crewai import Agent, Crew, Process, Task
|
||||
from crewai.project import CrewBase, agent, crew, task
|
||||
|
||||
# Uncomment the following line to use an example of a custom tool
|
||||
# from demo_pipeline.tools.custom_tool import MyCustomTool
|
||||
|
||||
# Check our tools documentations for more information on how to use them
|
||||
# from crewai_tools import SerperDevTool
|
||||
|
||||
|
||||
@CrewBase
|
||||
class WriteXCrew:
    """Write X Crew"""
|
||||
|
||||
agents_config = "config/agents.yaml"
|
||||
tasks_config = "config/tasks.yaml"
|
||||
|
||||
@agent
|
||||
def x_writer_agent(self) -> Agent:
|
||||
return Agent(config=self.agents_config["x_writer_agent"], verbose=True)
|
||||
|
||||
@task
|
||||
def write_x_task(self) -> Task:
|
||||
return Task(
|
||||
config=self.tasks_config["write_x_task"],
|
||||
)
|
||||
|
||||
@crew
|
||||
def crew(self) -> Crew:
|
||||
"""Creates the Write X Crew"""
|
||||
return Crew(
|
||||
agents=self.agents, # Automatically created by the @agent decorator
|
||||
tasks=self.tasks, # Automatically created by the @task decorator
|
||||
process=Process.sequential,
|
||||
verbose=True,
|
||||
)
|
||||
src/crewai/cli/templates/pipeline/main.py (new file, 26 lines)
@@ -0,0 +1,26 @@
|
||||
#!/usr/bin/env python
|
||||
import asyncio
|
||||
from {{folder_name}}.pipelines.pipeline import {{pipeline_name}}Pipeline
|
||||
|
||||
async def run():
|
||||
"""
|
||||
Run the pipeline.
|
||||
"""
|
||||
inputs = [
|
||||
{"topic": "AI wearables"},
|
||||
]
|
||||
pipeline = {{pipeline_name}}Pipeline()
|
||||
results = await pipeline.kickoff(inputs)
|
||||
|
||||
# Process and print results
|
||||
for result in results:
|
||||
print(f"Raw output: {result.raw}")
|
||||
if result.json_dict:
|
||||
print(f"JSON output: {result.json_dict}")
|
||||
print("\n")
|
||||
|
||||
def main():
|
||||
asyncio.run(run())
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
src/crewai/cli/templates/pipeline/pipelines/pipeline.py (new file, 87 lines)
@@ -0,0 +1,87 @@
|
||||
"""
|
||||
This pipeline file includes two different examples to demonstrate the flexibility of crewAI pipelines.
|
||||
|
||||
Example 1: Two-Stage Pipeline
|
||||
-----------------------------
|
||||
This pipeline consists of two crews:
|
||||
1. ResearchCrew: Performs research on a given topic.
|
||||
2. WriteXCrew: Generates an X (Twitter) post based on the research findings.
|
||||
|
||||
Key features:
|
||||
- The ResearchCrew's final task uses output_json to store all research findings in a JSON object.
|
||||
- This JSON object is then passed to the WriteXCrew, where tasks can access the research findings.
|
||||
|
||||
Example 2: Two-Stage Pipeline with Parallel Execution
|
||||
-------------------------------------------------------
|
||||
This pipeline consists of three crews:
|
||||
1. ResearchCrew: Performs research on a given topic.
|
||||
2. WriteXCrew and WriteLinkedInCrew: Run in parallel, using the research findings to generate posts for X and LinkedIn, respectively.
|
||||
|
||||
Key features:
|
||||
- Demonstrates the ability to run multiple crews in parallel.
|
||||
- Shows how to structure a pipeline with both sequential and parallel stages.
|
||||
|
||||
Usage:
|
||||
- To switch between examples, comment/uncomment the respective code blocks below.
|
||||
- Ensure that you have implemented all necessary crew classes (ResearchCrew, WriteXCrew, WriteLinkedInCrew) before running.
|
||||
"""
|
||||
|
||||
# Common imports for both examples
|
||||
from crewai import Pipeline
|
||||
|
||||
|
||||
|
||||
# Uncomment the crews you need for your chosen example
|
||||
from ..crews.research_crew.research_crew import ResearchCrew
|
||||
from ..crews.write_x_crew.write_x_crew import WriteXCrew
|
||||
# from ..crews.write_linkedin_crew.write_linkedin_crew import WriteLinkedInCrew  # Uncomment for Example 2
|
||||
|
||||
# EXAMPLE 1: Two-Stage Pipeline
|
||||
# -----------------------------
|
||||
# Uncomment the following code block to use Example 1
|
||||
|
||||
class {{pipeline_name}}Pipeline:
|
||||
def __init__(self):
|
||||
# Initialize crews
|
||||
self.research_crew = ResearchCrew().crew()
|
||||
self.write_x_crew = WriteXCrew().crew()
|
||||
|
||||
def create_pipeline(self):
|
||||
return Pipeline(
|
||||
stages=[
|
||||
self.research_crew,
|
||||
self.write_x_crew
|
||||
]
|
||||
)
|
||||
|
||||
async def kickoff(self, inputs):
|
||||
pipeline = self.create_pipeline()
|
||||
results = await pipeline.kickoff(inputs)
|
||||
return results
|
||||
|
||||
|
||||
# EXAMPLE 2: Two-Stage Pipeline with Parallel Execution
|
||||
# -------------------------------------------------------
|
||||
# Uncomment the following code block to use Example 2
|
||||
|
||||
# @PipelineBase
|
||||
# class {{pipeline_name}}Pipeline:
|
||||
# def __init__(self):
|
||||
# # Initialize crews
|
||||
# self.research_crew = ResearchCrew().crew()
|
||||
# self.write_x_crew = WriteXCrew().crew()
|
||||
# self.write_linkedin_crew = WriteLinkedInCrew().crew()
|
||||
|
||||
# @pipeline
|
||||
# def create_pipeline(self):
|
||||
# return Pipeline(
|
||||
# stages=[
|
||||
# self.research_crew,
|
||||
# [self.write_x_crew, self.write_linkedin_crew] # Parallel execution
|
||||
# ]
|
||||
# )
|
||||
|
||||
# async def run(self, inputs):
|
||||
# pipeline = self.create_pipeline()
|
||||
# results = await pipeline.kickoff(inputs)
|
||||
# return results
|
||||
src/crewai/cli/templates/pipeline/pyproject.toml (new file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
[tool.poetry]
|
||||
name = "{{folder_name}}"
|
||||
version = "0.1.0"
|
||||
description = "{{name}} using crewAI"
|
||||
authors = ["Your Name <you@example.com>"]
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = ">=3.10,<=3.13"
|
||||
crewai = { extras = ["tools"], version = ">=0.54.0,<1.0.0" }
|
||||
asyncio = "*"
|
||||
|
||||
[tool.poetry.scripts]
|
||||
{{folder_name}} = "{{folder_name}}.main:main"
|
||||
|
||||
[build-system]
|
||||
requires = ["poetry-core"]
|
||||
build-backend = "poetry.core.masonry.api"
|
||||
src/crewai/cli/templates/pipeline/tools/__init__.py (new empty file)
src/crewai/cli/templates/pipeline/tools/custom_tool.py (new file, 12 lines)
@@ -0,0 +1,12 @@
|
||||
from crewai_tools import BaseTool
|
||||
|
||||
|
||||
class MyCustomTool(BaseTool):
|
||||
name: str = "Name of my tool"
|
||||
description: str = (
    "Clear description for what this tool is useful for; your agent will need this information to use it."
|
||||
)
|
||||
|
||||
def _run(self, argument: str) -> str:
|
||||
# Implementation goes here
|
||||
return "this is an example of a tool output, ignore it and move along."
|
||||
src/crewai/cli/templates/pipeline_router/.gitignore (new file, vendored, 2 lines)
@@ -0,0 +1,2 @@
|
||||
.env
|
||||
__pycache__/
|
||||
src/crewai/cli/templates/pipeline_router/README.md (new file, 54 lines)
@@ -0,0 +1,54 @@
|
||||
# {{crew_name}} Crew
|
||||
|
||||
Welcome to the {{crew_name}} Crew project, powered by [crewAI](https://crewai.com). This template is designed to help you set up a multi-agent AI system with ease, leveraging the powerful and flexible framework provided by crewAI. Our goal is to enable your agents to collaborate effectively on complex tasks, maximizing their collective intelligence and capabilities.
|
||||
|
||||
## Installation
|
||||
|
||||
Ensure you have Python >=3.10 <=3.13 installed on your system. This project uses [Poetry](https://python-poetry.org/) for dependency management and package handling, offering a seamless setup and execution experience.
|
||||
|
||||
First, if you haven't already, install Poetry:
|
||||
|
||||
```bash
|
||||
pip install poetry
|
||||
```
|
||||
|
||||
Next, navigate to your project directory and install the dependencies:
|
||||
|
||||
1. First lock the dependencies and then install them:
|
||||
```bash
|
||||
crewai install
|
||||
```
|
||||
### Customizing
|
||||
|
||||
**Add your `OPENAI_API_KEY` into the `.env` file**
|
||||
|
||||
- Modify `src/{{folder_name}}/config/agents.yaml` to define your agents
|
||||
- Modify `src/{{folder_name}}/config/tasks.yaml` to define your tasks
|
||||
- Modify `src/{{folder_name}}/crew.py` to add your own logic, tools and specific args
|
||||
- Modify `src/{{folder_name}}/main.py` to add custom inputs for your agents and tasks
|
||||
|
||||
## Running the Project
|
||||
|
||||
To kickstart your crew of AI agents and begin task execution, run this from the root folder of your project:
|
||||
|
||||
```bash
|
||||
crewai run
|
||||
```
|
||||
|
||||
This command initializes the {{name}} Crew, assembling the agents and assigning them tasks as defined in your configuration.
|
||||
|
||||
This example, unmodified, will create a `report.md` file in the root folder with the output of a research on LLMs.
|
||||
|
||||
## Understanding Your Crew
|
||||
|
||||
The {{name}} Crew is composed of multiple AI agents, each with unique roles, goals, and tools. These agents collaborate on a series of tasks, defined in `config/tasks.yaml`, leveraging their collective skills to achieve complex objectives. The `config/agents.yaml` file outlines the capabilities and configurations of each agent in your crew.
|
||||
|
||||
## Support
|
||||
|
||||
For support, questions, or feedback regarding the {{crew_name}} Crew or crewAI:
|
||||
- Visit our [documentation](https://docs.crewai.com)
|
||||
- Reach out to us through our [GitHub repository](https://github.com/joaomdmoura/crewai)
|
||||
- [Join our Discord](https://discord.com/invite/X4JWnZnxPb)
|
||||
- [Chat with our docs](https://chatg.pt/DWjSBZn)
|
||||
|
||||
Let's create wonders together with the power and simplicity of crewAI.
|
||||
src/crewai/cli/templates/pipeline_router/config/agents.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
|
||||
researcher:
|
||||
role: >
|
||||
{topic} Senior Data Researcher
|
||||
goal: >
|
||||
Uncover cutting-edge developments in {topic}
|
||||
backstory: >
|
||||
You're a seasoned researcher with a knack for uncovering the latest
|
||||
developments in {topic}. Known for your ability to find the most relevant
|
||||
information and present it in a clear and concise manner.
|
||||
|
||||
reporting_analyst:
|
||||
role: >
|
||||
{topic} Reporting Analyst
|
||||
goal: >
|
||||
Create detailed reports based on {topic} data analysis and research findings
|
||||
backstory: >
|
||||
You're a meticulous analyst with a keen eye for detail. You're known for
|
||||
your ability to turn complex data into clear and concise reports, making
|
||||
it easy for others to understand and act on the information you provide.
|
||||
src/crewai/cli/templates/pipeline_router/config/tasks.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
research_task:
|
||||
description: >
|
||||
Conduct a thorough research about {topic}
|
||||
Make sure you find any interesting and relevant information given
|
||||
the current year is 2024.
|
||||
expected_output: >
|
||||
A list with 10 bullet points of the most relevant information about {topic}
|
||||
agent: researcher
|
||||
|
||||
reporting_task:
|
||||
description: >
|
||||
Review the context you got and expand each topic into a full section for a report.
|
||||
Make sure the report is detailed and contains any and all relevant information.
|
||||
expected_output: >
|
||||
A fully fledged report with the main topics, each with a full section of information.
|
||||
Formatted as markdown without '```'
|
||||
agent: reporting_analyst
|
||||
@@ -0,0 +1,40 @@
|
||||
from crewai import Agent, Crew, Process, Task
|
||||
from crewai.project import CrewBase, agent, crew, task
|
||||
from pydantic import BaseModel
|
||||
|
||||
# Uncomment the following line to use an example of a custom tool
|
||||
# from demo_pipeline.tools.custom_tool import MyCustomTool
|
||||
|
||||
# Check our tools documentations for more information on how to use them
|
||||
# from crewai_tools import SerperDevTool
|
||||
|
||||
class UrgencyScore(BaseModel):
|
||||
urgency_score: int
|
||||
|
||||
@CrewBase
|
||||
class ClassifierCrew:
|
||||
"""Email Classifier Crew"""
|
||||
|
||||
agents_config = "config/agents.yaml"
|
||||
tasks_config = "config/tasks.yaml"
|
||||
|
||||
@agent
|
||||
def classifier(self) -> Agent:
|
||||
return Agent(config=self.agents_config["classifier"], verbose=True)
|
||||
|
||||
@task
|
||||
def urgent_task(self) -> Task:
|
||||
return Task(
|
||||
config=self.tasks_config["classify_email"],
|
||||
output_pydantic=UrgencyScore,
|
||||
)
|
||||
|
||||
@crew
|
||||
def crew(self) -> Crew:
|
||||
"""Creates the Email Classifier Crew"""
|
||||
return Crew(
|
||||
agents=self.agents, # Automatically created by the @agent decorator
|
||||
tasks=self.tasks, # Automatically created by the @task decorator
|
||||
process=Process.sequential,
|
||||
verbose=True,
|
||||
)
|
||||
@@ -0,0 +1,7 @@
|
||||
classifier:
|
||||
role: >
|
||||
Email Classifier
|
||||
goal: >
|
||||
Classify the email: {email} as urgent or normal on a scale of 1 to 10, where 1 is not urgent and 10 is urgent. Return the urgency score only.
|
||||
backstory: >
|
||||
You are a highly efficient and experienced email classifier, trained to quickly assess and classify emails. Your ability to remain calm under pressure and provide concise, actionable responses has made you an invaluable asset in triaging incoming communications and maintaining smooth operations.
|
||||
@@ -0,0 +1,7 @@
|
||||
classify_email:
|
||||
description: >
|
||||
Classify the email: {email}
|
||||
as urgent or normal.
|
||||
expected_output: >
|
||||
Classify the email on a scale of 1 to 10, where 1 is not urgent and 10 is urgent. Return the urgency score only.
|
||||
agent: classifier
|
||||
@@ -0,0 +1,7 @@
|
||||
normal_handler:
|
||||
role: >
|
||||
Normal Email Processor
|
||||
goal: >
|
||||
Process normal emails and create an email to respond to the sender.
|
||||
backstory: >
|
||||
You are a highly efficient and experienced normal email handler, trained to quickly assess and respond to normal communications. Your ability to remain calm under pressure and provide concise, actionable responses has made you an invaluable asset in managing normal situations and maintaining smooth operations.
|
||||
@@ -0,0 +1,6 @@
|
||||
normal_task:
|
||||
description: >
|
||||
Process and respond to normal email quickly.
|
||||
expected_output: >
|
||||
An email response to the normal email.
|
||||
agent: normal_handler
|
||||
@@ -0,0 +1,36 @@
|
||||
from crewai import Agent, Crew, Process, Task
|
||||
from crewai.project import CrewBase, agent, crew, task
|
||||
|
||||
# Uncomment the following line to use an example of a custom tool
|
||||
# from demo_pipeline.tools.custom_tool import MyCustomTool
|
||||
|
||||
# Check our tools documentations for more information on how to use them
|
||||
# from crewai_tools import SerperDevTool
|
||||
|
||||
|
||||
@CrewBase
|
||||
class NormalCrew:
|
||||
"""Normal Email Crew"""
|
||||
|
||||
agents_config = "config/agents.yaml"
|
||||
tasks_config = "config/tasks.yaml"
|
||||
|
||||
@agent
|
||||
def normal_handler(self) -> Agent:
|
||||
return Agent(config=self.agents_config["normal_handler"], verbose=True)
|
||||
|
||||
@task
|
||||
def urgent_task(self) -> Task:
|
||||
return Task(
|
||||
config=self.tasks_config["normal_task"],
|
||||
)
|
||||
|
||||
@crew
|
||||
def crew(self) -> Crew:
|
||||
"""Creates the Normal Email Crew"""
|
||||
return Crew(
|
||||
agents=self.agents, # Automatically created by the @agent decorator
|
||||
tasks=self.tasks, # Automatically created by the @task decorator
|
||||
process=Process.sequential,
|
||||
verbose=True,
|
||||
)
|
||||
@@ -0,0 +1,7 @@
|
||||
urgent_handler:
|
||||
role: >
|
||||
Urgent Email Processor
|
||||
goal: >
|
||||
Process urgent emails and create an email to respond to the sender.
|
||||
backstory: >
|
||||
You are a highly efficient and experienced urgent email handler, trained to quickly assess and respond to time-sensitive communications. Your ability to remain calm under pressure and provide concise, actionable responses has made you an invaluable asset in managing critical situations and maintaining smooth operations.
|
||||
@@ -0,0 +1,6 @@
|
||||
urgent_task:
|
||||
description: >
|
||||
Process and respond to urgent email quickly.
|
||||
expected_output: >
|
||||
An email response to the urgent email.
|
||||
agent: urgent_handler
|
||||
@@ -0,0 +1,36 @@
|
||||
from crewai import Agent, Crew, Process, Task
|
||||
from crewai.project import CrewBase, agent, crew, task
|
||||
|
||||
# Uncomment the following line to use an example of a custom tool
|
||||
# from demo_pipeline.tools.custom_tool import MyCustomTool
|
||||
|
||||
# Check our tools documentations for more information on how to use them
|
||||
# from crewai_tools import SerperDevTool
|
||||
|
||||
|
||||
@CrewBase
|
||||
class UrgentCrew:
|
||||
"""Urgent Email Crew"""
|
||||
|
||||
agents_config = "config/agents.yaml"
|
||||
tasks_config = "config/tasks.yaml"
|
||||
|
||||
@agent
|
||||
def urgent_handler(self) -> Agent:
|
||||
return Agent(config=self.agents_config["urgent_handler"], verbose=True)
|
||||
|
||||
@task
|
||||
def urgent_task(self) -> Task:
|
||||
return Task(
|
||||
config=self.tasks_config["urgent_task"],
|
||||
)
|
||||
|
||||
@crew
|
||||
def crew(self) -> Crew:
|
||||
"""Creates the Urgent Email Crew"""
|
||||
return Crew(
|
||||
agents=self.agents, # Automatically created by the @agent decorator
|
||||
tasks=self.tasks, # Automatically created by the @task decorator
|
||||
process=Process.sequential,
|
||||
verbose=True,
|
||||
)
|
||||
src/crewai/cli/templates/pipeline_router/main.py (new file, 75 lines)
@@ -0,0 +1,75 @@
|
||||
#!/usr/bin/env python
|
||||
import asyncio
|
||||
from crewai.routers.router import Route
|
||||
from crewai.routers.router import Router
|
||||
|
||||
from {{folder_name}}.pipelines.pipeline_classifier import EmailClassifierPipeline
|
||||
from {{folder_name}}.pipelines.pipeline_normal import NormalPipeline
|
||||
from {{folder_name}}.pipelines.pipeline_urgent import UrgentPipeline
|
||||
|
||||
async def run():
|
||||
"""
|
||||
Run the pipeline.
|
||||
"""
|
||||
inputs = [
|
||||
{
|
||||
"email": """
|
||||
Subject: URGENT: Marketing Campaign Launch - Immediate Action Required
|
||||
Dear Team,
|
||||
I'm reaching out regarding our upcoming marketing campaign that requires your immediate attention and swift action. We're facing a critical deadline, and our success hinges on our ability to mobilize quickly.
|
||||
Key points:
|
||||
|
||||
Campaign launch: 48 hours from now
|
||||
Target audience: 250,000 potential customers
|
||||
Expected ROI: 35% increase in Q3 sales
|
||||
|
||||
What we need from you NOW:
|
||||
|
||||
Final approval on creative assets (due in 3 hours)
|
||||
Confirmation of media placements (due by end of day)
|
||||
Last-minute budget allocation for paid social media push
|
||||
|
||||
Our competitors are poised to launch similar campaigns, and we must act fast to maintain our market advantage. Delays could result in significant lost opportunities and potential revenue.
|
||||
Please prioritize this campaign above all other tasks. I'll be available for the next 24 hours to address any concerns or roadblocks.
|
||||
Let's make this happen!
|
||||
[Your Name]
|
||||
Marketing Director
|
||||
P.S. I'll be scheduling an emergency team meeting in 1 hour to discuss our action plan. Attendance is mandatory.
|
||||
"""
|
||||
}
|
||||
]
|
||||
|
||||
pipeline_classifier = EmailClassifierPipeline().create_pipeline()
|
||||
pipeline_urgent = UrgentPipeline().create_pipeline()
|
||||
pipeline_normal = NormalPipeline().create_pipeline()
|
||||
|
||||
router = Router(
|
||||
routes={
|
||||
"high_urgency": Route(
|
||||
condition=lambda x: x.get("urgency_score", 0) > 7,
|
||||
pipeline=pipeline_urgent
|
||||
),
|
||||
"low_urgency": Route(
|
||||
condition=lambda x: x.get("urgency_score", 0) <= 7,
|
||||
pipeline=pipeline_normal
|
||||
)
|
||||
},
|
||||
default=pipeline_normal
|
||||
)
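# The "urgency_score" key read by these route conditions comes from the
# ClassifierCrew's classify_email task, whose output_pydantic=UrgencyScore
# model defines that field.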
|
||||
|
||||
pipeline = pipeline_classifier >> router
|
||||
|
||||
results = await pipeline.kickoff(inputs)
|
||||
|
||||
# Process and print results
|
||||
for result in results:
|
||||
print(f"Raw output: {result.raw}")
|
||||
if result.json_dict:
|
||||
print(f"JSON output: {result.json_dict}")
|
||||
print("\n")
|
||||
|
||||
def main():
|
||||
asyncio.run(run())
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -0,0 +1,24 @@
|
||||
from crewai import Pipeline
|
||||
from crewai.project import PipelineBase
|
||||
from ..crews.classifier_crew.classifier_crew import ClassifierCrew
|
||||
|
||||
|
||||
@PipelineBase
|
||||
class EmailClassifierPipeline:
|
||||
def __init__(self):
|
||||
# Initialize crews
|
||||
self.classifier_crew = ClassifierCrew().crew()
|
||||
|
||||
def create_pipeline(self):
|
||||
return Pipeline(
|
||||
stages=[
|
||||
self.classifier_crew
|
||||
]
|
||||
)
|
||||
|
||||
async def kickoff(self, inputs):
|
||||
pipeline = self.create_pipeline()
|
||||
results = await pipeline.kickoff(inputs)
|
||||
return results
|
||||
|
||||
|
||||
@@ -0,0 +1,24 @@
|
||||
from crewai import Pipeline
|
||||
from crewai.project import PipelineBase
|
||||
from ..crews.normal_crew.normal_crew import NormalCrew
|
||||
|
||||
|
||||
@PipelineBase
|
||||
class NormalPipeline:
|
||||
def __init__(self):
|
||||
# Initialize crews
|
||||
self.normal_crew = NormalCrew().crew()
|
||||
|
||||
def create_pipeline(self):
|
||||
return Pipeline(
|
||||
stages=[
|
||||
self.normal_crew
|
||||
]
|
||||
)
|
||||
|
||||
async def kickoff(self, inputs):
|
||||
pipeline = self.create_pipeline()
|
||||
results = await pipeline.kickoff(inputs)
|
||||
return results
|
||||
|
||||
|
||||
@@ -0,0 +1,23 @@
|
||||
from crewai import Pipeline
|
||||
from crewai.project import PipelineBase
|
||||
from ..crews.urgent_crew.urgent_crew import UrgentCrew
|
||||
|
||||
@PipelineBase
|
||||
class UrgentPipeline:
|
||||
def __init__(self):
|
||||
# Initialize crews
|
||||
self.urgent_crew = UrgentCrew().crew()
|
||||
|
||||
def create_pipeline(self):
|
||||
return Pipeline(
|
||||
stages=[
|
||||
self.urgent_crew
|
||||
]
|
||||
)
|
||||
|
||||
async def kickoff(self, inputs):
|
||||
pipeline = self.create_pipeline()
|
||||
results = await pipeline.kickoff(inputs)
|
||||
return results
|
||||
|
||||
|
||||
src/crewai/cli/templates/pipeline_router/pyproject.toml (new file, 20 lines)
@@ -0,0 +1,20 @@
|
||||
[tool.poetry]
|
||||
name = "{{folder_name}}"
|
||||
version = "0.1.0"
|
||||
description = "{{name}} using crewAI"
|
||||
authors = ["Your Name <you@example.com>"]
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = ">=3.10,<=3.13"
|
||||
crewai = { extras = ["tools"], version = ">=0.54.0,<1.0.0" }
|
||||
|
||||
|
||||
[tool.poetry.scripts]
|
||||
{{folder_name}} = "{{folder_name}}.main:main"
|
||||
train = "{{folder_name}}.main:train"
|
||||
replay = "{{folder_name}}.main:replay"
|
||||
test = "{{folder_name}}.main:test"
|
||||
|
||||
[build-system]
|
||||
requires = ["poetry-core"]
|
||||
build-backend = "poetry.core.masonry.api"
|
||||
@@ -0,0 +1,12 @@
|
||||
from crewai_tools import BaseTool
|
||||
|
||||
|
||||
class MyCustomTool(BaseTool):
|
||||
name: str = "Name of my tool"
|
||||
description: str = (
    "Clear description for what this tool is useful for; your agent will need this information to use it."
|
||||
)
|
||||
|
||||
def _run(self, argument: str) -> str:
|
||||
# Implementation goes here
|
||||
return "this is an example of a tool output, ignore it and move along."
|
||||
@@ -3,19 +3,22 @@ import subprocess
|
||||
import click
|
||||
|
||||
|
||||
def train_crew(n_iterations: int) -> None:
|
||||
def train_crew(n_iterations: int, filename: str) -> None:
|
||||
"""
|
||||
Train the crew by running a command in the Poetry environment.
|
||||
|
||||
Args:
|
||||
n_iterations (int): The number of iterations to train the crew.
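filename (str): The name of the .pkl file in which the trained agent data will be saved.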
|
||||
"""
|
||||
command = ["poetry", "run", "train", str(n_iterations)]
|
||||
command = ["poetry", "run", "train", str(n_iterations), filename]
|
||||
|
||||
try:
|
||||
if n_iterations <= 0:
|
||||
raise ValueError("The number of iterations must be a positive integer.")
|
||||
|
||||
if not filename.endswith(".pkl"):
|
||||
raise ValueError("The filename must end with .pkl")
|
||||
|
||||
result = subprocess.run(command, capture_output=False, text=True, check=True)
|
||||
|
||||
if result.stderr:
|
||||
|
||||
src/crewai/cli/utils.py (new file, 18 lines)
@@ -0,0 +1,18 @@
|
||||
import click
|
||||
|
||||
|
||||
def copy_template(src, dst, name, class_name, folder_name):
|
||||
"""Copy a file from src to dst."""
|
||||
with open(src, "r") as file:
|
||||
content = file.read()
|
||||
|
||||
# Interpolate the content
|
||||
content = content.replace("{{name}}", name)
|
||||
content = content.replace("{{crew_name}}", class_name)
|
||||
content = content.replace("{{folder_name}}", folder_name)
|
||||
|
||||
# Write the interpolated content to the new file
|
||||
with open(dst, "w") as file:
|
||||
file.write(content)
|
||||
|
||||
click.secho(f" - Created {dst}", fg="green")
|
||||
@@ -1,15 +1,15 @@
|
||||
import asyncio
|
||||
import json
|
||||
import os
|
||||
import uuid
|
||||
from concurrent.futures import Future
|
||||
from hashlib import md5
|
||||
from typing import Any, Dict, List, Optional, Tuple, Union
|
||||
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
|
||||
|
||||
from langchain_core.callbacks import BaseCallbackHandler
|
||||
from pydantic import (
|
||||
UUID4,
|
||||
BaseModel,
|
||||
ConfigDict,
|
||||
Field,
|
||||
InstanceOf,
|
||||
Json,
|
||||
@@ -32,9 +32,9 @@ from crewai.tasks.conditional_task import ConditionalTask
|
||||
from crewai.tasks.task_output import TaskOutput
|
||||
from crewai.telemetry import Telemetry
|
||||
from crewai.tools.agent_tools import AgentTools
|
||||
from crewai.types.usage_metrics import UsageMetrics
|
||||
from crewai.utilities import I18N, FileHandler, Logger, RPMController
|
||||
from crewai.utilities.constants import (
|
||||
TRAINED_AGENTS_DATA_FILE,
|
||||
TRAINING_DATA_FILE,
|
||||
)
|
||||
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator
|
||||
@@ -47,10 +47,15 @@ from crewai.utilities.planning_handler import CrewPlanner
|
||||
from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
|
||||
from crewai.utilities.training_handler import CrewTrainingHandler
|
||||
|
||||
try:
|
||||
import agentops
|
||||
except ImportError:
|
||||
agentops = None
|
||||
agentops = None
|
||||
if os.environ.get("AGENTOPS_API_KEY"):
|
||||
try:
|
||||
import agentops # type: ignore
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.pipeline.pipeline import Pipeline
|
||||
|
||||
|
||||
class Crew(BaseModel):
|
||||
@@ -97,8 +102,8 @@ class Crew(BaseModel):
|
||||
default_factory=TaskOutputStorageHandler
|
||||
)
|
||||
|
||||
name: Optional[str] = Field(default=None)
|
||||
cache: bool = Field(default=True)
|
||||
model_config = ConfigDict(arbitrary_types_allowed=True)
|
||||
tasks: List[Task] = Field(default_factory=list)
|
||||
agents: List[BaseAgent] = Field(default_factory=list)
|
||||
process: Process = Field(default=Process.sequential)
|
||||
@@ -111,7 +116,7 @@ class Crew(BaseModel):
|
||||
default={"provider": "openai"},
|
||||
description="Configuration for the embedder to be used for the crew.",
|
||||
)
|
||||
usage_metrics: Optional[dict] = Field(
|
||||
usage_metrics: Optional[UsageMetrics] = Field(
|
||||
default=None,
|
||||
description="Metrics for the LLM usage during all tasks execution.",
|
||||
)
|
||||
@@ -147,8 +152,8 @@ class Crew(BaseModel):
|
||||
default=None,
|
||||
description="Path to the prompt json file to be used for the crew.",
|
||||
)
|
||||
output_log_file: Optional[Union[bool, str]] = Field(
|
||||
default=False,
|
||||
output_log_file: Optional[str] = Field(
|
||||
default=None,
|
||||
description="output_log_file",
|
||||
)
|
||||
planning: Optional[bool] = Field(
|
||||
@@ -356,7 +361,7 @@ class Crew(BaseModel):
|
||||
source = [agent.key for agent in self.agents] + [
|
||||
task.key for task in self.tasks
|
||||
]
|
||||
return md5("|".join(source).encode()).hexdigest()
|
||||
return md5("|".join(source).encode(), usedforsecurity=False).hexdigest()
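# usedforsecurity=False marks this MD5 digest as a fingerprint rather than a
# cryptographic hash (avoids failures on FIPS-restricted Python builds).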
|
||||
|
||||
def _setup_from_config(self):
|
||||
assert self.config is not None, "Config should not be None."
|
||||
@@ -386,7 +391,7 @@ class Crew(BaseModel):
|
||||
del task_config["agent"]
|
||||
return Task(**task_config, agent=task_agent)
|
||||
|
||||
def _setup_for_training(self) -> None:
|
||||
def _setup_for_training(self, filename: str) -> None:
|
||||
"""Sets up the crew for training."""
|
||||
self._train = True
|
||||
|
||||
@@ -397,11 +402,13 @@ class Crew(BaseModel):
|
||||
agent.allow_delegation = False
|
||||
|
||||
CrewTrainingHandler(TRAINING_DATA_FILE).initialize_file()
|
||||
CrewTrainingHandler(TRAINED_AGENTS_DATA_FILE).initialize_file()
|
||||
CrewTrainingHandler(filename).initialize_file()
|
||||
|
||||
def train(self, n_iterations: int, inputs: Optional[Dict[str, Any]] = {}) -> None:
|
||||
def train(
|
||||
self, n_iterations: int, filename: str, inputs: Optional[Dict[str, Any]] = {}
|
||||
) -> None:
|
||||
"""Trains the crew for a given number of iterations."""
|
||||
self._setup_for_training()
|
||||
self._setup_for_training(filename)
|
||||
|
||||
for n_iteration in range(n_iterations):
|
||||
self._train_iteration = n_iteration
|
||||
@@ -414,7 +421,7 @@ class Crew(BaseModel):
|
||||
training_data=training_data, agent_id=str(agent.id)
|
||||
)
|
||||
|
||||
CrewTrainingHandler(TRAINED_AGENTS_DATA_FILE).save_trained_data(
|
||||
CrewTrainingHandler(filename).save_trained_data(
|
||||
agent_id=str(agent.role), trained_data=result.model_dump()
|
||||
)
|
||||
|
||||
@@ -453,7 +460,7 @@ class Crew(BaseModel):
|
||||
if self.planning:
|
||||
self._handle_crew_planning()
|
||||
|
||||
metrics = []
|
||||
metrics: List[UsageMetrics] = []
|
||||
|
||||
if self.process == Process.sequential:
|
||||
result = self._run_sequential_process()
|
||||
@@ -463,11 +470,12 @@ class Crew(BaseModel):
|
||||
raise NotImplementedError(
|
||||
f"The process '{self.process}' is not implemented yet."
|
||||
)
|
||||
|
||||
metrics += [agent._token_process.get_summary() for agent in self.agents]
|
||||
|
||||
self.usage_metrics = {
|
||||
key: sum([m[key] for m in metrics if m is not None]) for key in metrics[0]
|
||||
}
|
||||
self.usage_metrics = UsageMetrics()
|
||||
for metric in metrics:
|
||||
self.usage_metrics.add_usage_metrics(metric)
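# Accumulate each agent's token usage summary into the crew-level UsageMetrics total.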
|
||||
|
||||
return result
|
||||
|
||||
@@ -476,12 +484,7 @@ class Crew(BaseModel):
|
||||
results: List[CrewOutput] = []
|
||||
|
||||
# Initialize the parent crew's usage metrics
|
||||
total_usage_metrics = {
|
||||
"total_tokens": 0,
|
||||
"prompt_tokens": 0,
|
||||
"completion_tokens": 0,
|
||||
"successful_requests": 0,
|
||||
}
|
||||
total_usage_metrics = UsageMetrics()
|
||||
|
||||
for input_data in inputs:
|
||||
crew = self.copy()
|
||||
@@ -489,8 +492,7 @@ class Crew(BaseModel):
|
||||
output = crew.kickoff(inputs=input_data)
|
||||
|
||||
if crew.usage_metrics:
|
||||
for key in total_usage_metrics:
|
||||
total_usage_metrics[key] += crew.usage_metrics.get(key, 0)
|
||||
total_usage_metrics.add_usage_metrics(crew.usage_metrics)
|
||||
|
||||
results.append(output)
|
||||
|
||||
@@ -519,29 +521,10 @@ class Crew(BaseModel):
|
||||
|
||||
results = await asyncio.gather(*tasks)
|
||||
|
||||
total_usage_metrics = {
|
||||
"total_tokens": 0,
|
||||
"prompt_tokens": 0,
|
||||
"completion_tokens": 0,
|
||||
"successful_requests": 0,
|
||||
}
|
||||
total_usage_metrics = UsageMetrics()
|
||||
for crew in crew_copies:
|
||||
if crew.usage_metrics:
|
||||
for key in total_usage_metrics:
|
||||
total_usage_metrics[key] += crew.usage_metrics.get(key, 0)
|
||||
|
||||
self.usage_metrics = total_usage_metrics
|
||||
|
||||
total_usage_metrics = {
|
||||
"total_tokens": 0,
|
||||
"prompt_tokens": 0,
|
||||
"completion_tokens": 0,
|
||||
"successful_requests": 0,
|
||||
}
|
||||
for crew in crew_copies:
|
||||
if crew.usage_metrics:
|
||||
for key in total_usage_metrics:
|
||||
total_usage_metrics[key] += crew.usage_metrics.get(key, 0)
|
||||
total_usage_metrics.add_usage_metrics(crew.usage_metrics)
|
||||
|
||||
self.usage_metrics = total_usage_metrics
|
||||
self._task_output_handler.reset()
|
||||
@@ -555,7 +538,7 @@ class Crew(BaseModel):
|
||||
)._handle_crew_planning()
|
||||
|
||||
for task, step_plan in zip(self.tasks, result.list_of_plans_per_task):
|
||||
task.description += step_plan
|
||||
task.description += step_plan.plan
|
||||
|
||||
def _store_execution_log(
|
||||
self,
|
||||
@@ -932,25 +915,18 @@ class Crew(BaseModel):
|
||||
)
|
||||
self._telemetry.end_crew(self, final_string_output)
|
||||
|
||||
def calculate_usage_metrics(self) -> Dict[str, int]:
|
||||
def calculate_usage_metrics(self) -> UsageMetrics:
|
||||
"""Calculates and returns the usage metrics."""
|
||||
total_usage_metrics = {
|
||||
"total_tokens": 0,
|
||||
"prompt_tokens": 0,
|
||||
"completion_tokens": 0,
|
||||
"successful_requests": 0,
|
||||
}
|
||||
total_usage_metrics = UsageMetrics()
|
||||
|
||||
for agent in self.agents:
|
||||
if hasattr(agent, "_token_process"):
|
||||
token_sum = agent._token_process.get_summary()
|
||||
for key in total_usage_metrics:
|
||||
total_usage_metrics[key] += token_sum.get(key, 0)
|
||||
total_usage_metrics.add_usage_metrics(token_sum)
|
||||
|
||||
if self.manager_agent and hasattr(self.manager_agent, "_token_process"):
|
||||
token_sum = self.manager_agent._token_process.get_summary()
|
||||
for key in total_usage_metrics:
|
||||
total_usage_metrics[key] += token_sum.get(key, 0)
|
||||
total_usage_metrics.add_usage_metrics(token_sum)
|
||||
|
||||
return total_usage_metrics
|
||||
|
||||
@@ -961,6 +937,9 @@ class Crew(BaseModel):
|
||||
inputs: Optional[Dict[str, Any]] = None,
|
||||
) -> None:
|
||||
"""Test and evaluate the Crew with the given inputs for n iterations."""
|
||||
self._test_execution_span = self._telemetry.test_execution_span(
|
||||
self, n_iterations, inputs, openai_model_name
|
||||
)
|
||||
evaluator = CrewEvaluator(self, openai_model_name)
|
||||
|
||||
for i in range(1, n_iterations + 1):
|
||||
@@ -969,5 +948,17 @@ class Crew(BaseModel):
|
||||
|
||||
evaluator.print_crew_evaluation_result()
|
||||
|
||||
def __rshift__(self, other: "Crew") -> "Pipeline":
|
||||
"""
|
||||
Implements the >> operator to add another Crew to an existing Pipeline.
|
||||
"""
|
||||
from crewai.pipeline.pipeline import Pipeline
|
||||
|
||||
if not isinstance(other, Crew):
|
||||
raise TypeError(
|
||||
f"Unsupported operand type for >>: '{type(self).__name__}' and '{type(other).__name__}'"
|
||||
)
|
||||
return Pipeline(stages=[self, other])
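# Usage sketch: `research_crew >> write_crew` evaluates to
# Pipeline(stages=[research_crew, write_crew]), matching the pipeline examples above.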
|
||||
|
||||
def __repr__(self):
|
||||
return f"Crew(id={self.id}, process={self.process}, number_of_agents={len(self.agents)}, number_of_tasks={len(self.tasks)})"
|
||||
|
||||
@@ -1 +1,3 @@
|
||||
from .crew_output import CrewOutput
|
||||
|
||||
__all__ = ["CrewOutput"]
|
||||
|
||||
@@ -5,6 +5,7 @@ from pydantic import BaseModel, Field
|
||||
|
||||
from crewai.tasks.output_format import OutputFormat
|
||||
from crewai.tasks.task_output import TaskOutput
|
||||
from crewai.types.usage_metrics import UsageMetrics
|
||||
|
||||
|
||||
class CrewOutput(BaseModel):
|
||||
@@ -20,9 +21,7 @@ class CrewOutput(BaseModel):
|
||||
tasks_output: list[TaskOutput] = Field(
|
||||
description="Output of each task", default=[]
|
||||
)
|
||||
token_usage: Dict[str, Any] = Field(
|
||||
description="Processed token summary", default={}
|
||||
)
|
||||
token_usage: UsageMetrics = Field(description="Processed token summary", default={})
|
||||
|
||||
@property
|
||||
def json(self) -> Optional[str]:
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
from .entity.entity_memory import EntityMemory
|
||||
from .long_term.long_term_memory import LongTermMemory
|
||||
from .short_term.short_term_memory import ShortTermMemory
|
||||
|
||||
__all__ = ["EntityMemory", "LongTermMemory", "ShortTermMemory"]
|
||||
|
||||
@@ -21,7 +21,7 @@ class Memory:
|
||||
if agent:
|
||||
metadata["agent"] = agent
|
||||
|
||||
self.storage.save(value, metadata) # type: ignore # Maybe BUG? Should be self.storage.save(key, value, metadata)
|
||||
self.storage.save(value, metadata)
|
||||
|
||||
def search(self, query: str) -> Dict[str, Any]:
|
||||
return self.storage.search(query)
|
||||
|
||||
@@ -7,6 +7,7 @@ from typing import Any, Dict, List, Optional
|
||||
|
||||
from embedchain import App
|
||||
from embedchain.llm.base import BaseLlm
|
||||
from embedchain.models.data_type import DataType
|
||||
from embedchain.vectordb.chroma import InvalidDimensionException
|
||||
|
||||
from crewai.memory.storage.interface import Storage
|
||||
@@ -47,7 +48,7 @@ class RAGStorage(Storage):
|
||||
os.environ["OPENAI_API_KEY"] = "fake"
|
||||
|
||||
agents = crew.agents if crew else []
|
||||
agents = [agent.role for agent in agents]
|
||||
agents = [self._sanitize_role(agent.role) for agent in agents]
|
||||
agents = "_".join(agents)
|
||||
|
||||
config = {
|
||||
@@ -78,6 +79,12 @@ class RAGStorage(Storage):
|
||||
if allow_reset:
|
||||
self.app.reset()
|
||||
|
||||
def _sanitize_role(self, role: str) -> str:
|
||||
"""
|
||||
Sanitizes agent roles to ensure valid directory names.
|
||||
"""
|
||||
return role.replace("\n", "").replace(" ", "_").replace("/", "_")
|
||||
|
||||
def save(self, value: Any, metadata: Dict[str, Any]) -> None:
|
||||
self._generate_embedding(value, metadata)
|
||||
|
||||
@@ -101,8 +108,7 @@ class RAGStorage(Storage):
|
||||
return [r for r in results if r["metadata"]["score"] >= score_threshold]
|
||||
|
||||
def _generate_embedding(self, text: str, metadata: Dict[str, Any]) -> Any:
|
||||
with suppress_logging():
|
||||
self.app.add(text, data_type="text", metadata=metadata)
|
||||
self.app.add(text, data_type=DataType.TEXT, metadata=metadata)
|
||||
|
||||
def reset(self) -> None:
|
||||
try:
|
||||
|
||||
src/crewai/pipeline/__init__.py (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
from crewai.pipeline.pipeline import Pipeline
|
||||
from crewai.pipeline.pipeline_kickoff_result import PipelineKickoffResult
|
||||
from crewai.pipeline.pipeline_output import PipelineOutput
|
||||
|
||||
__all__ = ["Pipeline", "PipelineKickoffResult", "PipelineOutput"]
|
||||
src/crewai/pipeline/pipeline.py (new file, 405 lines)
@@ -0,0 +1,405 @@
|
||||
import asyncio
|
||||
import copy
|
||||
from typing import Any, Dict, List, Tuple, Union
|
||||
|
||||
from pydantic import BaseModel, Field, model_validator
|
||||
|
||||
from crewai.crew import Crew
|
||||
from crewai.crews.crew_output import CrewOutput
|
||||
from crewai.pipeline.pipeline_kickoff_result import PipelineKickoffResult
|
||||
from crewai.routers.router import Router
|
||||
from crewai.types.usage_metrics import UsageMetrics
|
||||
|
||||
Trace = Union[Union[str, Dict[str, Any]], List[Union[str, Dict[str, Any]]]]
|
||||
PipelineStage = Union[Crew, List[Crew], Router]
|
||||
"""
Developer Notes:

This module defines a Pipeline class that represents a sequence of operations (stages)
to process inputs. Each stage can be either sequential or parallel, and the pipeline
can process multiple kickoffs concurrently.

Core Loop Explanation:
1. The `kickoff` method processes multiple kickoffs in parallel, each going through
   all pipeline stages.
2. The `process_single_kickoff` method handles the processing of a single kickoff through
   all stages, updating metrics and input data along the way.
3. The `_process_stage` method determines whether a stage is sequential or parallel
   and processes it accordingly.
4. The `_process_single_crew` and `_process_parallel_crews` methods handle the
   execution of single and parallel crew stages.
5. The `_update_metrics_and_input` method updates usage metrics and the current input
   with the outputs from a stage.
6. The `_build_pipeline_kickoff_results` method constructs the final results of the
   pipeline kickoff, including traces and outputs.

Handling Traces and Crew Outputs:
- During the processing of stages, we handle the results (traces and crew outputs)
  for all stages except the last one differently from the final stage.
- For intermediate stages, the primary focus is on passing the input data between stages.
  This involves merging the output dictionaries from all crews in a stage into a single
  dictionary and passing it to the next stage. This merged dictionary allows for smooth
  data flow between stages.
- For the final stage, in addition to passing the input data, we also need to prepare
  the final outputs and traces to be returned as the overall result of the pipeline kickoff.
  In this case, we do not merge the results, as each result needs to be included
  separately in its own pipeline kickoff result.

Pipeline Terminology:
- Pipeline: The overall structure that defines a sequence of operations.
- Stage: A distinct part of the pipeline, which can be either sequential or parallel.
- Kickoff: A specific execution of the pipeline for a given set of inputs, representing a single instance of processing through the pipeline.
- Branch: Parallel executions within a stage (e.g., concurrent crew operations).
- Trace: The journey of an individual input through the entire pipeline.

Example pipeline structure:
    crew1 >> crew2 >> crew3

This represents a pipeline with three sequential stages:
1. crew1 is the first stage, which processes the input and passes its output to crew2.
2. crew2 is the second stage, which takes the output from crew1 as its input, processes it, and passes its output to crew3.
3. crew3 is the final stage, which takes the output from crew2 as its input and produces the final output of the pipeline.

Each input creates its own kickoff, flowing through all stages of the pipeline.
Multiple kickoffs can be processed concurrently, each following the defined pipeline structure.

Another example pipeline structure:
    crew1 >> [crew2, crew3] >> crew4

This represents a pipeline with three stages:
1. A sequential stage (crew1)
2. A parallel stage with two branches (crew2 and crew3 executing concurrently)
3. Another sequential stage (crew4)

Each input creates its own kickoff, flowing through all stages of the pipeline.
Multiple kickoffs can be processed concurrently, each following the defined pipeline structure.
"""
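# Illustrative stage layouts accepted by the validator below (a stage may be a Crew,
# a Router, or a single-level list of Crews):
#
#   Pipeline(stages=[crew1, crew2, crew3])            # three sequential stages
#   Pipeline(stages=[crew1, [crew2, crew3], crew4])   # parallel branch in the middle stage
#   Pipeline(stages=[classifier_crew, router])        # a Router selects the next pipeline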
|
||||
|
||||
class Pipeline(BaseModel):
|
||||
stages: List[PipelineStage] = Field(
|
||||
..., description="List of crews representing stages to be executed in sequence"
|
||||
)
|
||||
|
||||
@model_validator(mode="before")
|
||||
@classmethod
|
||||
def validate_stages(cls, values):
|
||||
stages = values.get("stages", [])
|
||||
|
||||
def check_nesting_and_type(item, depth=0):
|
||||
if depth > 1:
|
||||
raise ValueError("Double nesting is not allowed in pipeline stages")
|
||||
if isinstance(item, list):
|
||||
for sub_item in item:
|
||||
check_nesting_and_type(sub_item, depth + 1)
|
||||
elif not isinstance(item, (Crew, Router)):
|
||||
raise ValueError(
|
||||
f"Expected Crew instance, Router instance, or list of Crews, got {type(item)}"
|
||||
)
|
||||
|
||||
for stage in stages:
|
||||
check_nesting_and_type(stage)
|
||||
return values

    async def kickoff(
        self, inputs: List[Dict[str, Any]]
    ) -> List[PipelineKickoffResult]:
        """
        Processes multiple runs in parallel, each going through all pipeline stages.

        Args:
            inputs (List[Dict[str, Any]]): List of inputs for each run.

        Returns:
            List[PipelineKickoffResult]: List of results from each run.
        """
        pipeline_results: List[PipelineKickoffResult] = []

        # Process all runs in parallel
        all_run_results = await asyncio.gather(
            *(self.process_single_kickoff(input_data) for input_data in inputs)
        )

        # Flatten the list of lists into a single list of results
        pipeline_results.extend(
            result for run_result in all_run_results for result in run_result
        )

        return pipeline_results

    async def process_single_kickoff(
        self, kickoff_input: Dict[str, Any]
    ) -> List[PipelineKickoffResult]:
        """
        Processes a single run through all pipeline stages.

        Args:
            kickoff_input (Dict[str, Any]): The input for the run.

        Returns:
            List[PipelineKickoffResult]: The results of processing the run.
        """
        initial_input = copy.deepcopy(kickoff_input)
        current_input = copy.deepcopy(kickoff_input)
        stages = self._copy_stages()
        pipeline_usage_metrics: Dict[str, UsageMetrics] = {}
        all_stage_outputs: List[List[CrewOutput]] = []
        traces: List[List[Union[str, Dict[str, Any]]]] = [[initial_input]]

        stage_index = 0
        while stage_index < len(stages):
            stage = stages[stage_index]
            stage_input = copy.deepcopy(current_input)

            if isinstance(stage, Router):
                next_pipeline, route_taken = stage.route(stage_input)
                stages = (
                    stages[: stage_index + 1]
                    + list(next_pipeline.stages)
                    + stages[stage_index + 1 :]
                )
                traces.append([{"route_taken": route_taken}])
                stage_index += 1
                continue

            stage_outputs, stage_trace = await self._process_stage(stage, stage_input)

            self._update_metrics_and_input(
                pipeline_usage_metrics, current_input, stage, stage_outputs
            )
            traces.append(stage_trace)
            all_stage_outputs.append(stage_outputs)
            stage_index += 1

        return self._build_pipeline_kickoff_results(
            all_stage_outputs, traces, pipeline_usage_metrics
        )

    async def _process_stage(
        self, stage: PipelineStage, current_input: Dict[str, Any]
    ) -> Tuple[List[CrewOutput], List[Union[str, Dict[str, Any]]]]:
        """
        Processes a single stage of the pipeline, which can be either sequential or parallel.

        Args:
            stage (Union[Crew, List[Crew]]): The stage to process.
            current_input (Dict[str, Any]): The input for the stage.

        Returns:
            Tuple[List[CrewOutput], List[Union[str, Dict[str, Any]]]]: The outputs and trace of the stage.
        """
        if isinstance(stage, Crew):
            return await self._process_single_crew(stage, current_input)
        elif isinstance(stage, list) and all(isinstance(crew, Crew) for crew in stage):
            return await self._process_parallel_crews(stage, current_input)
        else:
            raise ValueError(f"Unsupported stage type: {type(stage)}")

    async def _process_single_crew(
        self, crew: Crew, current_input: Dict[str, Any]
    ) -> Tuple[List[CrewOutput], List[Union[str, Dict[str, Any]]]]:
        """
        Processes a single crew.

        Args:
            crew (Crew): The crew to process.
            current_input (Dict[str, Any]): The input for the crew.

        Returns:
            Tuple[List[CrewOutput], List[Union[str, Dict[str, Any]]]]: The output and trace of the crew.
        """
        output = await crew.kickoff_async(inputs=current_input)
        return [output], [crew.name or str(crew.id)]

    async def _process_parallel_crews(
        self, crews: List[Crew], current_input: Dict[str, Any]
    ) -> Tuple[List[CrewOutput], List[Union[str, Dict[str, Any]]]]:
        """
        Processes multiple crews in parallel.

        Args:
            crews (List[Crew]): The list of crews to process in parallel.
            current_input (Dict[str, Any]): The input for the crews.

        Returns:
            Tuple[List[CrewOutput], List[Union[str, Dict[str, Any]]]]: The outputs and traces of the crews.
        """
        parallel_outputs = await asyncio.gather(
            *[crew.kickoff_async(inputs=current_input) for crew in crews]
        )
        return parallel_outputs, [crew.name or str(crew.id) for crew in crews]

    def _update_metrics_and_input(
        self,
        usage_metrics: Dict[str, UsageMetrics],
        current_input: Dict[str, Any],
        stage: PipelineStage,
        outputs: List[CrewOutput],
    ) -> None:
        """
        Updates metrics and current input with the outputs of a stage.

        Args:
            usage_metrics (Dict[str, Any]): The usage metrics to update.
            current_input (Dict[str, Any]): The current input to update.
            stage (Union[Crew, List[Crew]]): The stage that was processed.
            outputs (List[CrewOutput]): The outputs of the stage.
        """
        if isinstance(stage, Crew):
            usage_metrics[stage.name or str(stage.id)] = outputs[0].token_usage
            current_input.update(outputs[0].to_dict())
        elif isinstance(stage, list) and all(isinstance(crew, Crew) for crew in stage):
            for crew, output in zip(stage, outputs):
                usage_metrics[crew.name or str(crew.id)] = output.token_usage
                current_input.update(output.to_dict())
        else:
            raise ValueError(f"Unsupported stage type: {type(stage)}")
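
    # --- Illustrative sketch (not part of the diff) ------------------------------
    # How a stage's output feeds the next stage: CrewOutput.to_dict() is merged
    # into current_input, so downstream crews see upstream fields alongside the
    # original kickoff input. The field names below are assumptions.
    #
    #   current_input = {"topic": "AI"}
    #   # stage 1 returns a CrewOutput whose to_dict() is {"summary": "..."}
    #   current_input.update({"summary": "..."})
    #   # stage 2 crews are kicked off with {"topic": "AI", "summary": "..."}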

    def _build_pipeline_kickoff_results(
        self,
        all_stage_outputs: List[List[CrewOutput]],
        traces: List[List[Union[str, Dict[str, Any]]]],
        token_usage: Dict[str, UsageMetrics],
    ) -> List[PipelineKickoffResult]:
        """
        Builds the results of a pipeline run.

        Args:
            all_stage_outputs (List[List[CrewOutput]]): All stage outputs.
            traces (List[List[Union[str, Dict[str, Any]]]]): All traces.
            token_usage (Dict[str, Any]): Token usage metrics.

        Returns:
            List[PipelineKickoffResult]: The results of the pipeline run.
        """
        formatted_traces = self._format_traces(traces)
        formatted_crew_outputs = self._format_crew_outputs(all_stage_outputs)

        return [
            PipelineKickoffResult(
                token_usage=token_usage,
                trace=formatted_trace,
                raw=crews_outputs[-1].raw,
                pydantic=crews_outputs[-1].pydantic,
                json_dict=crews_outputs[-1].json_dict,
                crews_outputs=crews_outputs,
            )
            for crews_outputs, formatted_trace in zip(
                formatted_crew_outputs, formatted_traces
            )
        ]

    def _format_traces(
        self, traces: List[List[Union[str, Dict[str, Any]]]]
    ) -> List[List[Trace]]:
        """
        Formats the traces of a pipeline run.

        Args:
            traces (List[List[Union[str, Dict[str, Any]]]]): The traces to format.

        Returns:
            List[List[Trace]]: The formatted traces.
        """
        formatted_traces: List[Trace] = self._format_single_trace(traces[:-1])
        return self._format_multiple_traces(formatted_traces, traces[-1])

    def _format_single_trace(
        self, traces: List[List[Union[str, Dict[str, Any]]]]
    ) -> List[Trace]:
        """
        Formats single traces.

        Args:
            traces (List[List[Union[str, Dict[str, Any]]]]): The traces to format.

        Returns:
            List[Trace]: The formatted single traces.
        """
        formatted_traces: List[Trace] = []
        for trace in traces:
            formatted_traces.append(trace[0] if len(trace) == 1 else trace)
        return formatted_traces

    def _format_multiple_traces(
        self,
        formatted_traces: List[Trace],
        final_trace: List[Union[str, Dict[str, Any]]],
    ) -> List[List[Trace]]:
        """
        Formats multiple traces.

        Args:
            formatted_traces (List[Trace]): The formatted single traces.
            final_trace (List[Union[str, Dict[str, Any]]]): The final trace to format.

        Returns:
            List[List[Trace]]: The formatted multiple traces.
        """
        traces_to_return: List[List[Trace]] = []
        if len(final_trace) == 1:
            formatted_traces.append(final_trace[0])
            traces_to_return.append(formatted_traces)
        else:
            for trace in final_trace:
                copied_traces = formatted_traces.copy()
                copied_traces.append(trace)
                traces_to_return.append(copied_traces)
        return traces_to_return

    def _format_crew_outputs(
        self, all_stage_outputs: List[List[CrewOutput]]
    ) -> List[List[CrewOutput]]:
        """
        Formats the outputs of all stages into a list of crew outputs.

        Args:
            all_stage_outputs (List[List[CrewOutput]]): All stage outputs.

        Returns:
            List[List[CrewOutput]]: Formatted crew outputs.
        """
        crew_outputs: List[CrewOutput] = [
            output
            for stage_outputs in all_stage_outputs[:-1]
            for output in stage_outputs
        ]
        return [crew_outputs + [output] for output in all_stage_outputs[-1]]

    def _copy_stages(self):
        """Create a deep copy of the Pipeline's stages."""
        new_stages = []
        for stage in self.stages:
            if isinstance(stage, list):
                new_stages.append(
                    [
                        crew.copy() if hasattr(crew, "copy") else copy.deepcopy(crew)
                        for crew in stage
                    ]
                )
            elif hasattr(stage, "copy"):
                new_stages.append(stage.copy())
            else:
                new_stages.append(copy.deepcopy(stage))

        return new_stages

    def __rshift__(self, other: PipelineStage) -> "Pipeline":
        """
        Implements the >> operator to add another Stage (Crew or List[Crew]) to an existing Pipeline.

        Args:
            other (Any): The stage to add.

        Returns:
            Pipeline: A new pipeline with the added stage.
        """
        if isinstance(other, (Crew, Router)) or (
            isinstance(other, list) and all(isinstance(item, Crew) for item in other)
        ):
            return type(self)(stages=self.stages + [other])
        else:
            raise TypeError(
                f"Unsupported operand type for >>: '{type(self).__name__}' and '{type(other).__name__}'"
            )

src/crewai/pipeline/pipeline_kickoff_result.py (new file)
@@ -0,0 +1,61 @@
import json
import uuid
from typing import Any, Dict, List, Optional, Union

from pydantic import UUID4, BaseModel, Field

from crewai.crews.crew_output import CrewOutput
from crewai.types.usage_metrics import UsageMetrics


class PipelineKickoffResult(BaseModel):
    """Class that represents the result of a pipeline run."""

    id: UUID4 = Field(
        default_factory=uuid.uuid4,
        frozen=True,
        description="Unique identifier for the object, not set by user.",
    )
    raw: str = Field(description="Raw output of the pipeline run", default="")
    pydantic: Any = Field(
        description="Pydantic output of the pipeline run", default=None
    )
    json_dict: Union[Dict[str, Any], None] = Field(
        description="JSON dict output of the pipeline run", default={}
    )

    token_usage: Dict[str, UsageMetrics] = Field(
        description="Token usage for each crew in the run"
    )
    trace: List[Any] = Field(
        description="Trace of the journey of inputs through the run"
    )
    crews_outputs: List[CrewOutput] = Field(
        description="Output from each crew in the run",
        default=[],
    )

    @property
    def json(self) -> Optional[str]:
        if self.crews_outputs[-1].tasks_output[-1].output_format != "json":
            raise ValueError(
                "No JSON output found in the final task of the final crew. Please make sure to set the output_json property in the final task in your crew."
            )

        return json.dumps(self.json_dict)

    def to_dict(self) -> Dict[str, Any]:
        """Convert json_output and pydantic_output to a dictionary."""
        output_dict = {}
        if self.json_dict:
            output_dict.update(self.json_dict)
        elif self.pydantic:
            output_dict.update(self.pydantic.model_dump())
        return output_dict

    def __str__(self):
        if self.pydantic:
            return str(self.pydantic)
        if self.json_dict:
            return str(self.json_dict)
        return self.raw
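
# --- Illustrative sketch (not part of the diff) ---------------------------------
# How a result might be consumed once a kickoff completes. The field values and
# the crew name are assumptions; only the attribute/method names come from the
# class above.
#
#   result: PipelineKickoffResult = results[0]
#   result.raw                      # raw text from the final crew
#   result.to_dict()                # json_dict if set, otherwise pydantic.model_dump()
#   result.token_usage["Research Crew"].total_tokens   # per-crew UsageMetrics
#   result.trace                    # e.g. [{"topic": "AI"}, "Research Crew", "Writing Crew"]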

src/crewai/pipeline/pipeline_output.py (new file)
@@ -0,0 +1,20 @@
import uuid
from typing import List

from pydantic import UUID4, BaseModel, Field

from crewai.pipeline.pipeline_kickoff_result import PipelineKickoffResult


class PipelineOutput(BaseModel):
    id: UUID4 = Field(
        default_factory=uuid.uuid4,
        frozen=True,
        description="Unique identifier for the object, not set by user.",
    )
    run_results: List[PipelineKickoffResult] = Field(
        description="List of results for each run through the pipeline", default=[]
    )

    def add_run_result(self, result: PipelineKickoffResult):
        self.run_results.append(result)

@@ -1,15 +1,17 @@
from .annotations import (
    agent,
    cache_handler,
    callback,
    crew,
    task,
    llm,
    output_json,
    output_pydantic,
    pipeline,
    task,
    tool,
    callback,
    llm,
    cache_handler,
)
from .crew_base import CrewBase
from .pipeline_base import PipelineBase

__all__ = [
    "agent",
@@ -20,6 +22,8 @@ __all__ = [
    "tool",
    "callback",
    "CrewBase",
    "PipelineBase",
    "llm",
    "cache_handler",
    "pipeline",
]

@@ -1,27 +1,23 @@
def memoize(func):
    cache = {}
from functools import wraps

    def memoized_func(*args, **kwargs):
        key = (args, tuple(kwargs.items()))
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]

    memoized_func.__dict__.update(func.__dict__)
    return memoized_func
from crewai.project.utils import memoize


def task(func):
    if not hasattr(task, "registration_order"):
        task.registration_order = []

    func.is_task = True
    wrapped_func = memoize(func)
    @wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        if not result.name:
            result.name = func.__name__
        return result

    # Append the function name to the registration order list
    setattr(wrapper, "is_task", True)
    task.registration_order.append(func.__name__)

    return wrapped_func
    return memoize(wrapper)
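
# --- Illustrative sketch (not part of the diff) ---------------------------------
# Effect of the reworked @task decorator: the wrapper fills in Task.name from the
# method name when it is not set explicitly, and memoize() makes repeated calls on
# the same instance return the same Task object. The crew class is an assumption.
#
#   @CrewBase
#   class MyCrew:
#       @task
#       def research_task(self) -> Task:
#           return Task(description="...", expected_output="...")
#
#   crew_def = MyCrew()
#   crew_def.research_task().name                          # "research_task"
#   crew_def.research_task() is crew_def.research_task()   # True: memoized per instance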


def agent(func):
@@ -61,6 +57,21 @@ def cache_handler(func):
    return memoize(func)


def stage(func):
    func.is_stage = True
    return memoize(func)


def router(func):
    func.is_router = True
    return memoize(func)


def pipeline(func):
    func.is_pipeline = True
    return memoize(func)


def crew(func):
    def wrapper(self, *args, **kwargs):
        instantiated_tasks = []

@@ -1,25 +1,19 @@
import inspect
import os
from pathlib import Path
from typing import Any, Callable, Dict

import yaml
from dotenv import load_dotenv
from pydantic import ConfigDict

load_dotenv()


def CrewBase(cls):
    class WrappedClass(cls):
        model_config = ConfigDict(arbitrary_types_allowed=True)
        is_crew_class: bool = True  # type: ignore

        base_directory = None
        for frame_info in inspect.stack():
            if "site-packages" not in frame_info.filename:
                base_directory = Path(frame_info.filename).parent.resolve()
                break
        # Get the directory of the class being decorated
        base_directory = Path(inspect.getfile(cls)).parent

        original_agents_config_path = getattr(
            cls, "agents_config", "config/agents.yaml"
@@ -29,25 +23,23 @@ def CrewBase(cls):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)

            if self.base_directory is None:
                raise Exception(
                    "Unable to dynamically determine the project's base directory, you must run it from the project's root directory."
                )
            agents_config_path = self.base_directory / self.original_agents_config_path
            tasks_config_path = self.base_directory / self.original_tasks_config_path

            self.agents_config = self.load_yaml(agents_config_path)
            self.tasks_config = self.load_yaml(tasks_config_path)

            self.agents_config = self.load_yaml(
                os.path.join(self.base_directory, self.original_agents_config_path)
            )
            self.tasks_config = self.load_yaml(
                os.path.join(self.base_directory, self.original_tasks_config_path)
            )
            self.map_all_agent_variables()
            self.map_all_task_variables()

        @staticmethod
        def load_yaml(config_path: str):
            with open(config_path, "r") as file:
                # parsedContent = YamlParser.parse(file)  # type: ignore # Argument 1 to "parse" has incompatible type "TextIOWrapper"; expected "YamlParser"
                return yaml.safe_load(file)
        def load_yaml(config_path: Path):
            try:
                with open(config_path, "r") as file:
                    return yaml.safe_load(file)
            except FileNotFoundError:
                print(f"File not found: {config_path}")
                raise

        def _get_all_functions(self):
            return {

src/crewai/project/pipeline_base.py (new file)
@@ -0,0 +1,58 @@
from typing import Any, Callable, Dict, List, Type, Union

from crewai.crew import Crew
from crewai.pipeline.pipeline import Pipeline
from crewai.routers.router import Router

PipelineStage = Union[Crew, List[Crew], Router]


# TODO: Could potentially remove. Need to check with @joao and @gui if this is needed for CrewAI+
def PipelineBase(cls: Type[Any]) -> Type[Any]:
    class WrappedClass(cls):
        is_pipeline_class: bool = True  # type: ignore
        stages: List[PipelineStage]

        def __init__(self, *args: Any, **kwargs: Any) -> None:
            super().__init__(*args, **kwargs)
            self.stages = []
            self._map_pipeline_components()

        def _get_all_functions(self) -> Dict[str, Callable[..., Any]]:
            return {
                name: getattr(self, name)
                for name in dir(self)
                if callable(getattr(self, name))
            }

        def _filter_functions(
            self, functions: Dict[str, Callable[..., Any]], attribute: str
        ) -> Dict[str, Callable[..., Any]]:
            return {
                name: func
                for name, func in functions.items()
                if hasattr(func, attribute)
            }

        def _map_pipeline_components(self) -> None:
            all_functions = self._get_all_functions()
            crew_functions = self._filter_functions(all_functions, "is_crew")
            router_functions = self._filter_functions(all_functions, "is_router")

            for stage_attr in dir(self):
                stage = getattr(self, stage_attr)
                if isinstance(stage, (Crew, Router)):
                    self.stages.append(stage)
                elif callable(stage) and hasattr(stage, "is_crew"):
                    self.stages.append(crew_functions[stage_attr]())
                elif callable(stage) and hasattr(stage, "is_router"):
                    self.stages.append(router_functions[stage_attr]())
                elif isinstance(stage, list) and all(
                    isinstance(item, Crew) for item in stage
                ):
                    self.stages.append(stage)

        def build_pipeline(self) -> Pipeline:
            return Pipeline(stages=self.stages)

    return WrappedClass
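
# --- Illustrative sketch (not part of the diff) ---------------------------------
# How a @PipelineBase-decorated class might be laid out. The helper builders are
# assumptions; only the decorator, the is_crew/is_router markers and
# build_pipeline() come from this diff.
#
#   @PipelineBase
#   class MyPipeline:
#       def __init__(self):
#           self.research_crew = build_research_crew()   # assumed helper returning a Crew
#           self.write_crew = build_write_crew()         # assumed helper returning a Crew
#
#   pipeline = MyPipeline().build_pipeline()   # Pipeline(stages=[...the mapped crews...])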

src/crewai/project/utils.py (new file)
@@ -0,0 +1,11 @@
def memoize(func):
    cache = {}

    def memoized_func(*args, **kwargs):
        key = (args, tuple(kwargs.items()))
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]

    memoized_func.__dict__.update(func.__dict__)
    return memoized_func
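
# --- Illustrative sketch (not part of the diff) ---------------------------------
# Minimal demonstration of the memoize() helper above: the wrapped function runs
# once per distinct (args, kwargs) key, and cached results are returned afterwards.
calls = []

@memoize
def slow_add(a, b):
    calls.append((a, b))
    return a + b

assert slow_add(1, 2) == 3
assert slow_add(1, 2) == 3      # served from the cache, body not re-executed
assert calls == [(1, 2)]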

src/crewai/routers/__init__.py (new file)
@@ -0,0 +1,3 @@
from crewai.routers.router import Router

__all__ = ["Router"]

src/crewai/routers/router.py (new file)
@@ -0,0 +1,84 @@
from copy import deepcopy
from typing import Any, Callable, Dict, Tuple

from pydantic import BaseModel, Field, PrivateAttr


class Route(BaseModel):
    condition: Callable[[Dict[str, Any]], bool]
    pipeline: Any


class Router(BaseModel):
    routes: Dict[str, Route] = Field(
        default_factory=dict,
        description="Dictionary of route names to (condition, pipeline) tuples",
    )
    default: Any = Field(..., description="Default pipeline if no conditions are met")
    _route_types: Dict[str, type] = PrivateAttr(default_factory=dict)

    class Config:
        arbitrary_types_allowed = True

    def __init__(self, routes: Dict[str, Route], default: Any, **data):
        super().__init__(routes=routes, default=default, **data)
        self._check_copyable(default)
        for name, route in routes.items():
            self._check_copyable(route.pipeline)
            self._route_types[name] = type(route.pipeline)

    @staticmethod
    def _check_copyable(obj: Any) -> None:
        if not hasattr(obj, "copy") or not callable(getattr(obj, "copy")):
            raise ValueError(f"Object of type {type(obj)} must have a 'copy' method")

    def add_route(
        self,
        name: str,
        condition: Callable[[Dict[str, Any]], bool],
        pipeline: Any,
    ) -> "Router":
        """
        Add a named route with its condition and corresponding pipeline to the router.

        Args:
            name: A unique name for this route
            condition: A function that takes a dictionary input and returns a boolean
            pipeline: The Pipeline to execute if the condition is met

        Returns:
            The Router instance for method chaining
        """
        self._check_copyable(pipeline)
        self.routes[name] = Route(condition=condition, pipeline=pipeline)
        self._route_types[name] = type(pipeline)
        return self

    def route(self, input_data: Dict[str, Any]) -> Tuple[Any, str]:
        """
        Evaluate the input against the conditions and return the appropriate pipeline.

        Args:
            input_data: The input dictionary to be evaluated

        Returns:
            A tuple containing the next Pipeline to be executed and the name of the route taken
        """
        for name, route in self.routes.items():
            if route.condition(input_data):
                return route.pipeline, name

        return self.default, "default"

    def copy(self) -> "Router":
        """Create a deep copy of the Router."""
        new_routes = {
            name: Route(
                condition=deepcopy(route.condition),
                pipeline=route.pipeline.copy(),
            )
            for name, route in self.routes.items()
        }
        new_default = self.default.copy()

        return Router(routes=new_routes, default=new_default)
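
# --- Illustrative sketch (not part of the diff) ---------------------------------
# How a Router might be wired into a pipeline so that inputs choose their own
# branch. The crew/pipeline names and the "urgent" field are assumptions.
#
#   urgent_pipeline = Pipeline(stages=[escalation_crew])
#   normal_pipeline = Pipeline(stages=[triage_crew, support_crew])
#
#   router = Router(
#       routes={
#           "urgent": Route(
#               condition=lambda x: x.get("urgent", False),
#               pipeline=urgent_pipeline,
#           ),
#       },
#       default=normal_pipeline,
#   )
#
#   pipeline = Pipeline(stages=[intake_crew, router])
#   # During process_single_kickoff, Router.route() splices the chosen pipeline's
#   # stages in after the router and records {"route_taken": ...} in the trace.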

@@ -9,13 +9,21 @@ from hashlib import md5
from typing import Any, Dict, List, Optional, Tuple, Type, Union

from opentelemetry.trace import Span
from pydantic import UUID4, BaseModel, Field, field_validator, model_validator
from pydantic import (
    UUID4,
    BaseModel,
    Field,
    PrivateAttr,
    field_validator,
    model_validator,
)
from pydantic_core import PydanticCustomError

from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.tasks.output_format import OutputFormat
from crewai.tasks.task_output import TaskOutput
from crewai.telemetry.telemetry import Telemetry
from crewai.utilities.config import process_config
from crewai.utilities.converter import Converter, convert_to_model
from crewai.utilities.i18n import I18N

@@ -39,14 +47,12 @@ class Task(BaseModel):
        tools: List of tools/resources limited for task execution.
    """

    class Config:
        arbitrary_types_allowed = True

    __hash__ = object.__hash__  # type: ignore
    used_tools: int = 0
    tools_errors: int = 0
    delegations: int = 0
    i18n: I18N = I18N()
    name: Optional[str] = Field(default=None)
    prompt_context: Optional[str] = None
    description: str = Field(description="Description of the actual task.")
    expected_output: str = Field(
@@ -103,16 +109,27 @@ class Task(BaseModel):
        default=None,
    )

    _telemetry: Telemetry
    _execution_span: Span | None = None
    _original_description: str | None = None
    _original_expected_output: str | None = None
    _thread: threading.Thread | None = None
    _execution_time: float | None = None
    _telemetry: Telemetry = PrivateAttr(default_factory=Telemetry)
    _execution_span: Optional[Span] = PrivateAttr(default=None)
    _original_description: Optional[str] = PrivateAttr(default=None)
    _original_expected_output: Optional[str] = PrivateAttr(default=None)
    _thread: Optional[threading.Thread] = PrivateAttr(default=None)
    _execution_time: Optional[float] = PrivateAttr(default=None)

    def __init__(__pydantic_self__, **data):
        config = data.pop("config", {})
        super().__init__(**config, **data)
    @model_validator(mode="before")
    @classmethod
    def process_model_config(cls, values):
        return process_config(values, cls)

    @model_validator(mode="after")
    def validate_required_fields(self):
        required_fields = ["description", "expected_output"]
        for field in required_fields:
            if getattr(self, field) is None:
                raise ValueError(
                    f"{field} must be provided either directly or through config"
                )
        return self

    @field_validator("id", mode="before")
    @classmethod
@@ -136,12 +153,6 @@ class Task(BaseModel):
            return value[1:]
        return value

    @model_validator(mode="after")
    def set_private_attrs(self) -> "Task":
        """Set private attributes."""
        self._telemetry = Telemetry()
        return self

    @model_validator(mode="after")
    def set_attributes_based_on_config(self) -> "Task":
        """Set attributes based on the agent configuration."""
@@ -184,7 +195,7 @@ class Task(BaseModel):
        expected_output = self._original_expected_output or self.expected_output
        source = [description, expected_output]

        return md5("|".join(source).encode()).hexdigest()
        return md5("|".join(source).encode(), usedforsecurity=False).hexdigest()

    def execute_async(
        self,
@@ -239,7 +250,9 @@ class Task(BaseModel):
        pydantic_output, json_output = self._export_output(result)

        task_output = TaskOutput(
            name=self.name,
            description=self.description,
            expected_output=self.expected_output,
            raw=result,
            pydantic=pydantic_output,
            json_dict=json_output,

@@ -10,6 +10,10 @@ class TaskOutput(BaseModel):
    """Class that represents the result of a task."""

    description: str = Field(description="Description of the task")
    name: Optional[str] = Field(description="Name of the task", default=None)
    expected_output: Optional[str] = Field(
        description="Expected output of the task", default=None
    )
    summary: Optional[str] = Field(description="Summary of the task", default=None)
    raw: str = Field(description="Raw output of the task", default="")
    pydantic: Optional[BaseModel] = Field(

@@ -1 +1,3 @@
from .telemetry import Telemetry

__all__ = ["Telemetry"]

@@ -4,7 +4,7 @@ import asyncio
import json
import os
import platform
from typing import TYPE_CHECKING, Any
from typing import TYPE_CHECKING, Any, Optional

import pkg_resources
from opentelemetry import trace
@@ -28,18 +28,6 @@ class Telemetry:
    agents backstories or goals nor responses or any data that is being
    processed by the agents, nor any secrets and env vars.

    Data collected includes:
    - Version of crewAI
    - Version of Python
    - General OS (e.g. number of CPUs, macOS/Windows/Linux)
    - Number of agents and tasks in a crew
    - Crew Process being used
    - If Agents are using memory or allowing delegation
    - If Tasks are being executed in parallel or sequentially
    - Language model being used
    - Roles of agents in a crew
    - Tools names available

    Users can opt-in to sharing more complete data using the `share_crew`
    attribute in the Crew class.
    """
@@ -98,69 +86,136 @@ class Telemetry:
                self._add_attribute(span, "crew_memory", crew.memory)
                self._add_attribute(span, "crew_number_of_tasks", len(crew.tasks))
                self._add_attribute(span, "crew_number_of_agents", len(crew.agents))
                self._add_attribute(
                    span,
                    "crew_agents",
                    json.dumps(
                        [
                            {
                                "key": agent.key,
                                "id": str(agent.id),
                                "role": agent.role,
                                "goal": agent.goal,
                                "backstory": agent.backstory,
                                "verbose?": agent.verbose,
                                "max_iter": agent.max_iter,
                                "max_rpm": agent.max_rpm,
                                "i18n": agent.i18n.prompt_file,
                                "llm": json.dumps(self._safe_llm_attributes(agent.llm)),
                                "delegation_enabled?": agent.allow_delegation,
                                "tools_names": [
                                    tool.name.casefold() for tool in agent.tools or []
                                ],
                            }
                            for agent in crew.agents
                        ]
                    ),
                )
                self._add_attribute(
                    span,
                    "crew_tasks",
                    json.dumps(
                        [
                            {
                                "key": task.key,
                                "id": str(task.id),
                                "description": task.description,
                                "expected_output": task.expected_output,
                                "async_execution?": task.async_execution,
                                "human_input?": task.human_input,
                                "agent_role": task.agent.role if task.agent else "None",
                                "agent_key": task.agent.key if task.agent else None,
                                "context": (
                                    [task.description for task in task.context]
                                    if task.context
                                    else None
                                ),
                                "tools_names": [
                                    tool.name.casefold() for tool in task.tools or []
                                ],
                            }
                            for task in crew.tasks
                        ]
                    ),
                )
                self._add_attribute(span, "platform", platform.platform())
                self._add_attribute(span, "platform_release", platform.release())
                self._add_attribute(span, "platform_system", platform.system())
                self._add_attribute(span, "platform_version", platform.version())
                self._add_attribute(span, "cpus", os.cpu_count())

                if crew.share_crew:
                    self._add_attribute(
                        span,
                        "crew_agents",
                        json.dumps(
                            [
                                {
                                    "key": agent.key,
                                    "id": str(agent.id),
                                    "role": agent.role,
                                    "goal": agent.goal,
                                    "backstory": agent.backstory,
                                    "verbose?": agent.verbose,
                                    "max_iter": agent.max_iter,
                                    "max_rpm": agent.max_rpm,
                                    "i18n": agent.i18n.prompt_file,
                                    "function_calling_llm": json.dumps(
                                        self._safe_llm_attributes(
                                            agent.function_calling_llm
                                        )
                                    ),
                                    "llm": json.dumps(
                                        self._safe_llm_attributes(agent.llm)
                                    ),
                                    "delegation_enabled?": agent.allow_delegation,
                                    "allow_code_execution?": agent.allow_code_execution,
                                    "max_retry_limit": agent.max_retry_limit,
                                    "tools_names": [
                                        tool.name.casefold()
                                        for tool in agent.tools or []
                                    ],
                                }
                                for agent in crew.agents
                            ]
                        ),
                    )
                    self._add_attribute(
                        span,
                        "crew_tasks",
                        json.dumps(
                            [
                                {
                                    "key": task.key,
                                    "id": str(task.id),
                                    "description": task.description,
                                    "expected_output": task.expected_output,
                                    "async_execution?": task.async_execution,
                                    "human_input?": task.human_input,
                                    "agent_role": task.agent.role
                                    if task.agent
                                    else "None",
                                    "agent_key": task.agent.key if task.agent else None,
                                    "context": (
                                        [task.description for task in task.context]
                                        if task.context
                                        else None
                                    ),
                                    "tools_names": [
                                        tool.name.casefold()
                                        for tool in task.tools or []
                                    ],
                                }
                                for task in crew.tasks
                            ]
                        ),
                    )
                    self._add_attribute(span, "platform", platform.platform())
                    self._add_attribute(span, "platform_release", platform.release())
                    self._add_attribute(span, "platform_system", platform.system())
                    self._add_attribute(span, "platform_version", platform.version())
                    self._add_attribute(span, "cpus", os.cpu_count())
                    self._add_attribute(
                        span, "crew_inputs", json.dumps(inputs) if inputs else None
                    )

                else:
                    self._add_attribute(
                        span,
                        "crew_agents",
                        json.dumps(
                            [
                                {
                                    "key": agent.key,
                                    "id": str(agent.id),
                                    "role": agent.role,
                                    "verbose?": agent.verbose,
                                    "max_iter": agent.max_iter,
                                    "max_rpm": agent.max_rpm,
                                    "function_calling_llm": json.dumps(
                                        self._safe_llm_attributes(
                                            agent.function_calling_llm
                                        )
                                    ),
                                    "llm": json.dumps(
                                        self._safe_llm_attributes(agent.llm)
                                    ),
                                    "delegation_enabled?": agent.allow_delegation,
                                    "allow_code_execution?": agent.allow_code_execution,
                                    "max_retry_limit": agent.max_retry_limit,
                                    "tools_names": [
                                        tool.name.casefold()
                                        for tool in agent.tools or []
                                    ],
                                }
                                for agent in crew.agents
                            ]
                        ),
                    )
                    self._add_attribute(
                        span,
                        "crew_tasks",
                        json.dumps(
                            [
                                {
                                    "key": task.key,
                                    "id": str(task.id),
                                    "async_execution?": task.async_execution,
                                    "human_input?": task.human_input,
                                    "agent_role": task.agent.role
                                    if task.agent
                                    else "None",
                                    "agent_key": task.agent.key if task.agent else None,
                                    "tools_names": [
                                        tool.name.casefold()
                                        for tool in task.tools or []
                                    ],
                                }
                                for task in crew.tasks
                            ]
                        ),
                    )
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
@@ -289,6 +344,118 @@ class Telemetry:
            except Exception:
                pass

    def individual_test_result_span(
        self, crew: Crew, quality: float, exec_time: int, model_name: str
    ):
        if self.ready:
            try:
                tracer = trace.get_tracer("crewai.telemetry")
                span = tracer.start_span("Crew Individual Test Result")

                self._add_attribute(
                    span,
                    "crewai_version",
                    pkg_resources.get_distribution("crewai").version,
                )
                self._add_attribute(span, "crew_key", crew.key)
                self._add_attribute(span, "crew_id", str(crew.id))
                self._add_attribute(span, "quality", str(quality))
                self._add_attribute(span, "exec_time", str(exec_time))
                self._add_attribute(span, "model_name", model_name)
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def test_execution_span(
        self,
        crew: Crew,
        iterations: int,
        inputs: dict[str, Any] | None,
        model_name: str,
    ):
        if self.ready:
            try:
                tracer = trace.get_tracer("crewai.telemetry")
                span = tracer.start_span("Crew Test Execution")

                self._add_attribute(
                    span,
                    "crewai_version",
                    pkg_resources.get_distribution("crewai").version,
                )
                self._add_attribute(span, "crew_key", crew.key)
                self._add_attribute(span, "crew_id", str(crew.id))
                self._add_attribute(span, "iterations", str(iterations))
                self._add_attribute(span, "model_name", model_name)

                if crew.share_crew:
                    self._add_attribute(
                        span, "inputs", json.dumps(inputs) if inputs else None
                    )

                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def deploy_signup_error_span(self):
        if self.ready:
            try:
                tracer = trace.get_tracer("crewai.telemetry")
                span = tracer.start_span("Deploy Signup Error")
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def start_deployment_span(self, uuid: Optional[str] = None):
        if self.ready:
            try:
                tracer = trace.get_tracer("crewai.telemetry")
                span = tracer.start_span("Start Deployment")
                if uuid:
                    self._add_attribute(span, "uuid", uuid)
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def create_crew_deployment_span(self):
        if self.ready:
            try:
                tracer = trace.get_tracer("crewai.telemetry")
                span = tracer.start_span("Create Crew Deployment")
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def get_crew_logs_span(self, uuid: Optional[str], log_type: str = "deployment"):
        if self.ready:
            try:
                tracer = trace.get_tracer("crewai.telemetry")
                span = tracer.start_span("Get Crew Logs")
                self._add_attribute(span, "log_type", log_type)
                if uuid:
                    self._add_attribute(span, "uuid", uuid)
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def remove_crew_span(self, uuid: Optional[str] = None):
        if self.ready:
            try:
                tracer = trace.get_tracer("crewai.telemetry")
                span = tracer.start_span("Remove Crew")
                if uuid:
                    self._add_attribute(span, "uuid", uuid)
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def crew_execution_span(self, crew: Crew, inputs: dict[str, Any] | None):
        """Records the complete execution of a crew.
        This is only collected if the user has opted-in to share the crew.
@@ -402,7 +569,7 @@ class Telemetry:
            pass

    def _safe_llm_attributes(self, llm):
        attributes = ["name", "model_name", "base_url", "model", "top_k", "temperature"]
        attributes = ["name", "model_name", "model", "top_k", "temperature"]
        if llm:
            safe_attributes = {k: v for k, v in vars(llm).items() if k in attributes}
            safe_attributes["class"] = llm.__class__.__name__

@@ -1,5 +1,5 @@
from langchain.tools import StructuredTool
from pydantic import BaseModel, ConfigDict, Field
from pydantic import BaseModel, Field

from crewai.agents.cache import CacheHandler

@@ -7,11 +7,10 @@ from crewai.agents.cache import CacheHandler
class CacheTools(BaseModel):
    """Default tools to hit the cache."""

    model_config = ConfigDict(arbitrary_types_allowed=True)
    name: str = "Hit Cache"
    cache_handler: CacheHandler = Field(
        description="Cache Handler for the crew",
        default=CacheHandler(),
        default_factory=CacheHandler,
    )

    def tool(self):

@@ -1,4 +1,5 @@
import ast
import os
from difflib import SequenceMatcher
from textwrap import dedent
from typing import Any, List, Union
@@ -11,10 +12,12 @@ from crewai.telemetry import Telemetry
from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling
from crewai.utilities import I18N, Converter, ConverterError, Printer

try:
    import agentops
except ImportError:
    agentops = None
agentops = None
if os.environ.get("AGENTOPS_API_KEY"):
    try:
        import agentops  # type: ignore
    except ImportError:
        pass

OPENAI_BIGGER_MODELS = ["gpt-4o"]

@@ -69,6 +72,13 @@ class ToolUsage:
        self.action = action
        self.function_calling_llm = function_calling_llm

        # Handling bug (see https://github.com/langchain-ai/langchain/pull/16395): raise an error if tools_names have space for ChatOpenAI
        if isinstance(self.function_calling_llm, ChatOpenAI):
            if " " in self.tools_names:
                raise Exception(
                    "Tools names should not have spaces for ChatOpenAI models."
                )

        # Set the maximum parsing attempts for bigger models
        if (isinstance(self.function_calling_llm, ChatOpenAI)) and (
            self.function_calling_llm.openai_api_base is None
@@ -108,7 +118,7 @@ class ToolUsage:
        tool: BaseTool,
        calling: Union[ToolCalling, InstructorToolCalling],
    ) -> str:  # TODO: Fix this return type
        tool_event = agentops.ToolEvent(name=calling.tool_name) if agentops else None
        tool_event = agentops.ToolEvent(name=calling.tool_name) if agentops else None  # type: ignore
        if self._check_tool_repeated_usage(calling=calling):  # type: ignore # _check_tool_repeated_usage of "ToolUsage" does not return a value (it only ever returns None)
            try:
                result = self._i18n.errors("task_repeated_usage").format(

src/crewai/types/__init__.py (new, empty file)

src/crewai/types/usage_metrics.py (new file)
@@ -0,0 +1,36 @@
from pydantic import BaseModel, Field


class UsageMetrics(BaseModel):
    """
    Model to track usage metrics for the crew's execution.

    Attributes:
        total_tokens: Total number of tokens used.
        prompt_tokens: Number of tokens used in prompts.
        completion_tokens: Number of tokens used in completions.
        successful_requests: Number of successful requests made.
    """

    total_tokens: int = Field(default=0, description="Total number of tokens used.")
    prompt_tokens: int = Field(
        default=0, description="Number of tokens used in prompts."
    )
    completion_tokens: int = Field(
        default=0, description="Number of tokens used in completions."
    )
    successful_requests: int = Field(
        default=0, description="Number of successful requests made."
    )

    def add_usage_metrics(self, usage_metrics: "UsageMetrics"):
        """
        Add the usage metrics from another UsageMetrics object.

        Args:
            usage_metrics (UsageMetrics): The usage metrics to add.
        """
        self.total_tokens += usage_metrics.total_tokens
        self.prompt_tokens += usage_metrics.prompt_tokens
        self.completion_tokens += usage_metrics.completion_tokens
        self.successful_requests += usage_metrics.successful_requests
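
# --- Illustrative sketch (not part of the diff) ---------------------------------
# Aggregating per-crew metrics into a single total; the numbers are assumptions.
total = UsageMetrics()
total.add_usage_metrics(
    UsageMetrics(total_tokens=1200, prompt_tokens=900, completion_tokens=300, successful_requests=3)
)
total.add_usage_metrics(
    UsageMetrics(total_tokens=800, prompt_tokens=500, completion_tokens=300, successful_requests=2)
)
assert total.total_tokens == 2000 and total.successful_requests == 5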

src/crewai/utilities/config.py (new file)
@@ -0,0 +1,40 @@
from typing import Any, Dict, Type

from pydantic import BaseModel


def process_config(
    values: Dict[str, Any], model_class: Type[BaseModel]
) -> Dict[str, Any]:
    """
    Process the config dictionary and update the values accordingly.

    Args:
        values (Dict[str, Any]): The dictionary of values to update.
        model_class (Type[BaseModel]): The Pydantic model class to reference for field validation.

    Returns:
        Dict[str, Any]: The updated values dictionary.
    """
    config = values.get("config", {})
    if not config:
        return values

    # Copy values from config (originally from YAML) to the model's attributes.
    # Only copy if the attribute isn't already set, preserving any explicitly defined values.
    for key, value in config.items():
        if key not in model_class.model_fields:
            continue
        if values.get(key) is not None:
            continue
        if isinstance(value, (str, int, float, bool, list)):
            values[key] = value
        elif isinstance(value, dict):
            if isinstance(values.get(key), dict):
                values[key].update(value)
            else:
                values[key] = value

    # Remove the config from values to avoid duplicate processing
    values.pop("config", None)
    return values
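
# --- Illustrative sketch (not part of the diff) ---------------------------------
# Behaviour of process_config(): config entries only fill fields that are both
# declared on the model and not already set. The Example model is an assumption.
#
#   class Example(BaseModel):
#       description: Optional[str] = None
#       expected_output: Optional[str] = None
#
#   values = {
#       "description": "explicit",
#       "config": {"description": "from yaml", "expected_output": "a report"},
#   }
#   process_config(values, Example)
#   # -> {"description": "explicit", "expected_output": "a report"}   ("config" key removed)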

@@ -8,6 +8,7 @@ from rich.table import Table
from crewai.agent import Agent
from crewai.task import Task
from crewai.tasks.task_output import TaskOutput
from crewai.telemetry import Telemetry


class TaskEvaluationPydanticOutput(BaseModel):
@@ -34,6 +35,7 @@ class CrewEvaluator:
    def __init__(self, crew, openai_model_name: str):
        self.crew = crew
        self.openai_model_name = openai_model_name
        self._telemetry = Telemetry()
        self._setup_for_evaluating()

    def _setup_for_evaluating(self) -> None:
@@ -155,6 +157,12 @@ class CrewEvaluator:
        evaluation_result = evaluation_task.execute_sync()

        if isinstance(evaluation_result.pydantic, TaskEvaluationPydanticOutput):
            self._test_result_span = self._telemetry.individual_test_result_span(
                self.crew,
                evaluation_result.pydantic.quality,
                current_task._execution_time,
                self.openai_model_name,
            )
            self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
            self.run_execution_times[self.iteration].append(
                current_task._execution_time

@@ -1,3 +1,4 @@
import os
from typing import List

from langchain_openai import ChatOpenAI
@@ -6,17 +7,27 @@ from pydantic import BaseModel, Field
from crewai.utilities import Converter
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser

agentops = None
try:
    from agentops import track_agent
except ImportError:

    def track_agent(name):
def mock_agent_ops_provider():
    def track_agent(*args, **kwargs):
        def noop(f):
            return f

        return noop

    return track_agent


agentops = None

if os.environ.get("AGENTOPS_API_KEY"):
    try:
        from agentops import track_agent
    except ImportError:
        track_agent = mock_agent_ops_provider()
else:
    track_agent = mock_agent_ops_provider()


class Entity(BaseModel):
    name: str = Field(description="The name of the entity.")
Some files were not shown because too many files have changed in this diff.