Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-08 15:48:29 +00:00)
Merge remote-tracking branch 'upstream/main'
@@ -249,8 +249,8 @@ pip install dist/*.tar.gz
## Hire CrewAI

We're a company developing crewAI and crewAI Enterprise, we for a limited time are offer consulting with selected customers, to get them early access to our enterprise solution
If you are interested on having access to it and hiring weekly hours with our team, feel free to email us at [joao@crewai.com](mailto:joao@crewai.com).
We're a company developing crewAI and crewAI Enterprise. We, for a limited time, are offering consulting with selected customers; to get them early access to our enterprise solution.
If you are interested in having access to it, and hiring weekly hours with our team, feel free to email us at [joao@crewai.com](mailto:joao@crewai.com).

## Telemetry
@@ -110,6 +110,16 @@ OPENAI_API_BASE=https://api.mistral.ai/v1
OPENAI_MODEL_NAME="mistral-small"
```

### Solar
```sh
from langchain_community.chat_models.solar import SolarChat
# Initialize language model
os.environ["SOLAR_API_KEY"] = "your-solar-api-key"
llm = SolarChat(max_tokens=1024)

Free developer API key available here: https://console.upstage.ai/services/solar
Langchain Example: https://github.com/langchain-ai/langchain/pull/18556
```
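The Solar snippet above is Python despite the `sh` fence and relies on an implicit `import os`. A self-contained sketch of the same setup, with a hypothetical crewAI agent attached (the role, goal, and backstory values are placeholders):

```python
import os

from crewai import Agent
from langchain_community.chat_models.solar import SolarChat

# Free developer API key available at https://console.upstage.ai/services/solar
os.environ["SOLAR_API_KEY"] = "your-solar-api-key"

# Initialize the Solar chat model and hand it to an agent as its LLM.
solar_llm = SolarChat(max_tokens=1024)
researcher = Agent(
    role="Researcher",
    goal="Answer questions concisely",
    backstory="A focused analyst.",
    llm=solar_llm,
)
```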
### text-gen-web-ui
```sh
OPENAI_API_BASE=http://localhost:5000/v1
@@ -128,9 +138,9 @@ Free developer API key available here: https://cohere.com/
Langchain Documentation: https://python.langchain.com/docs/integrations/chat/cohere
```

### Azure Open AI
Azure's OpenAI API needs a distinct setup, utilizing the `langchain_openai` component for Azure-specific configurations.
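For example, a minimal sketch of that setup using `langchain_openai` (the deployment name, API version, and agent definition below are illustrative; the client also expects `AZURE_OPENAI_ENDPOINT` and `AZURE_OPENAI_API_KEY` in the environment):

```python
from crewai import Agent
from langchain_openai import AzureChatOpenAI

# Assumes AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_API_KEY are set in the environment.
azure_llm = AzureChatOpenAI(
    azure_deployment="your-deployment-name",  # placeholder deployment name
    api_version="2023-07-01-preview",         # placeholder API version
)

writer = Agent(
    role="Writer",
    goal="Draft short reports",
    backstory="A concise technical writer.",
    llm=azure_llm,
)
```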
### Azure Open AI Configuration
For Azure OpenAI API integration, set the following environment variables:
```sh
AZURE_OPENAI_VERSION="2022-12-01"
AZURE_OPENAI_DEPLOYMENT=""
@@ -1,6 +1,6 @@
[tool.poetry]
name = "crewai"
version = "0.27.2"
version = "0.28.8"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
authors = ["Joao Moura <joao@crewai.com>"]
readme = "README.md"
@@ -23,7 +23,7 @@ opentelemetry-sdk = "^1.22.0"
opentelemetry-exporter-otlp-proto-http = "^1.22.0"
instructor = "^0.5.2"
regex = "^2023.12.25"
crewai-tools = { version = "^0.1.4", optional = true }
crewai-tools = { version = "^0.1.7", optional = true }
click = "^8.1.7"
python-dotenv = "1.0.0"
embedchain = "^0.1.98"
@@ -46,7 +46,7 @@ mkdocs-material = {extras = ["imaging"], version = "^9.5.7"}
mkdocs-material-extensions = "^1.3.1"
pillow = "^10.2.0"
cairosvg = "^2.7.1"
crewai_tools = "^0.1.4"
crewai-tools = "^0.1.7"

[tool.isort]
profile = "black"
@@ -81,9 +81,7 @@ class CrewAgentExecutor(AgentExecutor):
datetime=str(time.time()),
expected_output=self.task.expected_output,
metadata={
"suggestions": "\n".join(
[f"- {s}" for s in evaluation.suggestions]
),
"suggestions": evaluation.suggestions,
"quality": evaluation.quality,
},
)
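For reference, a rough sketch of how the stored metadata changes shape with this hunk (the suggestion strings and quality value are invented for illustration):

```python
# Hypothetical evaluation output with the two fields referenced above.
suggestions = ["Cite the sources you used", "Tighten the summary"]
quality = 8.5

# Before this change: suggestions were pre-joined into a single bullet string.
old_metadata = {"suggestions": "\n".join([f"- {s}" for s in suggestions])}

# After: the raw list is stored (plus the quality score), so downstream
# consumers can de-duplicate and reformat individual suggestions.
new_metadata = {"suggestions": suggestions, "quality": quality}
```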
@@ -6,7 +6,7 @@ authors = ["Your Name <you@example.com>"]
[tool.poetry.dependencies]
python = ">=3.10,<=3.13"
crewai = {extras = ["tools"], version = "^0.27.0"}
crewai = {extras = ["tools"], version = "^0.28.8"}

[tool.poetry.scripts]
{{folder_name}} = "{{folder_name}}.main:run"
@@ -37,13 +37,18 @@ class ContextualMemory:
Fetches historical data or insights from LTM that are relevant to the task's description and expected_output,
formatted as bullet points.
"""
ltm_results = self.ltm.search(task)
ltm_results = self.ltm.search(task, latest_n=2)
if not ltm_results:
return None
formatted_results = "\n".join(
[f"{result['metadata']['suggestions']}" for result in ltm_results]
)
formatted_results = list(set(formatted_results))

formatted_results = [
suggestion
for result in ltm_results
for suggestion in result["metadata"]["suggestions"]
]
formatted_results = list(dict.fromkeys(formatted_results))
formatted_results = "\n".join([f"- {result}" for result in formatted_results])

return f"Historical Data:\n{formatted_results}" if ltm_results else ""
def _fetch_entity_context(self, query) -> str:
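Taken on its own, the new formatting logic is an order-preserving de-duplication followed by bullet formatting; a standalone sketch with invented suggestion data:

```python
# Two hypothetical LTM rows whose metadata now carries suggestion lists.
ltm_results = [
    {"metadata": {"suggestions": ["Add citations", "Shorten the intro"]}},
    {"metadata": {"suggestions": ["Add citations", "Check the numbers"]}},
]

suggestions = [s for row in ltm_results for s in row["metadata"]["suggestions"]]
unique = list(dict.fromkeys(suggestions))  # drops duplicates but keeps first-seen order, unlike set()
formatted = "\n".join([f"- {s}" for s in unique])
print(f"Historical Data:\n{formatted}")
# Historical Data:
# - Add citations
# - Shorten the intro
# - Check the numbers
```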
@@ -28,5 +28,5 @@ class LongTermMemory(Memory):
datetime=item.datetime,
)

def search(self, task: str) -> Dict[str, Any]:
return self.storage.load(task)
def search(self, task: str, latest_n: int) -> Dict[str, Any]:
return self.storage.load(task, latest_n)
@@ -67,19 +67,19 @@ class LTMSQLiteStorage:
color="red",
)

def load(self, task_description: str) -> Dict[str, Any]:
def load(self, task_description: str, latest_n: int) -> Dict[str, Any]:
"""Queries the LTM table by task description with error handling."""
try:
with sqlite3.connect(self.db_path) as conn:
cursor = conn.cursor()
cursor.execute(
"""
SELECT metadata, datetime, score
FROM long_term_memories
WHERE task_description = ?
ORDER BY datetime DESC, score ASC
LIMIT 2
""",
f"""
SELECT metadata, datetime, score
FROM long_term_memories
WHERE task_description = ?
ORDER BY datetime DESC, score ASC
LIMIT {latest_n}
""",
(task_description,),
)
rows = cursor.fetchall()
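For comparison, SQLite can also bind the row limit as a query parameter instead of interpolating `latest_n` with an f-string; a standalone sketch of that variant (table and column names are taken from the query above, the database path is illustrative):

```python
import sqlite3

def load_latest(db_path: str, task_description: str, latest_n: int):
    """Fetch the latest_n most recent rows for a task, binding both values as parameters."""
    with sqlite3.connect(db_path) as conn:
        cursor = conn.cursor()
        cursor.execute(
            """
            SELECT metadata, datetime, score
            FROM long_term_memories
            WHERE task_description = ?
            ORDER BY datetime DESC, score ASC
            LIMIT ?
            """,
            (task_description, latest_n),
        )
        return cursor.fetchall()
```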
@@ -13,7 +13,23 @@ from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
class Task(BaseModel):
"""Class that represent a task to be executed."""
"""Class that represents a task to be executed.

Each task must have a description, an expected output and an agent responsible for execution.

Attributes:
agent: Agent responsible for task execution. Represents entity performing task.
async_execution: Boolean flag indicating asynchronous task execution.
callback: Function/object executed post task completion for additional actions.
config: Dictionary containing task-specific configuration parameters.
context: List of Task instances providing task context or input data.
description: Descriptive text detailing task's purpose and execution.
expected_output: Clear definition of expected task outcome.
output_file: File path for storing task output.
output_json: Pydantic model for structuring JSON output.
output_pydantic: Pydantic model for task output.
tools: List of tools/resources limited for task execution.
"""

class Config:
arbitrary_types_allowed = True
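To make the documented attributes concrete, here is a brief sketch of constructing a task with the required fields (the agent definition and all strings are placeholders):

```python
from crewai import Agent, Task

# Hypothetical agent responsible for executing the task.
researcher = Agent(
    role="Researcher",
    goal="Summarize recent AI announcements",
    backstory="An analyst who tracks AI developments.",
)

# A task needs a description, an expected output, and an agent.
task = Task(
    description="Collect this week's most important AI announcements.",
    expected_output="A bullet-point list with one line per announcement.",
    agent=researcher,
)
```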
@@ -1,46 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDqDCCAy6gAwIBAgIRAPNkTmtuAFAjfglGvXvh9R0wCgYIKoZIzj0EAwMwgYgx
CzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpOZXcgSmVyc2V5MRQwEgYDVQQHEwtKZXJz
ZXkgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMS4wLAYDVQQD
EyVVU0VSVHJ1c3QgRUNDIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTE4MTEw
MjAwMDAwMFoXDTMwMTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAkdCMRswGQYDVQQI
ExJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAOBgNVBAcTB1NhbGZvcmQxGDAWBgNVBAoT
D1NlY3RpZ28gTGltaXRlZDE3MDUGA1UEAxMuU2VjdGlnbyBFQ0MgRG9tYWluIFZh
bGlkYXRpb24gU2VjdXJlIFNlcnZlciBDQTBZMBMGByqGSM49AgEGCCqGSM49AwEH
A0IABHkYk8qfbZ5sVwAjBTcLXw9YWsTef1Wj6R7W2SUKiKAgSh16TwUwimNJE4xk
IQeV/To14UrOkPAY9z2vaKb71EijggFuMIIBajAfBgNVHSMEGDAWgBQ64QmG1M8Z
wpZ2dEl23OA1xmNjmjAdBgNVHQ4EFgQU9oUKOxGG4QR9DqoLLNLuzGR7e64wDgYD
VR0PAQH/BAQDAgGGMBIGA1UdEwEB/wQIMAYBAf8CAQAwHQYDVR0lBBYwFAYIKwYB
BQUHAwEGCCsGAQUFBwMCMBsGA1UdIAQUMBIwBgYEVR0gADAIBgZngQwBAgEwUAYD
VR0fBEkwRzBFoEOgQYY/aHR0cDovL2NybC51c2VydHJ1c3QuY29tL1VTRVJUcnVz
dEVDQ0NlcnRpZmljYXRpb25BdXRob3JpdHkuY3JsMHYGCCsGAQUFBwEBBGowaDA/
BggrBgEFBQcwAoYzaHR0cDovL2NydC51c2VydHJ1c3QuY29tL1VTRVJUcnVzdEVD
Q0FkZFRydXN0Q0EuY3J0MCUGCCsGAQUFBzABhhlodHRwOi8vb2NzcC51c2VydHJ1
c3QuY29tMAoGCCqGSM49BAMDA2gAMGUCMEvnx3FcsVwJbZpCYF9z6fDWJtS1UVRs
cS0chWBNKPFNpvDKdrdKRe+oAkr2jU+ubgIxAODheSr2XhcA7oz9HmedGdMhlrd9
4ToKFbZl+/OnFFzqnvOhcjHvClECEQcKmc8fmA==
-----END CERTIFICATE-----

-----BEGIN CERTIFICATE-----
MIID0zCCArugAwIBAgIQVmcdBOpPmUxvEIFHWdJ1lDANBgkqhkiG9w0BAQwFADB7
MQswCQYDVQQGEwJHQjEbMBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYD
VQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UE
AwwYQUFBIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTE5MDMxMjAwMDAwMFoXDTI4
MTIzMTIzNTk1OVowgYgxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpOZXcgSmVyc2V5
MRQwEgYDVQQHEwtKZXJzZXkgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBO
ZXR3b3JrMS4wLAYDVQQDEyVVU0VSVHJ1c3QgRUNDIENlcnRpZmljYXRpb24gQXV0
aG9yaXR5MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEGqxUWqn5aCPnetUkb1PGWthL
q8bVttHmc3Gu3ZzWDGH926CJA7gFFOxXzu5dP+Ihs8731Ip54KODfi2X0GHE8Znc
JZFjq38wo7Rw4sehM5zzvy5cU7Ffs30yf4o043l5o4HyMIHvMB8GA1UdIwQYMBaA
FKARCiM+lvEH7OKvKe+CpX/QMKS0MB0GA1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1
xmNjmjAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zARBgNVHSAECjAI
MAYGBFUdIAAwQwYDVR0fBDwwOjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5j
b20vQUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNAYIKwYBBQUHAQEEKDAmMCQG
CCsGAQUFBzABhhhodHRwOi8vb2NzcC5jb21vZG9jYS5jb20wDQYJKoZIhvcNAQEM
BQADggEBABns652JLCALBIAdGN5CmXKZFjK9Dpx1WywV4ilAbe7/ctvbq5AfjJXy
ij0IckKJUAfiORVsAYfZFhr1wHUrxeZWEQff2Ji8fJ8ZOd+LygBkc7xGEJuTI42+
FsMuCIKchjN0djsoTI0DQoWz4rIjQtUfenVqGtF8qmchxDM6OW1TyaLtYiKou+JV
bJlsQ2uRl9EMC5MCHdK8aXdJ5htN978UeAOwproLtOGFfy/cQjutdAFI3tZs4RmY
CV4Ks2dH/hzg1cEo70qLRDEmBDeNiXQ2Lu+lIg+DdEmSx/cQwgwp+7e9un/jX9Wf
8qn0dNW44bOwgeThpWOjzOoEeJBuv/c=
-----END CERTIFICATE-----
@@ -1,5 +1,4 @@
import asyncio
import importlib.resources
import json
import os
import platform
@@ -41,19 +40,17 @@ class Telemetry:
def __init__(self):
self.ready = False
self.trace_set = False
try:
telemetry_endpoint = "https://telemetry.crewai.com:4319"
self.resource = Resource(
attributes={SERVICE_NAME: "crewAI-telemetry"},
)
self.provider = TracerProvider(resource=self.resource)
cert_file = importlib.resources.files("crewai.telemetry").joinpath(
"STAR_crewai_com_bundle.pem"
)

processor = BatchSpanProcessor(
OTLPSpanExporter(
endpoint=f"{telemetry_endpoint}/v1/traces",
certificate_file=cert_file,
timeout=30,
)
)
@@ -69,13 +66,13 @@ class Telemetry:
self.ready = False

def set_tracer(self):
if self.ready:
provider = trace.get_tracer_provider()
if provider is None:
try:
trace.set_tracer_provider(self.provider)
except Exception:
self.ready = False
if self.ready and not self.trace_set:
try:
trace.set_tracer_provider(self.provider)
self.trace_set = True
except Exception:
self.ready = False
self.trace_set = False

def crew_creation(self, crew):
"""Records the creation of a crew."""
@@ -92,6 +89,7 @@ class Telemetry:
self._add_attribute(span, "crew_id", str(crew.id))
self._add_attribute(span, "crew_process", crew.process)
self._add_attribute(span, "crew_language", crew.language)
self._add_attribute(span, "crew_memory", crew.memory)
self._add_attribute(span, "crew_number_of_tasks", len(crew.tasks))
self._add_attribute(span, "crew_number_of_agents", len(crew.agents))
self._add_attribute(
@@ -102,7 +100,6 @@ class Telemetry:
{
"id": str(agent.id),
"role": agent.role,
"memory_enabled?": agent.memory,
"verbose?": agent.verbose,
"max_iter": agent.max_iter,
"max_rpm": agent.max_rpm,
@@ -150,11 +147,17 @@ class Telemetry:
try:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Tool Repeated Usage")
self._add_attribute(
span,
"crewai_version",
pkg_resources.get_distribution("crewai").version,
)
self._add_attribute(span, "tool_name", tool_name)
self._add_attribute(span, "attempts", attempts)
self._add_attribute(
span, "llm", json.dumps(self._safe_llm_attributes(llm))
)
if llm:
self._add_attribute(
span, "llm", json.dumps(self._safe_llm_attributes(llm))
)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
@@ -166,11 +169,17 @@ class Telemetry:
try:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Tool Usage")
self._add_attribute(
span,
"crewai_version",
pkg_resources.get_distribution("crewai").version,
)
self._add_attribute(span, "tool_name", tool_name)
self._add_attribute(span, "attempts", attempts)
self._add_attribute(
span, "llm", json.dumps(self._safe_llm_attributes(llm))
)
if llm:
self._add_attribute(
span, "llm", json.dumps(self._safe_llm_attributes(llm))
)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
@@ -183,8 +192,14 @@ class Telemetry:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Tool Usage Error")
self._add_attribute(
span, "llm", json.dumps(self._safe_llm_attributes(llm))
span,
"crewai_version",
pkg_resources.get_distribution("crewai").version,
)
if llm:
self._add_attribute(
span, "llm", json.dumps(self._safe_llm_attributes(llm))
)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
@@ -198,6 +213,11 @@ class Telemetry:
try:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Crew Execution")
self._add_attribute(
span,
"crewai_version",
pkg_resources.get_distribution("crewai").version,
)
self._add_attribute(span, "crew_id", str(crew.id))
self._add_attribute(
span,
@@ -209,7 +229,6 @@ class Telemetry:
"role": agent.role,
"goal": agent.goal,
"backstory": agent.backstory,
"memory_enabled?": agent.memory,
"verbose?": agent.verbose,
"max_iter": agent.max_iter,
"max_rpm": agent.max_rpm,
@@ -253,6 +272,11 @@ class Telemetry:
def end_crew(self, crew, output):
if (self.ready) and (crew.share_crew):
try:
self._add_attribute(
crew._execution_span,
"crewai_version",
pkg_resources.get_distribution("crewai").version,
)
self._add_attribute(crew._execution_span, "crew_output", output)
self._add_attribute(
crew._execution_span,
@@ -282,6 +306,8 @@ class Telemetry:
def _safe_llm_attributes(self, llm):
attributes = ["name", "model_name", "base_url", "model", "top_k", "temperature"]
safe_attributes = {k: v for k, v in vars(llm).items() if k in attributes}
safe_attributes["class"] = llm.__class__.__name__
return safe_attributes
if llm:
safe_attributes = {k: v for k, v in vars(llm).items() if k in attributes}
safe_attributes["class"] = llm.__class__.__name__
return safe_attributes
return {}
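A small illustration of what the guarded helper now returns (the stand-in LLM object and its attribute values are made up for the example):

```python
# Condensed, standalone copy of the helper above, for illustration only.
ALLOWED = ["name", "model_name", "base_url", "model", "top_k", "temperature"]

def safe_llm_attributes(llm):
    if llm:
        attrs = {k: v for k, v in vars(llm).items() if k in ALLOWED}
        attrs["class"] = llm.__class__.__name__
        return attrs
    return {}

class DummyLLM:
    def __init__(self):
        self.model_name = "gpt-4"  # kept: on the allow-list
        self.temperature = 0.7     # kept: on the allow-list
        self.api_key = "secret"    # dropped: not on the allow-list

print(safe_llm_attributes(DummyLLM()))
# {'model_name': 'gpt-4', 'temperature': 0.7, 'class': 'DummyLLM'}
print(safe_llm_attributes(None))
# {} -- the new guard avoids vars(None) raising a TypeError
```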