Compare commits

...

5 Commits

Author SHA1 Message Date
Lucas Gomide
05b0d21284 Merge branch 'main' into lg-allow-remove-stop 2025-05-02 14:41:47 -03:00
Vini Brasil
17474a3a0c Identify parent_flow of Crew and LiteAgent (#2723)
This commit adds a new crew field called parent_flow, evaluated when the Crew
instance is instantiated. The call stack is traversed to check whether the caller
is an instance of Flow, and if so, the field is filled in.

Other alternatives were considered, such as a global context or a new field to
be filled in manually; however, stack inspection, while the most magical of
these, is thread-safe and requires no public API changes.
2025-05-02 14:40:39 -03:00
Lucas Gomide
b25afcc95a Merge branch 'main' into lg-allow-remove-stop 2025-05-02 13:40:51 -03:00
Lucas Gomide
f89c2bfb7e Fix crewai reset-memories when Embedding dimension mismatch (#2737)
* fix: support to reset memories after changing Crew's embedder

Sources must not be added while the Knowledge is being initialized; otherwise it could not be reset later

* chore: improve reset memory feedback

Previously, even when no memories were actually erased, we logged that they had been. From now on, the log will specify which memory has been reset.

* feat: improve get_crew discovery from a single file

Crew instances can now be discovered from any function or method with a return type annotation of -> Crew, as well as from module-level attributes assigned to a Crew instance. Additionally, crews can be retrieved from within a Flow.

* refactor: make add_sources a public method from Knowledge
2025-05-02 12:40:42 -04:00
Lucas Gomide
3730648b9d feat: support to remove stop parameter from LLM call
Currently, the stop parameter cannot be removed for models that don't support it, because setting it to None ends up being sent as an empty list.
2025-05-02 12:07:47 -03:00
13 changed files with 974 additions and 171 deletions

View File

@@ -2,7 +2,7 @@ import subprocess
import click
from crewai.cli.utils import get_crew
from crewai.cli.utils import get_crews
def reset_memories_command(
@@ -26,35 +26,47 @@ def reset_memories_command(
"""
try:
crew = get_crew()
if not crew:
raise ValueError("No crew found.")
if all:
crew.reset_memories(command_type="all")
click.echo("All memories have been reset.")
return
if not any([long, short, entity, kickoff_outputs, knowledge]):
if not any([long, short, entity, kickoff_outputs, knowledge, all]):
click.echo(
"No memory type specified. Please specify at least one type to reset."
)
return
if long:
crew.reset_memories(command_type="long")
click.echo("Long term memory has been reset.")
if short:
crew.reset_memories(command_type="short")
click.echo("Short term memory has been reset.")
if entity:
crew.reset_memories(command_type="entity")
click.echo("Entity memory has been reset.")
if kickoff_outputs:
crew.reset_memories(command_type="kickoff_outputs")
click.echo("Latest Kickoff outputs stored has been reset.")
if knowledge:
crew.reset_memories(command_type="knowledge")
click.echo("Knowledge has been reset.")
crews = get_crews()
if not crews:
raise ValueError("No crew found.")
for crew in crews:
if all:
crew.reset_memories(command_type="all")
click.echo(
f"[Crew ({crew.name if crew.name else crew.id})] Reset memories command has been completed."
)
continue
if long:
crew.reset_memories(command_type="long")
click.echo(
f"[Crew ({crew.name if crew.name else crew.id})] Long term memory has been reset."
)
if short:
crew.reset_memories(command_type="short")
click.echo(
f"[Crew ({crew.name if crew.name else crew.id})] Short term memory has been reset."
)
if entity:
crew.reset_memories(command_type="entity")
click.echo(
f"[Crew ({crew.name if crew.name else crew.id})] Entity memory has been reset."
)
if kickoff_outputs:
crew.reset_memories(command_type="kickoff_outputs")
click.echo(
f"[Crew ({crew.name if crew.name else crew.id})] Latest Kickoff outputs stored has been reset."
)
if knowledge:
crew.reset_memories(command_type="knowledge")
click.echo(
f"[Crew ({crew.name if crew.name else crew.id})] Knowledge has been reset."
)
except subprocess.CalledProcessError as e:
click.echo(f"An error occurred while resetting the memories: {e}", err=True)

View File

@@ -2,7 +2,8 @@ import os
import shutil
import sys
from functools import reduce
from typing import Any, Dict, List
from inspect import isfunction, ismethod
from typing import Any, Dict, List, get_type_hints
import click
import tomli
@@ -10,6 +11,7 @@ from rich.console import Console
from crewai.cli.constants import ENV_VARS
from crewai.crew import Crew
from crewai.flow import Flow
if sys.version_info >= (3, 11):
import tomllib
@@ -250,11 +252,11 @@ def write_env_file(folder_path, env_vars):
file.write(f"{key}={value}\n")
def get_crew(crew_path: str = "crew.py", require: bool = False) -> Crew | None:
"""Get the crew instance from the crew.py file."""
def get_crews(crew_path: str = "crew.py", require: bool = False) -> list[Crew]:
"""Get the crew instances from the a file."""
crew_instances = []
try:
import importlib.util
import os
for root, _, files in os.walk("."):
if crew_path in files:
@@ -271,12 +273,10 @@ def get_crew(crew_path: str = "crew.py", require: bool = False) -> Crew | None:
spec.loader.exec_module(module)
for attr_name in dir(module):
attr = getattr(module, attr_name)
try:
if callable(attr) and hasattr(attr, "crew"):
crew_instance = attr().crew()
return crew_instance
module_attr = getattr(module, attr_name)
try:
crew_instances.extend(fetch_crews(module_attr))
except Exception as e:
print(f"Error processing attribute {attr_name}: {e}")
continue
@@ -286,7 +286,6 @@ def get_crew(crew_path: str = "crew.py", require: bool = False) -> Crew | None:
import traceback
print(f"Traceback: {traceback.format_exc()}")
except (ImportError, AttributeError) as e:
if require:
console.print(
@@ -300,7 +299,6 @@ def get_crew(crew_path: str = "crew.py", require: bool = False) -> Crew | None:
if require:
console.print("No valid Crew instance found in crew.py", style="bold red")
raise SystemExit
return None
except Exception as e:
if require:
@@ -308,4 +306,36 @@ def get_crew(crew_path: str = "crew.py", require: bool = False) -> Crew | None:
f"Unexpected error while loading crew: {str(e)}", style="bold red"
)
raise SystemExit
return crew_instances
def get_crew_instance(module_attr) -> Crew | None:
if (
callable(module_attr)
and hasattr(module_attr, "is_crew_class")
and module_attr.is_crew_class
):
return module_attr().crew()
if (ismethod(module_attr) or isfunction(module_attr)) and get_type_hints(
module_attr
).get("return") is Crew:
return module_attr()
elif isinstance(module_attr, Crew):
return module_attr
else:
return None
def fetch_crews(module_attr) -> list[Crew]:
crew_instances: list[Crew] = []
if crew_instance := get_crew_instance(module_attr):
crew_instances.append(crew_instance)
if isinstance(module_attr, type) and issubclass(module_attr, Flow):
instance = module_attr()
for attr_name in dir(instance):
attr = getattr(instance, attr_name)
if crew_instance := get_crew_instance(attr):
crew_instances.append(crew_instance)
return crew_instances
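To illustrate the discovery paths that get_crew_instance and fetch_crews handle, here is a hypothetical crew.py; every name in it is illustrative, not taken from the repository:

from crewai import Agent, Crew, Task
from crewai.flow import Flow

analyst = Agent(role="Analyst", goal="Analyze data", backstory="A careful analyst.")
report = Task(description="Analyze the dataset", expected_output="A report", agent=analyst)

# Path 1: a module-level attribute assigned to a Crew instance.
module_level_crew = Crew(agents=[analyst], tasks=[report])

# Path 2: a function with a `-> Crew` return annotation; get_type_hints()
# sees the annotation and get_crew_instance calls the function.
def build_crew() -> Crew:
    return Crew(agents=[analyst], tasks=[report])

# Path 3: a Flow subclass; fetch_crews instantiates it and scans its
# attributes, so annotated methods on the instance are discovered too.
class AnalysisFlow(Flow):
    def make_crew(self) -> Crew:
        return Crew(agents=[analyst], tasks=[report])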

View File

@@ -6,7 +6,17 @@ import warnings
from concurrent.futures import Future
from copy import copy as shallow_copy
from hashlib import md5
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union, cast
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Set,
Tuple,
Union,
cast,
)
from pydantic import (
UUID4,
@@ -24,6 +34,7 @@ from crewai.agent import Agent
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.cache import CacheHandler
from crewai.crews.crew_output import CrewOutput
from crewai.flow.flow_trackable import FlowTrackable
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.llm import LLM, BaseLLM
@@ -69,7 +80,7 @@ from crewai.utilities.training_handler import CrewTrainingHandler
warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")
class Crew(BaseModel):
class Crew(FlowTrackable, BaseModel):
"""
Represents a group of agents, defining how they should collaborate and the tasks they should perform.
@@ -304,7 +315,9 @@ class Crew(BaseModel):
"""Initialize private memory attributes."""
self._external_memory = (
# External memory doesn't support a default value since it was designed to be managed entirely externally
self.external_memory.set_crew(self) if self.external_memory else None
self.external_memory.set_crew(self)
if self.external_memory
else None
)
self._long_term_memory = self.long_term_memory
@@ -333,6 +346,7 @@ class Crew(BaseModel):
embedder=self.embedder,
collection_name="crew",
)
self.knowledge.add_sources()
except Exception as e:
self._logger.log(
@@ -1369,8 +1383,6 @@ class Crew(BaseModel):
else:
self._reset_specific_memory(command_type)
self._logger.log("info", f"{command_type} memory has been reset")
except Exception as e:
error_msg = f"Failed to reset {command_type} memory: {str(e)}"
self._logger.log("error", error_msg)
@@ -1391,8 +1403,14 @@ class Crew(BaseModel):
if system is not None:
try:
system.reset()
self._logger.log(
"info",
f"[Crew ({self.name if self.name else self.id})] {name} memory has been reset",
)
except Exception as e:
raise RuntimeError(f"Failed to reset {name} memory") from e
raise RuntimeError(
f"[Crew ({self.name if self.name else self.id})] Failed to reset {name} memory: {str(e)}"
) from e
def _reset_specific_memory(self, memory_type: str) -> None:
"""Reset a specific memory system.
@@ -1421,5 +1439,11 @@ class Crew(BaseModel):
try:
memory_system.reset()
self._logger.log(
"info",
f"[Crew ({self.name if self.name else self.id})] {name} memory has been reset",
)
except Exception as e:
raise RuntimeError(f"Failed to reset {name} memory") from e
raise RuntimeError(
f"[Crew ({self.name if self.name else self.id})] Failed to reset {name} memory: {str(e)}"
) from e

View File

@@ -0,0 +1,44 @@
import inspect
from typing import Optional
from pydantic import BaseModel, Field, InstanceOf, model_validator
from crewai.flow import Flow
class FlowTrackable(BaseModel):
"""Mixin that tracks the Flow instance that instantiated the object, e.g. a
Flow instance that created a Crew or Agent.
Automatically finds and stores a reference to the parent Flow instance by
inspecting the call stack.
"""
parent_flow: Optional[InstanceOf[Flow]] = Field(
default=None,
description="The parent flow of the instance, if it was created inside a flow.",
)
@model_validator(mode="after")
def _set_parent_flow(self, max_depth: int = 5) -> "FlowTrackable":
frame = inspect.currentframe()
try:
if frame is None:
return self
frame = frame.f_back
for _ in range(max_depth):
if frame is None:
break
candidate = frame.f_locals.get("self")
if isinstance(candidate, Flow):
self.parent_flow = candidate
break
frame = frame.f_back
finally:
del frame
return self
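A short sketch of the resulting behavior, mirroring the crew test near the end of this diff; note the stack walk is capped at max_depth=5 frames, so an object created through a deeper chain of helper calls inside a flow would keep parent_flow as None:

from crewai import Agent, Crew, Task
from crewai.flow import Flow, start

agent = Agent(role="Researcher", goal="Research", backstory="...")
task = Task(description="Do research", expected_output="notes", agent=agent)

class MyFlow(Flow):
    @start()
    def begin(self):
        # Created directly inside a Flow method: the validator finds this
        # Flow instance on the call stack and records it.
        return Crew(agents=[agent], tasks=[task])

flow = MyFlow()
crew = flow.kickoff()
assert crew.parent_flow is flow

# Created outside any Flow: nothing on the stack matches.
assert Crew(agents=[agent], tasks=[task]).parent_flow is None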

View File

@@ -41,7 +41,6 @@ class Knowledge(BaseModel):
)
self.sources = sources
self.storage.initialize_knowledge_storage()
self._add_sources()
def query(
self, query: List[str], results_limit: int = 3, score_threshold: float = 0.35
@@ -63,7 +62,7 @@ class Knowledge(BaseModel):
)
return results
def _add_sources(self):
def add_sources(self):
try:
for source in self.sources:
source.storage = self.storage
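Since ingestion is no longer part of __init__, callers create the Knowledge first and add sources as an explicit second step, which is what allows a reset to run against an empty collection. A minimal sketch, with constructor arguments assumed from the Crew diff above:

from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

source = StringKnowledgeSource(content="CrewAI ships a CLI.")
knowledge = Knowledge(collection_name="crew", sources=[source])
# Storage is initialized, but nothing has been ingested yet, so a
# reset can safely clear the collection at this point.
knowledge.add_sources()  # explicit, repeatable ingestion step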

View File

@@ -13,6 +13,7 @@ from crewai.agents.parser import (
AgentFinish,
OutputParserException,
)
from crewai.flow.flow_trackable import FlowTrackable
from crewai.llm import LLM
from crewai.tools.base_tool import BaseTool
from crewai.tools.structured_tool import CrewStructuredTool
@@ -80,7 +81,7 @@ class LiteAgentOutput(BaseModel):
return self.raw
class LiteAgent(BaseModel):
class LiteAgent(FlowTrackable, BaseModel):
"""
A lightweight agent that can process messages and use tools.
@@ -162,7 +163,7 @@ class LiteAgent(BaseModel):
_messages: List[Dict[str, str]] = PrivateAttr(default_factory=list)
_iterations: int = PrivateAttr(default=0)
_printer: Printer = PrivateAttr(default_factory=Printer)
@model_validator(mode="after")
def setup_llm(self):
"""Set up the LLM and other components after initialization."""

View File

@@ -245,6 +245,9 @@ class AccumulatedToolArgs(BaseModel):
function: FunctionArgs = Field(default_factory=FunctionArgs)
EMPTY = object()
class LLM(BaseLLM):
def __init__(
self,
@@ -253,7 +256,7 @@ class LLM(BaseLLM):
temperature: Optional[float] = None,
top_p: Optional[float] = None,
n: Optional[int] = None,
stop: Optional[Union[str, List[str]]] = None,
stop: Optional[Union[str, List[str], object, None]] = EMPTY,
max_completion_tokens: Optional[int] = None,
max_tokens: Optional[int] = None,
presence_penalty: Optional[float] = None,
@@ -296,15 +299,16 @@ class LLM(BaseLLM):
self.additional_params = kwargs
self.is_anthropic = self._is_anthropic_model(model)
self.stream = stream
litellm.drop_params = True
# Normalize self.stop to always be a List[str]
if stop is None:
self.stop: List[str] = []
if stop is EMPTY:
self.stop = []
elif stop is None:
self.stop = None
elif isinstance(stop, str):
self.stop = [stop]
else:
elif isinstance(stop, list):
self.stop = stop
self.set_callbacks(callbacks)
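EMPTY is a sentinel default: it lets the constructor tell "stop was never passed" (normalized to []) apart from an explicit stop=None (meaning: do not send the parameter at all). The pattern in isolation, as a generic sketch independent of crewai:

_UNSET = object()  # unique sentinel; no caller can pass this value by accident

def normalize_stop(stop=_UNSET):
    if stop is _UNSET:        # argument omitted: keep the safe default
        return []
    if stop is None:          # explicit opt-out: suppress the parameter
        return None
    if isinstance(stop, str):
        return [stop]         # a single string becomes a one-element list
    return list(stop)

assert normalize_stop() == []
assert normalize_stop(None) is None
assert normalize_stop("\n") == ["\n"]
assert normalize_stop(["stop", "end"]) == ["stop", "end"]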

View File

@@ -0,0 +1,512 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "What is the weather in San Francisco?"}],
"model": "o3", "stop": []}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '111'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.68.2
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.68.2
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.10.16
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"error\": {\n \"message\": \"Unsupported parameter: 'stop'
is not supported with this model.\",\n \"type\": \"invalid_request_error\",\n
\ \"param\": \"stop\",\n \"code\": \"unsupported_parameter\"\n }\n}"
headers:
CF-RAY:
- 939854b16c1b7e0a-GRU
Connection:
- keep-alive
Content-Length:
- '196'
Content-Type:
- application/json
Date:
- Fri, 02 May 2025 14:50:25 GMT
Server:
- cloudflare
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '40'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999988'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_8033d4a9265538858c4b2a8cc7d53fc7
status:
code: 400
message: Bad Request
- request:
body: '{"messages": [{"role": "user", "content": "What is the weather in San Francisco?"}],
"model": "o3"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '99'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.68.2
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.68.2
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.10.16
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA3RWzW4bRwy++ymIPdmApFiy5NS6OS6cpoGDtE6bFnUgULPU7lSznA1nxms1CKB3
yClAcukT5BnyKHqSYmb1izSXxWJJfsOPHzncd0cAmc6zMWSqRK+q2nSf3Fanf1DTXD3/5Sb82VcX
fUd8PZ2b4e8vz7JOjLDTv0n5TVRP2ao25LXl1qyE0FNE7T8envcvHg8Ho2SobE4mhtmz7uB0MOqe
Drv983VQabUil43hryMAgHfpGdPjnB6yMZx2Nl8qcg4LysZbJ4BMrIlfMnROO4/ss87OqCx74pTx
M8gtr5YfPZR4T+BLApxqo/0CvIW3gWQBRt8TNIS+JIEcPXbAWXgGCttIT8bAwoYUTQ+o4qeqJkEf
hDrQaM7B1UR5B6yAMjbkoOw9CWiGW2S4FmSlnbKAHnypHVS2IvY9uLYCCLWQ0o46EOqut11fUrfS
HDzBzAopdD6ev1p+NAYaZB9zVyWpOSAIGY1TQ+BsEEXggioB3fiO73i1/BdeYJQKDbxeM7wludeK
4HhNuVfY+xOA1nvtFEWOXCLjTdxVicxkAOt6432pVFibO1u/3zgnKcQGbusRMaZBG9/VvC1zBLEM
lXUe6tIyuQ3mq5JgtfzkZqvlZ6hRzbEgqJCxIHFg9DyZG++lp/mRQ57MNtWNETOxFSBMxTaOJOlh
qwo5B6OZYlFel+hbdaGIykdltQOEt0GrORRB5xQL7Be1VmhAWc51rKGDCNPEcG+BHmpSPpX51drz
UOpNPV6i9yTs7rjfg1e7xhnDjTY5LAilm6rVg590UToILqAxC3DaxwaK9Qt1TQKjU9etdA7npw6+
frmG4/6w27+Ar1+uToDuiaO3C1UV1Wi0L8HYxh1CDNcQow3ED93+WULoRQEGsSOLcRJhhpUNDioU
zQQGFyRwvFp+eo5iEuK1LVbLzycg1ph0TFKzTQAqK6y5cI9iYvGlA+RqUjpR0ww/B6bV8sNlKILz
HZgT1ZqLOB1cOFDWmlTuQnABTNj2kbJxFppSGwLNJjqgELqk5jQ2P/MixQ1OE78GpSJJ3M568Ctq
bskJal6AI3SWo/piQ1GaBbyw91RNSVbLDzcoquzBbeLj4jlgZ54Yppapm8sioQ57cKOVWGV0hZ7c
GK52/aJK5ILAeSHycbLbt15K4UY7py1/m/rjdeqJZWR9G9iRhx+186KVj/nObFG03qNR9O7BpWlw
4WAqsYrIQA9esJUtJTrqwWvN+RguZ7EdreVIH6ZC9A85OO6PuoMRVHV5kqjGqbF8IBoamwQiaNCT
zMSyTynYtsOeWpMTw1P0BE9E5wX14nQ8m6URY6K81TCIEPv9uRJdlB7YNp3tGO6mdk7jO1ZBDGyG
/hZ5sp2zO44zHi/DSHy1/HQwhR24uozXgmZAXhxcP402ZncDYJyetElAqLbie/sbRWgWHMZlxsGY
PQMyW59u2LTL3qwt77fba6ZZu3IiqdPiRnLe1lmyvj8CeJO2YThYcFkttqr9xNs5Jdj+sIXLdqt3
Zzzvr1dl5q1Hs2cYbMIO8CY5edTG7e3TTKEqKd+F7pYvhlzbPcPRHrtv0/k/7Ja55mKPz8Xguwfs
DEpR7Smf1EK5Voekd25C8efke27bOqeUM9euvonXJFGLnGYYTPvvkLmF81RNZpoLklp0+oGIch+9
P/oPAAD//wMAuBpQTjoJAAA=
headers:
CF-RAY:
- 939854b31dda7e0a-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 02 May 2025 14:50:43 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '18374'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999988'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_444d8fb9f9d6ad89f75b28e7b0c95844
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "What is the weather in San Francisco?"}],
"model": "o3-mini", "stop": []}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '116'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.68.2
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.68.2
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.10.16
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA3STz27bMAzG73kKQme7qJOmf3LrMAwoOuySrcMwFAYj0bEWWRQkOm1XFNhr7PX6
JIOdNHax9qKDfh8/UiT1OAFQ1qgFKF2j6Ca4/MOyKban16by5ve3+8v4EeVmc31TX3725ofKughe
/SItL1FHmpvgSCz7HdaRUKhzLc5OTouLs3lR9KBhQ64L41neWG/z6fF0nh8X+azYR9ZsNSW1gJ8T
AIDH/uxq9Ibu1QKOs5ebhlLCNanFQQSgIrvuRmFKNgl6UdkANXsh35d9BYb985+/AjVuCSKhy8U2
BAYFAbWmlDJIDFegcScMkbfWEEhNoNsYyQvcEUpN8Qi+MqxJoA25cG5QCDR7Y7uWJLAelujhU0Sv
bdKcQXCEiUDXpDeAILFNQubFDhLFrdUEzm7okENzk/XJv2Bniw6+7+XLvfyOVskKZcARHriNECJV
FOPIGUM4GrckUtUm7EbiW+dGAL1n6RP1w7jdk6dD+yvrbarLSJjYdy1NwkH19GkCcNuPs301IRUi
N0FK4Q31tsXJzk4NCzTA2Xmxp8KCbgQu5tkbfqUhQevSaCGURl2TGUKH7cHWWB6Byeh1/5fzlvfu
5davR5VN308wAK0pCJkyRDJWv370IIvUfbH3ZIc+9yWr/cKUYil2szBUYet2y6/SQxJqysr6NcUQ
7e4HVKE8X03PT85mK7NSk6fJPwAAAP//AwAuENFVCwQAAA==
headers:
CF-RAY:
- 939856cecf61f1cd-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 02 May 2025 14:51:54 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '2897'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999987'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_978f3c657aa396dbd484126271a59372
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "What is the weather in San Francisco?"}],
"model": "o3-mini"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '104'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.68.2
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.68.2
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.10.16
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//dFNNb9swDL3nVxC6bAPsIJ9Nl9uGYlgu24AW6GEoAlpmbLWyJIhU26zo
fx9sJ3GKdRcd9MjHR/LxZQSgTKnWoHSNoptg86/XzYz2PyeVvrq8sH++J//M9Is296m42qiszfDF
PWk5Zo21b4IlMd71sI6EQi3rdLW4mH5eLZcXHdD4kmyb5ud5Y5zJZ5PZMp9M8/n0kFl7o4nVGn6P
AABeurfV6Ep6VmuYZMefhpixIrU+BQGo6G37o5DZsKATlQ2g9k7IdbI3UHr3QaDGRwKpCTQGLIw1
sgfxgFoTM0RCm4tpCJ4IpaYIJQqO4cZDRdLnpRjJySnAOLhGB98iOm1Y+ww2wKmqiAV0TfrBuAoQ
IlmDhSVgn6Im4KRrQAY8ET1RwUYIPlrzcKrfTjqDL1qn2/4jAx87HT+wHT9aOABwTfHRaPrUBiTu
qx65MQTwDvY+ReAGo4TaOxrDTU18lMTwZKwdBtR4FkghF5+XKATG7Xxsuqotl1ATKKKkSBmESNoE
Ix2aAboS/KGpXoD2rjQtyOPzBUXaJcbWIC5Zewagc75n66xxd0BeT2bYGWe43kZC9q5dMIsPqkNf
RwB3nbnSG7+oEH0TZCv+gTra6aKnU4OdB3C+mB9Q8YL2DFiusnf4tiUJGstn9lQadU3lkDp4GVNp
/BkwOuvuXznvcfedG1cNLLP+6N4tMABaUxAqtyFSafTbpoewSO3B/y/sNOdOsuLeelsxFNtdlLTD
ZPtTVLxnoWa7M66iGKLp73EXtpfF7HKxmhdloUavo78AAAD//wMAQyxLXZkEAAA=
headers:
CF-RAY:
- 939857e48bd27df2-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 02 May 2025 14:52:38 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '2594'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999987'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_6daa33da631e130fca25451911beaf3b
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "What is the weather in San Francisco?"}],
"model": "o3-mini", "stop": ["\n"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '120'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.68.2
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.68.2
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.10.16
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//dFNLT9tAEL7nV0z37CDiQBJyRKUSVVuh0sehQtFkPY6n2Ye7O24aIST+
Bn+PX1KtE2LTwmUP8z12nrcDAMWFmoPSFYq2tRmeX9txFcf51cXF228fzu3a/bKfv+bT98ur0UeV
JYVf/iQtT6oj7W1tSNi7HawDoVByHU1PJqOz6SQftYD1BZkk8+OhZcfD/Dg/HR6PhuPRXll51hTV
HH4MAABu2zfl6Ar6o+ZwnD1FLMWIK1LzAwlABW9SRGGMHAWdqKwDtXdCrk378vH+wUL0IWwzWDYC
l6DRPd4/CJQkuoJAaIbClmBDKBUFKFDwCL54WJGAVAS6CYGcHAjs4BodvAvoNEftM5CwBV2RXrNb
AUIgw7g0neWGlpGFwAfAugbDa2qdP2HqJRr4videU/jNmrKnQOp4lmRb3wSoA5UUAhUH47jjH8Fl
mSjgiAqoyNSwYamg9IE0RklpsSt9sO2HybDiKD6wRgPasEWhtvAk+bc6QwKWYO385k2/zYHKJmIa
s2uM6QHonJf2p3bAN3vk7jDSkh3HahEIo3dpTFF8rVr0bgBw065I82zqqg7e1rIQv6bWdnSys1Pd
UnZgPpntUfGCpgfM8uwFv0VBgmxib8mURl1R0Um7jcSmYN8DBr3q/k/nJe9d5exWvXrO8lc/6ACt
qRYqFnWggvXzojtaoHS2r9EOfW5TVvsdWghTSLMoqMTG7A5KxW0UsouS3YpCHXh3VWW9mE40lTSb
nWo1uBv8BQAA//8DAD51aKFfBAAA
headers:
CF-RAY:
- 939859809d7c7ded-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 02 May 2025 14:53:44 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '2803'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999987'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_c95eeb550e9f00eaa22ba5cad8587985
status:
code: 200
message: OK
version: 1

View File

@@ -18,6 +18,7 @@ from crewai.cli.cli import (
train,
version,
)
from crewai.crew import Crew
@pytest.fixture
@@ -55,81 +56,133 @@ def test_train_invalid_string_iterations(train_crew, runner):
)
@mock.patch("crewai.cli.reset_memories_command.get_crew")
def test_reset_all_memories(mock_get_crew, runner):
mock_crew = mock.Mock()
mock_get_crew.return_value = mock_crew
@pytest.fixture
def mock_crew():
_mock = mock.Mock(spec=Crew, name="test_crew")
_mock.name = "test_crew"
return _mock
@pytest.fixture
def mock_get_crews(mock_crew):
with mock.patch(
"crewai.cli.reset_memories_command.get_crews", return_value=[mock_crew]
) as mock_get_crew:
yield mock_get_crew
def test_reset_all_memories(mock_get_crews, runner):
result = runner.invoke(reset_memories, ["-a"])
mock_crew.reset_memories.assert_called_once_with(command_type="all")
assert result.output == "All memories have been reset.\n"
call_count = 0
for crew in mock_get_crews.return_value:
crew.reset_memories.assert_called_once_with(command_type="all")
assert (
f"[Crew ({crew.name})] Reset memories command has been completed."
in result.output
)
call_count += 1
assert call_count == 1, "reset_memories should have been called once"
@mock.patch("crewai.cli.reset_memories_command.get_crew")
def test_reset_short_term_memories(mock_get_crew, runner):
mock_crew = mock.Mock()
mock_get_crew.return_value = mock_crew
def test_reset_short_term_memories(mock_get_crews, runner):
result = runner.invoke(reset_memories, ["-s"])
call_count = 0
for crew in mock_get_crews.return_value:
crew.reset_memories.assert_called_once_with(command_type="short")
assert (
f"[Crew ({crew.name})] Short term memory has been reset." in result.output
)
call_count += 1
mock_crew.reset_memories.assert_called_once_with(command_type="short")
assert result.output == "Short term memory has been reset.\n"
assert call_count == 1, "reset_memories should have been called once"
@mock.patch("crewai.cli.reset_memories_command.get_crew")
def test_reset_entity_memories(mock_get_crew, runner):
mock_crew = mock.Mock()
mock_get_crew.return_value = mock_crew
def test_reset_entity_memories(mock_get_crews, runner):
result = runner.invoke(reset_memories, ["-e"])
call_count = 0
for crew in mock_get_crews.return_value:
crew.reset_memories.assert_called_once_with(command_type="entity")
assert f"[Crew ({crew.name})] Entity memory has been reset." in result.output
call_count += 1
mock_crew.reset_memories.assert_called_once_with(command_type="entity")
assert result.output == "Entity memory has been reset.\n"
assert call_count == 1, "reset_memories should have been called once"
@mock.patch("crewai.cli.reset_memories_command.get_crew")
def test_reset_long_term_memories(mock_get_crew, runner):
mock_crew = mock.Mock()
mock_get_crew.return_value = mock_crew
def test_reset_long_term_memories(mock_get_crews, runner):
result = runner.invoke(reset_memories, ["-l"])
call_count = 0
for crew in mock_get_crews.return_value:
crew.reset_memories.assert_called_once_with(command_type="long")
assert f"[Crew ({crew.name})] Long term memory has been reset." in result.output
call_count += 1
mock_crew.reset_memories.assert_called_once_with(command_type="long")
assert result.output == "Long term memory has been reset.\n"
assert call_count == 1, "reset_memories should have been called once"
@mock.patch("crewai.cli.reset_memories_command.get_crew")
def test_reset_kickoff_outputs(mock_get_crew, runner):
mock_crew = mock.Mock()
mock_get_crew.return_value = mock_crew
def test_reset_kickoff_outputs(mock_get_crews, runner):
result = runner.invoke(reset_memories, ["-k"])
call_count = 0
for crew in mock_get_crews.return_value:
crew.reset_memories.assert_called_once_with(command_type="kickoff_outputs")
assert (
f"[Crew ({crew.name})] Latest Kickoff outputs stored has been reset."
in result.output
)
call_count += 1
mock_crew.reset_memories.assert_called_once_with(command_type="kickoff_outputs")
assert result.output == "Latest Kickoff outputs stored has been reset.\n"
assert call_count == 1, "reset_memories should have been called once"
@mock.patch("crewai.cli.reset_memories_command.get_crew")
def test_reset_multiple_memory_flags(mock_get_crew, runner):
mock_crew = mock.Mock()
mock_get_crew.return_value = mock_crew
def test_reset_multiple_memory_flags(mock_get_crews, runner):
result = runner.invoke(reset_memories, ["-s", "-l"])
call_count = 0
for crew in mock_get_crews.return_value:
crew.reset_memories.assert_has_calls(
[mock.call(command_type="long"), mock.call(command_type="short")]
)
assert (
f"[Crew ({crew.name})] Long term memory has been reset.\n"
f"[Crew ({crew.name})] Short term memory has been reset.\n" in result.output
)
call_count += 1
# Check that reset_memories was called twice with the correct arguments
assert mock_crew.reset_memories.call_count == 2
mock_crew.reset_memories.assert_has_calls(
[mock.call(command_type="long"), mock.call(command_type="short")]
)
assert (
result.output
== "Long term memory has been reset.\nShort term memory has been reset.\n"
)
assert call_count == 1, "reset_memories should have been called once"
@mock.patch("crewai.cli.reset_memories_command.get_crew")
def test_reset_knowledge(mock_get_crew, runner):
mock_crew = mock.Mock()
mock_get_crew.return_value = mock_crew
def test_reset_knowledge(mock_get_crews, runner):
result = runner.invoke(reset_memories, ["--knowledge"])
call_count = 0
for crew in mock_get_crews.return_value:
crew.reset_memories.assert_called_once_with(command_type="knowledge")
assert f"[Crew ({crew.name})] Knowledge has been reset." in result.output
call_count += 1
assert call_count == 1, "reset_memories should have been called once"
def test_reset_memory_from_many_crews(mock_get_crews, runner):
crews = []
for crew_id in ["id-1234", "id-5678"]:
mock_crew = mock.Mock(spec=Crew)
mock_crew.name = None
mock_crew.id = crew_id
crews.append(mock_crew)
mock_get_crews.return_value = crews
# Run the command
result = runner.invoke(reset_memories, ["--knowledge"])
mock_crew.reset_memories.assert_called_once_with(command_type="knowledge")
assert result.output == "Knowledge has been reset.\n"
call_count = 0
for crew in crews:
call_count += 1
crew.reset_memories.assert_called_once_with(command_type="knowledge")
assert f"[Crew ({crew.id})] Knowledge has been reset." in result.output
assert call_count == 2, "reset_memories should have been called twice"
def test_reset_no_memory_flags(runner):

View File

@@ -3,12 +3,13 @@ import tempfile
import unittest
import unittest.mock
from contextlib import contextmanager
from io import StringIO
from unittest import mock
from unittest.mock import MagicMock, patch
import pytest
from pytest import raises
from crewai.cli.authentication.utils import TokenManager
from crewai.cli.tools.main import ToolCommand
@@ -23,17 +24,20 @@ def in_temp_dir():
os.chdir(original_dir)
@patch("crewai.cli.tools.main.subprocess.run")
def test_create_success(mock_subprocess):
with in_temp_dir():
tool_command = ToolCommand()
@pytest.fixture
def tool_command():
TokenManager().save_tokens("test-token", 36000)
tool_command = ToolCommand()
with patch.object(tool_command, "login"):
yield tool_command
with (
patch.object(tool_command, "login") as mock_login,
patch("sys.stdout", new=StringIO()) as fake_out,
):
tool_command.create("test-tool")
output = fake_out.getvalue()
@patch("crewai.cli.tools.main.subprocess.run")
def test_create_success(mock_subprocess, capsys, tool_command):
with in_temp_dir():
tool_command.create("test-tool")
output = capsys.readouterr().out
assert "Creating custom tool test_tool..." in output
assert os.path.isdir("test_tool")
assert os.path.isfile(os.path.join("test_tool", "README.md"))
@@ -47,15 +51,12 @@ def test_create_success(mock_subprocess):
content = f.read()
assert "class TestTool" in content
mock_login.assert_called_once()
mock_subprocess.assert_called_once_with(["git", "init"], check=True)
assert "Creating custom tool test_tool..." in output
@patch("crewai.cli.tools.main.subprocess.run")
@patch("crewai.cli.plus_api.PlusAPI.get_tool")
def test_install_success(mock_get, mock_subprocess_run):
def test_install_success(mock_get, mock_subprocess_run, capsys, tool_command):
mock_get_response = MagicMock()
mock_get_response.status_code = 200
mock_get_response.json.return_value = {
@@ -65,11 +66,9 @@ def test_install_success(mock_get, mock_subprocess_run):
mock_get.return_value = mock_get_response
mock_subprocess_run.return_value = MagicMock(stderr=None)
tool_command = ToolCommand()
with patch("sys.stdout", new=StringIO()) as fake_out:
tool_command.install("sample-tool")
output = fake_out.getvalue()
tool_command.install("sample-tool")
output = capsys.readouterr().out
assert "Successfully installed sample-tool" in output
mock_get.assert_has_calls([mock.call("sample-tool"), mock.call().json()])
mock_subprocess_run.assert_any_call(
@@ -86,54 +85,42 @@ def test_install_success(mock_get, mock_subprocess_run):
env=unittest.mock.ANY,
)
assert "Successfully installed sample-tool" in output
@patch("crewai.cli.plus_api.PlusAPI.get_tool")
def test_install_tool_not_found(mock_get):
def test_install_tool_not_found(mock_get, capsys, tool_command):
mock_get_response = MagicMock()
mock_get_response.status_code = 404
mock_get.return_value = mock_get_response
tool_command = ToolCommand()
with patch("sys.stdout", new=StringIO()) as fake_out:
try:
tool_command.install("non-existent-tool")
except SystemExit:
pass
output = fake_out.getvalue()
with raises(SystemExit):
tool_command.install("non-existent-tool")
output = capsys.readouterr().out
assert "No tool found with this name" in output
mock_get.assert_called_once_with("non-existent-tool")
assert "No tool found with this name" in output
@patch("crewai.cli.plus_api.PlusAPI.get_tool")
def test_install_api_error(mock_get):
def test_install_api_error(mock_get, capsys, tool_command):
mock_get_response = MagicMock()
mock_get_response.status_code = 500
mock_get.return_value = mock_get_response
tool_command = ToolCommand()
with patch("sys.stdout", new=StringIO()) as fake_out:
try:
tool_command.install("error-tool")
except SystemExit:
pass
output = fake_out.getvalue()
with raises(SystemExit):
tool_command.install("error-tool")
output = capsys.readouterr().out
assert "Failed to get tool details" in output
mock_get.assert_called_once_with("error-tool")
assert "Failed to get tool details" in output
@patch("crewai.cli.tools.main.git.Repository.is_synced", return_value=False)
def test_publish_when_not_in_sync(mock_is_synced):
with patch("sys.stdout", new=StringIO()) as fake_out, raises(SystemExit):
tool_command = ToolCommand()
def test_publish_when_not_in_sync(mock_is_synced, capsys, tool_command):
with raises(SystemExit):
tool_command.publish(is_public=True)
assert "Local changes need to be resolved before publishing" in fake_out.getvalue()
output = capsys.readouterr().out
assert "Local changes need to be resolved before publishing" in output
@patch("crewai.cli.tools.main.get_project_name", return_value="sample-tool")
@@ -157,13 +144,13 @@ def test_publish_when_not_in_sync_and_force(
mock_get_project_description,
mock_get_project_version,
mock_get_project_name,
tool_command,
):
mock_publish_response = MagicMock()
mock_publish_response.status_code = 200
mock_publish_response.json.return_value = {"handle": "sample-tool"}
mock_publish.return_value = mock_publish_response
tool_command = ToolCommand()
tool_command.publish(is_public=True, force=True)
mock_get_project_name.assert_called_with(require=True)
@@ -205,13 +192,13 @@ def test_publish_success(
mock_get_project_description,
mock_get_project_version,
mock_get_project_name,
tool_command,
):
mock_publish_response = MagicMock()
mock_publish_response.status_code = 200
mock_publish_response.json.return_value = {"handle": "sample-tool"}
mock_publish.return_value = mock_publish_response
tool_command = ToolCommand()
tool_command.publish(is_public=True)
mock_get_project_name.assert_called_with(require=True)
@@ -251,25 +238,22 @@ def test_publish_failure(
mock_get_project_description,
mock_get_project_version,
mock_get_project_name,
capsys,
tool_command,
):
mock_publish_response = MagicMock()
mock_publish_response.status_code = 422
mock_publish_response.json.return_value = {"name": ["is already taken"]}
mock_publish.return_value = mock_publish_response
tool_command = ToolCommand()
with patch("sys.stdout", new=StringIO()) as fake_out:
try:
tool_command.publish(is_public=True)
except SystemExit:
pass
output = fake_out.getvalue()
mock_publish.assert_called_once()
with raises(SystemExit):
tool_command.publish(is_public=True)
output = capsys.readouterr().out
assert "Failed to complete operation" in output
assert "Name is already taken" in output
mock_publish.assert_called_once()
@patch("crewai.cli.tools.main.get_project_name", return_value="sample-tool")
@patch("crewai.cli.tools.main.get_project_version", return_value="1.0.0")
@@ -290,6 +274,8 @@ def test_publish_api_error(
mock_get_project_description,
mock_get_project_version,
mock_get_project_name,
capsys,
tool_command,
):
mock_response = MagicMock()
mock_response.status_code = 500
@@ -297,14 +283,9 @@ def test_publish_api_error(
mock_response.ok = False
mock_publish.return_value = mock_response
tool_command = ToolCommand()
with patch("sys.stdout", new=StringIO()) as fake_out:
try:
tool_command.publish(is_public=True)
except SystemExit:
pass
output = fake_out.getvalue()
with raises(SystemExit):
tool_command.publish(is_public=True)
output = capsys.readouterr().out
assert "Request to Enterprise API failed" in output
mock_publish.assert_called_once()
assert "Request to Enterprise API failed" in output

View File

@@ -17,6 +17,7 @@ from crewai.agents.cache import CacheHandler
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.crew import Crew
from crewai.crews.crew_output import CrewOutput
from crewai.flow import Flow, listen, start
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
from crewai.llm import LLM
from crewai.memory.contextual.contextual_memory import ContextualMemory
@@ -2164,7 +2165,6 @@ def test_tools_with_custom_caching():
with patch.object(
CacheHandler, "add", wraps=crew._cache_handler.add
) as add_to_cache:
result = crew.kickoff()
# Check that add_to_cache was called exactly twice
@@ -4351,3 +4351,35 @@ def test_crew_copy_with_memory():
raise e # Re-raise other validation errors
except Exception as e:
pytest.fail(f"Copying crew raised an unexpected exception: {e}")
def test_sets_parent_flow_when_outside_flow(researcher, writer):
crew = Crew(
agents=[researcher, writer],
process=Process.sequential,
tasks=[
Task(description="Task 1", expected_output="output", agent=researcher),
Task(description="Task 2", expected_output="output", agent=writer),
],
)
assert crew.parent_flow is None
def test_sets_parent_flow_when_inside_flow(researcher, writer):
class MyFlow(Flow):
@start()
def start(self):
return Crew(
agents=[researcher, writer],
process=Process.sequential,
tasks=[
Task(
description="Task 1", expected_output="output", agent=researcher
),
Task(description="Task 2", expected_output="output", agent=writer),
],
)
flow = MyFlow()
result = flow.kickoff()
assert result.parent_flow is flow

View File

@@ -373,6 +373,7 @@ def get_weather_tool_schema():
},
}
def test_context_window_exceeded_error_handling():
"""Test that litellm.ContextWindowExceededError is converted to LLMContextLengthExceededException."""
from litellm.exceptions import ContextWindowExceededError
@@ -388,7 +389,7 @@ def test_context_window_exceeded_error_handling():
mock_completion.side_effect = ContextWindowExceededError(
"This model's maximum context length is 8192 tokens. However, your messages resulted in 10000 tokens.",
model="gpt-4",
llm_provider="openai"
llm_provider="openai",
)
with pytest.raises(LLMContextLengthExceededException) as excinfo:
@@ -403,7 +404,7 @@ def test_context_window_exceeded_error_handling():
mock_completion.side_effect = ContextWindowExceededError(
"This model's maximum context length is 8192 tokens. However, your messages resulted in 10000 tokens.",
model="gpt-4",
llm_provider="openai"
llm_provider="openai",
)
with pytest.raises(LLMContextLengthExceededException) as excinfo:
@@ -618,3 +619,53 @@ def test_handle_streaming_tool_calls_no_tools(mock_emit):
expected_completed_llm_call=1,
expected_final_chunk_result=response,
)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_testing_removing_stop_parameter():
from litellm.exceptions import BadRequestError
# currently o3 does not support stop
llm = LLM(model="o3", temperature=0.5)
with pytest.raises(
BadRequestError,
match="parameter: 'stop' is not supported with this model",
):
llm.call(
messages=[
{"role": "user", "content": "What is the weather in San Francisco?"},
],
)
llm = LLM(model="o3", temperature=0.5, stop=None)
response = llm.call(
messages=[
{"role": "user", "content": "What is the weather in San Francisco?"},
],
)
assert isinstance(response, str)
# testing another model that supports stop
llm = LLM(model="o3-mini", temperature=0.5)
response = llm.call(
messages=[
{"role": "user", "content": "What is the weather in San Francisco?"},
],
)
assert isinstance(response, str)
llm = LLM(model="o3-mini", temperature=0.5, stop=None)
response = llm.call(
messages=[
{"role": "user", "content": "What is the weather in San Francisco?"},
],
)
assert isinstance(response, str)
llm = LLM(model="o3-mini", temperature=0.5, stop=["\n"])
response = llm.call(
messages=[
{"role": "user", "content": "What is the weather in San Francisco?"},
],
)
assert isinstance(response, str)

View File

@@ -1,13 +1,16 @@
import asyncio
from typing import cast
from unittest.mock import Mock
import pytest
from pydantic import BaseModel, Field
from crewai import LLM, Agent
from crewai.flow import Flow, start
from crewai.lite_agent import LiteAgent, LiteAgentOutput
from crewai.tools import BaseTool
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.agent_events import LiteAgentExecutionStartedEvent
from crewai.utilities.events.tool_usage_events import ToolUsageStartedEvent
@@ -255,3 +258,60 @@ async def test_lite_agent_returns_usage_metrics_async():
assert "21 million" in result.raw or "37 million" in result.raw
assert result.usage_metrics is not None
assert result.usage_metrics["total_tokens"] > 0
class TestFlow(Flow):
"""A test flow that creates and runs an agent."""
def __init__(self, llm, tools):
self.llm = llm
self.tools = tools
super().__init__()
@start()
def start(self):
agent = Agent(
role="Test Agent",
goal="Test Goal",
backstory="Test Backstory",
llm=self.llm,
tools=self.tools,
)
return agent.kickoff("Test query")
def verify_agent_parent_flow(result, agent, flow):
"""Verify that both the result and agent have the correct parent flow."""
assert result.parent_flow is flow
assert agent is not None
assert agent.parent_flow is flow
def test_sets_parent_flow_when_inside_flow():
captured_agent = None
mock_llm = Mock(spec=LLM)
mock_llm.call.return_value = "Test response"
class MyFlow(Flow):
@start()
def start(self):
agent = Agent(
role="Test Agent",
goal="Test Goal",
backstory="Test Backstory",
llm=mock_llm,
tools=[WebSearchTool()],
)
return agent.kickoff("Test query")
flow = MyFlow()
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(LiteAgentExecutionStartedEvent)
def capture_agent(source, event):
nonlocal captured_agent
captured_agent = source
result = flow.kickoff()
assert captured_agent.parent_flow is flow