Compare commits

..

14 Commits

Author SHA1 Message Date
Lorenze Jay 1589230833 Merge branch 'main' of github.com:crewAIInc/crewAI into fix/memory-embedder-config 2024-10-22 12:28:07 -07:00
Lorenze Jay 6d0251224e Merge branch 'fix/memory-embedder-config' of github.com:crewAIInc/crewAI into fix/memory-embedder-config 2024-10-22 12:27:50 -07:00
Lorenze Jay e2f70cb53f updates to add more docs and correct imports with huggingface embedding server enabled 2024-10-22 12:27:47 -07:00
Brandon Hancock 0dd522ddff Merge branch 'main' into fix/memory-embedder-config 2024-10-21 19:41:45 -04:00
Lorenze Jay 5803b3fb69 fixed run types 2024-10-21 16:04:42 -07:00
Lorenze Jay 31c3082740 fixed docs 2024-10-21 14:29:11 -07:00
Lorenze Jay 21afc46c0d Merge branch 'main' of github.com:crewAIInc/crewAI into fix/memory-embedder-config 2024-10-21 14:24:35 -07:00
Lorenze Jay 78882c6de2 rm prints 2024-10-21 14:24:26 -07:00
Lorenze Jay 2786086974 fixes 2024-10-21 14:24:07 -07:00
Lorenze Jay 6b12ac9c0b Merge branch 'main' of github.com:crewAIInc/crewAI into fix/memory-embedder-config 2024-10-21 09:31:56 -07:00
Lorenze Jay 266ecff395 WIP: brandons notes 2024-10-21 09:31:39 -07:00
Lorenze Jay 34d748d18e raise error on unsupported provider 2024-10-21 08:37:42 -07:00
Lorenze Jay 79f527576b some fixes 2024-10-20 18:26:24 -07:00
Lorenze Jay 3fc83c624b ensure original embedding config works 2024-10-20 18:12:57 -07:00
12 changed files with 86 additions and 65 deletions

View File

@@ -1,6 +1,6 @@
 [project]
 name = "crewai"
-version = "0.75.1"
+version = "0.74.2"
 description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
 readme = "README.md"
 requires-python = ">=3.10,<=3.13"

View File

@@ -14,5 +14,5 @@ warnings.filterwarnings(
     category=UserWarning,
     module="pydantic.main",
 )
-__version__ = "0.75.1"
+__version__ = "0.74.2"
 __all__ = ["Agent", "Crew", "Process", "Task", "Pipeline", "Router", "LLM", "Flow"]

View File

@@ -32,11 +32,10 @@ def crewai():
 @crewai.command()
 @click.argument("type", type=click.Choice(["crew", "pipeline", "flow"]))
 @click.argument("name")
-@click.option("--provider", type=str, help="The provider to use for the crew")
-def create(type, name, provider):
+def create(type, name):
     """Create a new crew, pipeline, or flow."""
     if type == "crew":
-        create_crew(name, provider)
+        create_crew(name)
     elif type == "pipeline":
         create_pipeline(name)
     elif type == "flow":

View File

@@ -70,19 +70,18 @@ def copy_template_files(folder_path, name, class_name, parent_folder):
         copy_template(src_file, dst_file, name, class_name, folder_path.name)
-def create_crew(name, provider=None, parent_folder=None):
+def create_crew(name, parent_folder=None):
     folder_path, folder_name, class_name = create_folder_structure(name, parent_folder)
     env_vars = load_env_vars(folder_path)
-    if not provider:
-        provider_models = get_provider_data()
-        if not provider_models:
-            return
+    provider_models = get_provider_data()
+    if not provider_models:
+        return
-        selected_provider = select_provider(provider_models)
-        if not selected_provider:
-            return
-        provider = selected_provider
+    selected_provider = select_provider(provider_models)
+    if not selected_provider:
+        return
+    provider = selected_provider
     # selected_model = select_model(provider, provider_models)
     # if not selected_model:

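Taken together with the CLI hunk above, this removes the --provider flag entirely: create_crew(name) now always fetches provider data and prompts the user to pick a provider. A minimal usage sketch (the import path and the interactive prompt are assumptions, not shown in this diff):

from crewai.cli.create_crew import create_crew  # assumed import path

# CLI equivalent is now `crewai create crew research_crew` with no --provider option;
# create_crew() calls get_provider_data() and select_provider() itself and prompts the user.
create_crew("research_crew")
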
View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<=3.13"
 dependencies = [
-    "crewai[tools]>=0.75.1,<1.0.0"
+    "crewai[tools]>=0.74.2,<1.0.0"
 ]
 [project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<=3.13"
 dependencies = [
-    "crewai[tools]>=0.75.1,<1.0.0",
+    "crewai[tools]>=0.74.2,<1.0.0",
 ]
 [project.scripts]

View File

@@ -6,7 +6,7 @@ authors = ["Your Name <you@example.com>"]
 [tool.poetry.dependencies]
 python = ">=3.10,<=3.13"
-crewai = { extras = ["tools"], version = ">=0.75.1,<1.0.0" }
+crewai = { extras = ["tools"], version = ">=0.74.2,<1.0.0" }
 asyncio = "*"
 [tool.poetry.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = ["Your Name <you@example.com>"]
 requires-python = ">=3.10,<=3.13"
 dependencies = [
-    "crewai[tools]>=0.75.1,<1.0.0"
+    "crewai[tools]>=0.74.2,<1.0.0"
 ]
 [project.scripts]

View File

@@ -5,6 +5,6 @@ description = "Power up your crews with {{folder_name}}"
 readme = "README.md"
 requires-python = ">=3.10,<=3.13"
 dependencies = [
-    "crewai[tools]>=0.75.1"
+    "crewai[tools]>=0.74.2"
 ]

View File

@@ -435,16 +435,15 @@ class Crew(BaseModel):
         self, n_iterations: int, filename: str, inputs: Optional[Dict[str, Any]] = {}
     ) -> None:
         """Trains the crew for a given number of iterations."""
-        train_crew = self.copy()
-        train_crew._setup_for_training(filename)
+        self._setup_for_training(filename)
         for n_iteration in range(n_iterations):
-            train_crew._train_iteration = n_iteration
-            train_crew.kickoff(inputs=inputs)
+            self._train_iteration = n_iteration
+            self.kickoff(inputs=inputs)
         training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()
-        for agent in train_crew.agents:
+        for agent in self.agents:
             result = TaskEvaluator(agent).evaluate_training_data(
                 training_data=training_data, agent_id=str(agent.id)
             )
@@ -988,19 +987,17 @@
         inputs: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
-        test_crew = self.copy()
-        self._test_execution_span = test_crew._telemetry.test_execution_span(
-            test_crew,
+        self._test_execution_span = self._telemetry.test_execution_span(
+            self,
             n_iterations,
             inputs,
             openai_model_name, # type: ignore[arg-type]
         ) # type: ignore[arg-type]
-        evaluator = CrewEvaluator(test_crew, openai_model_name) # type: ignore[arg-type]
+        evaluator = CrewEvaluator(self, openai_model_name) # type: ignore[arg-type]
         for i in range(1, n_iterations + 1):
             evaluator.set_iteration(i)
-            test_crew.kickoff(inputs=inputs)
+            self.kickoff(inputs=inputs)
         evaluator.print_crew_evaluation_result()

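Both hunks above drop the self.copy() indirection, so train() and test() now run kickoff(), telemetry, and evaluation against the crew instance itself. A minimal usage sketch assembled from the calls visible in this diff (agent/task fields and the agent= keyword are illustrative assumptions):

from crewai import Agent, Crew, Task

researcher = Agent(
    role="Researcher",
    goal="Express hot takes on AI.",
    backstory="You have a lot of experience with AI.",
)
task = Task(
    description="Give me an analysis around AI.",
    expected_output="5 bullet points about AI.",
    agent=researcher,  # assumed keyword, not shown in this diff
)
crew = Crew(agents=[researcher], tasks=[task])

# train() now sets _train_iteration and calls kickoff() on `crew` itself,
# so any state produced during training stays on this object.
crew.train(n_iterations=2, inputs={"topic": "AI"}, filename="trained_agents_data.pkl")

# test() likewise evaluates this same instance on each iteration.
crew.test(2, openai_model_name="gpt-4o-mini", inputs={"topic": "AI"})
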
View File

@@ -9,7 +9,6 @@ from unittest.mock import MagicMock, patch
 import instructor
 import pydantic_core
 import pytest
 from crewai.agent import Agent
 from crewai.agents.cache import CacheHandler
 from crewai.crew import Crew
@@ -498,7 +497,6 @@ def test_cache_hitting_between_agents():
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_api_calls_throttling(capsys):
     from unittest.mock import patch
     from crewai_tools import tool
     @tool
@@ -781,14 +779,11 @@ def test_async_task_execution_call_count():
     list_important_history.output = mock_task_output
     write_article.output = mock_task_output
-    with (
-        patch.object(
-            Task, "execute_sync", return_value=mock_task_output
-        ) as mock_execute_sync,
-        patch.object(
-            Task, "execute_async", return_value=mock_future
-        ) as mock_execute_async,
-    ):
+    with patch.object(
+        Task, "execute_sync", return_value=mock_task_output
+    ) as mock_execute_sync, patch.object(
+        Task, "execute_async", return_value=mock_future
+    ) as mock_execute_async:
         crew.kickoff()
         assert mock_execute_async.call_count == 2
@@ -1110,7 +1105,6 @@ def test_dont_set_agents_step_callback_if_already_set():
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_crew_function_calling_llm():
     from unittest.mock import patch
     from crewai_tools import tool
     llm = "gpt-4o"
@@ -1454,6 +1448,52 @@ def test_crew_does_not_interpolate_without_inputs():
         interpolate_task_inputs.assert_not_called()
+# def test_crew_partial_inputs():
+#     agent = Agent(
+#         role="{topic} Researcher",
+#         goal="Express hot takes on {topic}.",
+#         backstory="You have a lot of experience with {topic}.",
+#     )
+#     task = Task(
+#         description="Give me an analysis around {topic}.",
+#         expected_output="{points} bullet points about {topic}.",
+#     )
+#     crew = Crew(agents=[agent], tasks=[task], inputs={"topic": "AI"})
+#     inputs = {"topic": "AI"}
+#     crew._interpolate_inputs(inputs=inputs) # Manual call for now
+#     assert crew.tasks[0].description == "Give me an analysis around AI."
+#     assert crew.tasks[0].expected_output == "{points} bullet points about AI."
+#     assert crew.agents[0].role == "AI Researcher"
+#     assert crew.agents[0].goal == "Express hot takes on AI."
+#     assert crew.agents[0].backstory == "You have a lot of experience with AI."
+# def test_crew_invalid_inputs():
+#     agent = Agent(
+#         role="{topic} Researcher",
+#         goal="Express hot takes on {topic}.",
+#         backstory="You have a lot of experience with {topic}.",
+#     )
+#     task = Task(
+#         description="Give me an analysis around {topic}.",
+#         expected_output="{points} bullet points about {topic}.",
+#     )
+#     crew = Crew(agents=[agent], tasks=[task], inputs={"subject": "AI"})
+#     inputs = {"subject": "AI"}
+#     crew._interpolate_inputs(inputs=inputs) # Manual call for now
+#     assert crew.tasks[0].description == "Give me an analysis around {topic}."
+#     assert crew.tasks[0].expected_output == "{points} bullet points about {topic}."
+#     assert crew.agents[0].role == "{topic} Researcher"
+#     assert crew.agents[0].goal == "Express hot takes on {topic}."
+#     assert crew.agents[0].backstory == "You have a lot of experience with {topic}."
 def test_task_callback_on_crew():
     from unittest.mock import MagicMock, patch
@@ -1730,10 +1770,7 @@ def test_manager_agent_with_tools_raises_exception():
 @patch("crewai.crew.Crew.kickoff")
 @patch("crewai.crew.CrewTrainingHandler")
 @patch("crewai.crew.TaskEvaluator")
-@patch("crewai.crew.Crew.copy")
-def test_crew_train_success(
-    copy_mock, task_evaluator, crew_training_handler, kickoff_mock
-):
+def test_crew_train_success(task_evaluator, crew_training_handler, kickoff):
     task = Task(
         description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
         expected_output="5 bullet points with a paragraph for each idea.",
@@ -1744,19 +1781,9 @@ def test_crew_train_success(
         agents=[researcher, writer],
         tasks=[task],
     )
-    # Create a mock for the copied crew
-    copy_mock.return_value = crew
     crew.train(
         n_iterations=2, inputs={"topic": "AI"}, filename="trained_agents_data.pkl"
     )
-    # Ensure kickoff is called on the copied crew
-    kickoff_mock.assert_has_calls(
-        [mock.call(inputs={"topic": "AI"}), mock.call(inputs={"topic": "AI"})]
-    )
     task_evaluator.assert_has_calls(
         [
             mock.call(researcher),
@@ -1795,6 +1822,10 @@ def test_crew_train_success(
         ]
     )
+    kickoff.assert_has_calls(
+        [mock.call(inputs={"topic": "AI"}), mock.call(inputs={"topic": "AI"})]
+    )
 def test_crew_train_error():
     task = Task(
@@ -1809,7 +1840,7 @@ def test_crew_train_error():
     )
     with pytest.raises(TypeError) as e:
-        crew.train() # type: ignore purposefully throwing err
+        crew.train()
     assert "train() missing 1 required positional argument: 'n_iterations'" in str(
         e
     )
@@ -2505,9 +2536,8 @@ def test_conditional_should_execute():
 @mock.patch("crewai.crew.CrewEvaluator")
-@mock.patch("crewai.crew.Crew.copy")
 @mock.patch("crewai.crew.Crew.kickoff")
-def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):
+def test_crew_testing_function(mock_kickoff, crew_evaluator):
     task = Task(
         description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
         expected_output="5 bullet points with a paragraph for each idea.",
@@ -2518,15 +2548,11 @@ def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):
         agents=[researcher],
         tasks=[task],
     )
-    # Create a mock for the copied crew
-    copy_mock.return_value = crew
     n_iterations = 2
     crew.test(n_iterations, openai_model_name="gpt-4o-mini", inputs={"topic": "AI"})
-    # Ensure kickoff is called on the copied crew
-    kickoff_mock.assert_has_calls(
+    assert len(mock_kickoff.mock_calls) == n_iterations
+    mock_kickoff.assert_has_calls(
         [mock.call(inputs={"topic": "AI"}), mock.call(inputs={"topic": "AI"})]
     )

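One practical note on the test hunks above: with @patch("crewai.crew.Crew.copy") removed, the injected mocks shift position, because unittest.mock applies stacked @patch decorators bottom-up (the decorator closest to the function supplies the first argument). A small sketch of that ordering, with an illustrative test name:

from unittest.mock import patch

@patch("crewai.crew.Crew.kickoff")         # outermost patch -> last argument
@patch("crewai.crew.CrewTrainingHandler")
@patch("crewai.crew.TaskEvaluator")        # innermost patch -> first argument
def test_patch_ordering(task_evaluator, crew_training_handler, kickoff):
    # kickoff is the mock replacing Crew.kickoff, which is why the updated tests
    # assert on it directly after crew.train(...) / crew.test(...).
    kickoff.assert_not_called()
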
uv.lock (generated)
View File

@@ -627,7 +627,7 @@ wheels = [
 [[package]]
 name = "crewai"
-version = "0.75.1"
+version = "0.74.2"
 source = { editable = "." }
 dependencies = [
     { name = "appdirs" },