Compare commits


1 Commit

Author SHA1 Message Date
Devin AI
d2c4040dd6 fix: properly complete Future when async task execution fails
This fixes GitHub issue #4072 where an async task that errors would keep
its thread alive because the Future was never completed.

The issue was in the _execute_task_async method, which didn't handle
exceptions from _execute_core. When an exception was raised,
future.set_result() was never called, leaving the Future in an incomplete
state. This caused future.result() to block forever.

The fix wraps the _execute_core call in a try-except block and calls
future.set_exception(e) when an exception occurs, ensuring the Future
is always properly completed.

Added tests:
- test_execute_async_basic: Basic threaded async execution
- test_execute_async_exception_completes_future: Regression test for #4072
- test_execute_async_exception_sets_end_time: Verify end_time is set on error
- test_execute_async_exception_does_not_hang: Verify no hang on error

Co-Authored-By: João <joao@crewai.com>
2025-12-11 14:19:23 +00:00
31 changed files with 766 additions and 854 deletions
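In plain terms, the fix guarantees that the Future returned by execute_async is always completed: with the task result on success, or with the raised exception on failure. A minimal, self-contained sketch of that pattern follows; the helper name `execute_async` and the bare `threading.Thread` here are illustrative assumptions, not the actual CrewAI implementation (the real change is in the task.py hunk further down).

```python
# Sketch of the Future-completion pattern described in the commit message.
# Hypothetical helper, not the CrewAI source. The point: every code path
# completes the Future, so callers of future.result() never hang.
from concurrent.futures import Future
import threading


def execute_async(task_body) -> Future:
    """Run task_body on a worker thread and always complete the Future."""
    future: Future = Future()

    def _runner() -> None:
        try:
            result = task_body()
        except Exception as e:  # propagate the error through the Future
            future.set_exception(e)
            return
        future.set_result(result)

    threading.Thread(target=_runner, daemon=True).start()
    return future


# Usage: a failing task raises from future.result() instead of blocking forever.
future = execute_async(lambda: 1 / 0)
try:
    future.result(timeout=5)
except ZeroDivisionError:
    pass
```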

View File

@@ -187,97 +187,6 @@ You can also deploy your crews directly through the CrewAI AOP web interface by
</Steps>
## Option 3: Redeploy Using API (CI/CD Integration)
For automated deployments in CI/CD pipelines, you can use the CrewAI API to trigger redeployments of existing crews. This is particularly useful for GitHub Actions, Jenkins, or other automation workflows.
<Steps>
<Step title="Get Your Personal Access Token">
Navigate to your CrewAI AOP account settings to generate an API token:
1. Go to [app.crewai.com](https://app.crewai.com)
2. Click on **Settings** → **Account** → **Personal Access Token**
3. Generate a new token and copy it securely
4. Store this token as a secret in your CI/CD system
</Step>
<Step title="Find Your Automation UUID">
Locate the unique identifier for your deployed crew:
1. Go to **Automations** in your CrewAI AOP dashboard
2. Select your existing automation/crew
3. Click on **Additional Details**
4. Copy the **UUID** - this identifies your specific crew deployment
</Step>
<Step title="Trigger Redeployment via API">
Use the Deploy API endpoint to trigger a redeployment:
```bash
curl -i -X POST \
-H "Authorization: Bearer YOUR_PERSONAL_ACCESS_TOKEN" \
https://app.crewai.com/crewai_plus/api/v1/crews/YOUR-AUTOMATION-UUID/deploy
# HTTP/2 200
# content-type: application/json
#
# {
# "uuid": "your-automation-uuid",
# "status": "Deploy Enqueued",
# "public_url": "https://your-crew-deployment.crewai.com",
# "token": "your-bearer-token"
# }
```
<Info>
If your automation was originally created with a connected Git repository, the API will automatically pull the latest changes from that repository before redeploying.
</Info>
</Step>
<Step title="GitHub Actions Integration Example">
Here's a GitHub Actions workflow with more complex deployment triggers:
```yaml
name: Deploy CrewAI Automation
on:
  push:
    branches: [ main ]
  pull_request:
    types: [ labeled ]
  release:
    types: [ published ]
jobs:
  deploy:
    runs-on: ubuntu-latest
    if: |
      (github.event_name == 'push' && github.ref == 'refs/heads/main') ||
      (github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 'deploy')) ||
      (github.event_name == 'release')
    steps:
      - name: Trigger CrewAI Redeployment
        run: |
          curl -X POST \
            -H "Authorization: Bearer ${{ secrets.CREWAI_PAT }}" \
            https://app.crewai.com/crewai_plus/api/v1/crews/${{ secrets.CREWAI_AUTOMATION_UUID }}/deploy
```
<Tip>
Add `CREWAI_PAT` and `CREWAI_AUTOMATION_UUID` as repository secrets. For PR deployments, add a "deploy" label to trigger the workflow.
</Tip>
</Step>
</Steps>
## ⚠️ Environment Variable Security Requirements
<Warning>

View File

@@ -12,7 +12,7 @@ dependencies = [
"pytube~=15.0.0",
"requests~=2.32.5",
"docker~=7.1.0",
"crewai==1.7.1",
"crewai==1.7.0",
"lancedb~=0.5.4",
"tiktoken~=0.8.0",
"beautifulsoup4~=4.13.4",

View File

@@ -291,4 +291,4 @@ __all__ = [
"ZapierActionTools",
]
__version__ = "1.7.1"
__version__ = "1.7.0"

View File

@@ -49,7 +49,7 @@ Repository = "https://github.com/crewAIInc/crewAI"
[project.optional-dependencies]
tools = [
"crewai-tools==1.7.1",
"crewai-tools==1.7.0",
]
embeddings = [
"tiktoken~=0.8.0"

View File

@@ -40,7 +40,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:
_suppress_pydantic_deprecation_warnings()
__version__ = "1.7.1"
__version__ = "1.7.0"
_telemetry_submitted = False

View File

@@ -16,7 +16,7 @@ from crewai.events.types.knowledge_events import (
KnowledgeSearchQueryFailedEvent,
)
from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context
from crewai.utilities.pydantic_schema_utils import generate_model_description
from crewai.utilities.converter import generate_model_description
if TYPE_CHECKING:

View File

@@ -5,9 +5,10 @@ from __future__ import annotations
from abc import ABC, abstractmethod
import json
import re
from typing import TYPE_CHECKING, Any, Final, Literal
from typing import TYPE_CHECKING, Final, Literal
from crewai.utilities.converter import generate_model_description
from crewai.utilities.pydantic_schema_utils import generate_model_description
if TYPE_CHECKING:
@@ -41,7 +42,7 @@ class BaseConverterAdapter(ABC):
"""
self.agent_adapter = agent_adapter
self._output_format: Literal["json", "pydantic"] | None = None
self._schema: dict[str, Any] | None = None
self._schema: str | None = None
@abstractmethod
def configure_structured_output(self, task: Task) -> None:
@@ -128,7 +129,7 @@ class BaseConverterAdapter(ABC):
@staticmethod
def _configure_format_from_task(
task: Task,
) -> tuple[Literal["json", "pydantic"] | None, dict[str, Any] | None]:
) -> tuple[Literal["json", "pydantic"] | None, str | None]:
"""Determine output format and schema from task requirements.
This is a helper method that examines the task's output requirements

View File

@@ -4,7 +4,6 @@ This module contains the OpenAIConverterAdapter class that handles structured
output conversion for OpenAI agents, supporting JSON and Pydantic model formats.
"""
import json
from typing import Any
from crewai.agents.agent_adapters.base_converter_adapter import BaseConverterAdapter
@@ -62,7 +61,7 @@ class OpenAIConverterAdapter(BaseConverterAdapter):
output_schema: str = (
get_i18n()
.slice("formatted_task_instructions")
.format(output_format=json.dumps(self._schema, indent=2))
.format(output_format=self._schema)
)
return f"{base_prompt}\n\n{output_schema}"

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.7.1"
"crewai[tools]==1.7.0"
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.7.1"
"crewai[tools]==1.7.0"
]
[project.scripts]

View File

@@ -1,5 +1,4 @@
import base64
from json import JSONDecodeError
import os
from pathlib import Path
import subprocess
@@ -163,19 +162,9 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
if login_response.status_code != 200:
console.print(
"Authentication failed. Verify if the currently active organization can access the tool repository, and run 'crewai login' again.",
"Authentication failed. Verify if the currently active organization access to the tool repository, and run 'crewai login' again. ",
style="bold red",
)
try:
console.print(
f"[{login_response.status_code} error - {login_response.json().get('message', 'Unknown error')}]",
style="bold red italic",
)
except JSONDecodeError:
console.print(
f"[{login_response.status_code} error - Unknown error - Invalid JSON response]",
style="bold red italic",
)
raise SystemExit
login_response_json = login_response.json()

View File

@@ -1017,26 +1017,10 @@ class Crew(FlowTrackable, BaseModel):
tasks=self.tasks, planning_agent_llm=self.planning_llm
)._handle_crew_planning()
plan_map: dict[int, str] = {}
for step_plan in result.list_of_plans_per_task:
if step_plan.task_number in plan_map:
self._logger.log(
"warning",
f"Duplicate plan for Task Number {step_plan.task_number}, "
"using the first plan",
)
else:
plan_map[step_plan.task_number] = step_plan.plan
for idx, task in enumerate(self.tasks):
task_number = idx + 1
if task_number in plan_map:
task.description += plan_map[task_number]
else:
self._logger.log(
"warning",
f"No plan found for Task Number {task_number}",
)
for task, step_plan in zip(
self.tasks, result.list_of_plans_per_task, strict=False
):
task.description += step_plan.plan
def _store_execution_log(
self,

View File

@@ -9,10 +9,10 @@ from pydantic import BaseModel
from typing_extensions import Self
from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.converter import generate_model_description
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError,
)
from crewai.utilities.pydantic_schema_utils import generate_model_description
from crewai.utilities.types import LLMMessage

View File

@@ -18,10 +18,10 @@ from crewai.events.types.llm_events import LLMCallType
from crewai.llms.base_llm import BaseLLM
from crewai.llms.hooks.transport import AsyncHTTPTransport, HTTPTransport
from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.converter import generate_model_description
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError,
)
from crewai.utilities.pydantic_schema_utils import generate_model_description
from crewai.utilities.types import LLMMessage

View File

@@ -495,10 +495,11 @@ class Task(BaseModel):
) -> None:
"""Execute the task asynchronously with context handling."""
try:
result = self._execute_core(agent, context, tools)
future.set_result(result)
result = self._execute_core(agent, context, tools)
except Exception as e:
future.set_exception(e)
future.set_exception(e)
return
future.set_result(result)
async def aexecute_sync(
self,

View File

@@ -3,13 +3,15 @@ from __future__ import annotations
from abc import ABC, abstractmethod
import asyncio
from collections.abc import Awaitable, Callable
from inspect import Parameter, signature
import json
from inspect import signature
from typing import (
Any,
Generic,
ParamSpec,
TypeVar,
cast,
get_args,
get_origin,
overload,
)
@@ -25,7 +27,6 @@ from typing_extensions import TypeIs
from crewai.tools.structured_tool import CrewStructuredTool
from crewai.utilities.printer import Printer
from crewai.utilities.pydantic_schema_utils import generate_model_description
_printer = Printer()
@@ -102,40 +103,20 @@ class BaseTool(BaseModel, ABC):
if v != cls._ArgsSchemaPlaceholder:
return v
run_sig = signature(cls._run)
fields: dict[str, Any] = {}
for param_name, param in run_sig.parameters.items():
if param_name in ("self", "return"):
continue
if param.kind in (Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD):
continue
annotation = param.annotation if param.annotation != param.empty else Any
if param.default is param.empty:
fields[param_name] = (annotation, ...)
else:
fields[param_name] = (annotation, param.default)
if not fields:
arun_sig = signature(cls._arun)
for param_name, param in arun_sig.parameters.items():
if param_name in ("self", "return"):
continue
if param.kind in (Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD):
continue
annotation = (
param.annotation if param.annotation != param.empty else Any
)
if param.default is param.empty:
fields[param_name] = (annotation, ...)
else:
fields[param_name] = (annotation, param.default)
return create_model(f"{cls.__name__}Schema", **fields)
return cast(
type[PydanticBaseModel],
type(
f"{cls.__name__}Schema",
(PydanticBaseModel,),
{
"__annotations__": {
k: v
for k, v in cls._run.__annotations__.items()
if k != "return"
},
},
),
)
@field_validator("max_usage_count", mode="before")
@classmethod
@@ -245,23 +226,24 @@ class BaseTool(BaseModel, ABC):
args_schema = getattr(tool, "args_schema", None)
if args_schema is None:
# Infer args_schema from the function signature if not provided
func_signature = signature(tool.func)
fields: dict[str, Any] = {}
for name, param in func_signature.parameters.items():
if name == "self":
continue
if param.kind in (Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD):
continue
param_annotation = (
param.annotation if param.annotation != param.empty else Any
)
if param.default is param.empty:
fields[name] = (param_annotation, ...)
else:
fields[name] = (param_annotation, param.default)
if fields:
args_schema = create_model(f"{tool.name}Input", **fields)
annotations = func_signature.parameters
args_fields: dict[str, Any] = {}
for name, param in annotations.items():
if name != "self":
param_annotation = (
param.annotation if param.annotation != param.empty else Any
)
field_info = Field(
default=...,
description="",
)
args_fields[name] = (param_annotation, field_info)
if args_fields:
args_schema = create_model(f"{tool.name}Input", **args_fields)
else:
# Create a default schema with no fields if no parameters are found
args_schema = create_model(
f"{tool.name}Input", __base__=PydanticBaseModel
)
@@ -275,37 +257,53 @@ class BaseTool(BaseModel, ABC):
def _set_args_schema(self) -> None:
if self.args_schema is None:
run_sig = signature(self._run)
fields: dict[str, Any] = {}
for param_name, param in run_sig.parameters.items():
if param_name in ("self", "return"):
continue
if param.kind in (Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD):
continue
annotation = (
param.annotation if param.annotation != param.empty else Any
)
if param.default is param.empty:
fields[param_name] = (annotation, ...)
else:
fields[param_name] = (annotation, param.default)
self.args_schema = create_model(
f"{self.__class__.__name__}Schema", **fields
class_name = f"{self.__class__.__name__}Schema"
self.args_schema = cast(
type[PydanticBaseModel],
type(
class_name,
(PydanticBaseModel,),
{
"__annotations__": {
k: v
for k, v in self._run.__annotations__.items()
if k != "return"
},
},
),
)
def _generate_description(self) -> None:
"""Generate the tool description with a JSON schema for arguments."""
schema = generate_model_description(self.args_schema)
args_json = json.dumps(schema["json_schema"]["schema"], indent=2)
self.description = (
f"Tool Name: {self.name}\n"
f"Tool Arguments: {args_json}\n"
f"Tool Description: {self.description}"
)
args_schema = {
name: {
"description": field.description,
"type": BaseTool._get_arg_annotations(field.annotation),
}
for name, field in self.args_schema.model_fields.items()
}
self.description = f"Tool Name: {self.name}\nTool Arguments: {args_schema}\nTool Description: {self.description}"
@staticmethod
def _get_arg_annotations(annotation: type[Any] | None) -> str:
if annotation is None:
return "None"
origin = get_origin(annotation)
args = get_args(annotation)
if origin is None:
return (
annotation.__name__
if hasattr(annotation, "__name__")
else str(annotation)
)
if args:
args_str = ", ".join(BaseTool._get_arg_annotations(arg) for arg in args)
return str(f"{origin.__name__}[{args_str}]")
return str(origin.__name__)
class Tool(BaseTool, Generic[P, R]):
@@ -408,23 +406,24 @@ class Tool(BaseTool, Generic[P, R]):
args_schema = getattr(tool, "args_schema", None)
if args_schema is None:
# Infer args_schema from the function signature if not provided
func_signature = signature(tool.func)
fields: dict[str, Any] = {}
for name, param in func_signature.parameters.items():
if name == "self":
continue
if param.kind in (Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD):
continue
param_annotation = (
param.annotation if param.annotation != param.empty else Any
)
if param.default is param.empty:
fields[name] = (param_annotation, ...)
else:
fields[name] = (param_annotation, param.default)
if fields:
args_schema = create_model(f"{tool.name}Input", **fields)
annotations = func_signature.parameters
args_fields: dict[str, Any] = {}
for name, param in annotations.items():
if name != "self":
param_annotation = (
param.annotation if param.annotation != param.empty else Any
)
field_info = Field(
default=...,
description="",
)
args_fields[name] = (param_annotation, field_info)
if args_fields:
args_schema = create_model(f"{tool.name}Input", **args_fields)
else:
# Create a default schema with no fields if no parameters are found
args_schema = create_model(
f"{tool.name}Input", __base__=PydanticBaseModel
)
@@ -503,38 +502,32 @@ def tool(
def _make_tool(f: Callable[P2, R2]) -> Tool[P2, R2]:
if f.__doc__ is None:
raise ValueError("Function must have a docstring")
if f.__annotations__ is None:
func_annotations = getattr(f, "__annotations__", None)
if func_annotations is None:
raise ValueError("Function must have type annotations")
func_sig = signature(f)
fields: dict[str, Any] = {}
for param_name, param in func_sig.parameters.items():
if param_name == "return":
continue
if param.kind in (Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD):
continue
annotation = (
param.annotation if param.annotation != param.empty else Any
)
if param.default is param.empty:
fields[param_name] = (annotation, ...)
else:
fields[param_name] = (annotation, param.default)
class_name = "".join(tool_name.split()).title()
args_schema = create_model(class_name, **fields)
tool_args_schema = cast(
type[PydanticBaseModel],
type(
class_name,
(PydanticBaseModel,),
{
"__annotations__": {
k: v for k, v in func_annotations.items() if k != "return"
},
},
),
)
return Tool(
name=tool_name,
description=f.__doc__,
func=f,
args_schema=args_schema,
args_schema=tool_args_schema,
result_as_answer=result_as_answer,
max_usage_count=max_usage_count,
current_usage_count=0,
)
return _make_tool

View File

@@ -1,5 +1,7 @@
from __future__ import annotations
from collections.abc import Callable
from copy import deepcopy
import json
import re
from typing import TYPE_CHECKING, Any, Final, TypedDict
@@ -11,7 +13,6 @@ from crewai.agents.agent_builder.utilities.base_output_converter import OutputCo
from crewai.utilities.i18n import get_i18n
from crewai.utilities.internal_instructor import InternalInstructor
from crewai.utilities.printer import Printer
from crewai.utilities.pydantic_schema_utils import generate_model_description
if TYPE_CHECKING:
@@ -420,3 +421,221 @@ def create_converter(
raise Exception("No output converter found or set.")
return converter # type: ignore[no-any-return]
def resolve_refs(schema: dict[str, Any]) -> dict[str, Any]:
"""Recursively resolve all local $refs in the given JSON Schema using $defs as the source.
This is needed because Pydantic generates $ref-based schemas that
some consumers (e.g. LLMs, tool frameworks) don't handle well.
Args:
schema: JSON Schema dict that may contain "$refs" and "$defs".
Returns:
A new schema dictionary with all local $refs replaced by their definitions.
"""
defs = schema.get("$defs", {})
schema_copy = deepcopy(schema)
def _resolve(node: Any) -> Any:
if isinstance(node, dict):
ref = node.get("$ref")
if isinstance(ref, str) and ref.startswith("#/$defs/"):
def_name = ref.replace("#/$defs/", "")
if def_name in defs:
return _resolve(deepcopy(defs[def_name]))
raise KeyError(f"Definition '{def_name}' not found in $defs.")
return {k: _resolve(v) for k, v in node.items()}
if isinstance(node, list):
return [_resolve(i) for i in node]
return node
return _resolve(schema_copy) # type: ignore[no-any-return]
def add_key_in_dict_recursively(
d: dict[str, Any], key: str, value: Any, criteria: Callable[[dict[str, Any]], bool]
) -> dict[str, Any]:
"""Recursively adds a key/value pair to all nested dicts matching `criteria`."""
if isinstance(d, dict):
if criteria(d) and key not in d:
d[key] = value
for v in d.values():
add_key_in_dict_recursively(v, key, value, criteria)
elif isinstance(d, list):
for i in d:
add_key_in_dict_recursively(i, key, value, criteria)
return d
def fix_discriminator_mappings(schema: dict[str, Any]) -> dict[str, Any]:
"""Replace '#/$defs/...' references in discriminator.mapping with just the model name."""
output = schema.get("properties", {}).get("output")
if not output:
return schema
disc = output.get("discriminator")
if not disc or "mapping" not in disc:
return schema
disc["mapping"] = {k: v.split("/")[-1] for k, v in disc["mapping"].items()}
return schema
def add_const_to_oneof_variants(schema: dict[str, Any]) -> dict[str, Any]:
"""Add const fields to oneOf variants for discriminated unions.
The json_schema_to_pydantic library requires each oneOf variant to have
a const field for the discriminator property. This function adds those
const fields based on the discriminator mapping.
Args:
schema: JSON Schema dict that may contain discriminated unions
Returns:
Modified schema with const fields added to oneOf variants
"""
def _process_oneof(node: dict[str, Any]) -> dict[str, Any]:
"""Process a single node that might contain a oneOf with discriminator."""
if not isinstance(node, dict):
return node
if "oneOf" in node and "discriminator" in node:
discriminator = node["discriminator"]
property_name = discriminator.get("propertyName")
mapping = discriminator.get("mapping", {})
if property_name and mapping:
one_of_variants = node.get("oneOf", [])
for variant in one_of_variants:
if isinstance(variant, dict) and "properties" in variant:
variant_title = variant.get("title", "")
matched_disc_value = None
for disc_value, schema_name in mapping.items():
if variant_title == schema_name or variant_title.endswith(
schema_name
):
matched_disc_value = disc_value
break
if matched_disc_value is not None:
props = variant["properties"]
if property_name in props:
props[property_name]["const"] = matched_disc_value
for key, value in node.items():
if isinstance(value, dict):
node[key] = _process_oneof(value)
elif isinstance(value, list):
node[key] = [
_process_oneof(item) if isinstance(item, dict) else item
for item in value
]
return node
return _process_oneof(deepcopy(schema))
def convert_oneof_to_anyof(schema: dict[str, Any]) -> dict[str, Any]:
"""Convert oneOf to anyOf for OpenAI compatibility.
OpenAI's Structured Outputs support anyOf better than oneOf.
This recursively converts all oneOf occurrences to anyOf.
Args:
schema: JSON schema dictionary.
Returns:
Modified schema with anyOf instead of oneOf.
"""
if isinstance(schema, dict):
if "oneOf" in schema:
schema["anyOf"] = schema.pop("oneOf")
for value in schema.values():
if isinstance(value, dict):
convert_oneof_to_anyof(value)
elif isinstance(value, list):
for item in value:
if isinstance(item, dict):
convert_oneof_to_anyof(item)
return schema
def ensure_all_properties_required(schema: dict[str, Any]) -> dict[str, Any]:
"""Ensure all properties are in the required array for OpenAI strict mode.
OpenAI's strict structured outputs require all properties to be listed
in the required array. This recursively updates all objects to include
all their properties in required.
Args:
schema: JSON schema dictionary.
Returns:
Modified schema with all properties marked as required.
"""
if isinstance(schema, dict):
if schema.get("type") == "object" and "properties" in schema:
properties = schema["properties"]
if properties:
schema["required"] = list(properties.keys())
for value in schema.values():
if isinstance(value, dict):
ensure_all_properties_required(value)
elif isinstance(value, list):
for item in value:
if isinstance(item, dict):
ensure_all_properties_required(item)
return schema
def generate_model_description(model: type[BaseModel]) -> dict[str, Any]:
"""Generate JSON schema description of a Pydantic model.
This function takes a Pydantic model class and returns its JSON schema,
which includes full type information, discriminators, and all metadata.
The schema is dereferenced to inline all $ref references for better LLM understanding.
Args:
model: A Pydantic model class.
Returns:
A JSON schema dictionary representation of the model.
"""
json_schema = model.model_json_schema(ref_template="#/$defs/{model}")
json_schema = add_key_in_dict_recursively(
json_schema,
key="additionalProperties",
value=False,
criteria=lambda d: d.get("type") == "object"
and "additionalProperties" not in d,
)
json_schema = resolve_refs(json_schema)
json_schema.pop("$defs", None)
json_schema = fix_discriminator_mappings(json_schema)
json_schema = convert_oneof_to_anyof(json_schema)
json_schema = ensure_all_properties_required(json_schema)
return {
"type": "json_schema",
"json_schema": {
"name": model.__name__,
"strict": True,
"schema": json_schema,
},
}

View File

@@ -1,15 +1,14 @@
from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, cast
from typing import TYPE_CHECKING, cast
from pydantic import BaseModel, Field
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.task_events import TaskEvaluationEvent
from crewai.llm import LLM
from crewai.utilities.converter import Converter
from crewai.utilities.i18n import get_i18n
from crewai.utilities.pydantic_schema_utils import generate_model_description
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
from crewai.utilities.training_converter import TrainingConverter
@@ -63,7 +62,7 @@ class TaskEvaluator:
Args:
original_agent: The agent to evaluate.
"""
self.llm = original_agent.llm
self.llm = cast(LLM, original_agent.llm)
self.original_agent = original_agent
def evaluate(self, task: Task, output: str) -> TaskEvaluation:
@@ -80,8 +79,7 @@ class TaskEvaluator:
- Investigate the Converter.to_pydantic signature, returns BaseModel strictly?
"""
crewai_event_bus.emit(
self,
TaskEvaluationEvent(evaluation_type="task_evaluation", task=task), # type: ignore[no-untyped-call]
self, TaskEvaluationEvent(evaluation_type="task_evaluation", task=task)
)
evaluation_query = (
f"Assess the quality of the task completed based on the description, expected output, and actual results.\n\n"
@@ -96,14 +94,9 @@ class TaskEvaluator:
instructions = "Convert all responses into valid JSON output."
if not self.llm.supports_function_calling(): # type: ignore[union-attr]
schema_dict = generate_model_description(TaskEvaluation)
output_schema: str = (
get_i18n()
.slice("formatted_task_instructions")
.format(output_format=json.dumps(schema_dict, indent=2))
)
instructions = f"{instructions}\n\n{output_schema}"
if not self.llm.supports_function_calling():
model_schema = PydanticSchemaParser(model=TaskEvaluation).get_schema()
instructions = f"{instructions}\n\nReturn only valid JSON with the following schema:\n```json\n{model_schema}\n```"
converter = Converter(
llm=self.llm,
@@ -115,7 +108,7 @@ class TaskEvaluator:
return cast(TaskEvaluation, converter.to_pydantic())
def evaluate_training_data(
self, training_data: dict[str, Any], agent_id: str
self, training_data: dict, agent_id: str
) -> TrainingTaskEvaluation:
"""
Evaluate the training data based on the llm output, human feedback, and improved output.
@@ -128,8 +121,7 @@ class TaskEvaluator:
- Investigate the Converter.to_pydantic signature, returns BaseModel strictly?
"""
crewai_event_bus.emit(
self,
TaskEvaluationEvent(evaluation_type="training_data_evaluation"), # type: ignore[no-untyped-call]
self, TaskEvaluationEvent(evaluation_type="training_data_evaluation")
)
output_training_data = training_data[agent_id]
@@ -172,14 +164,11 @@ class TaskEvaluator:
)
instructions = "I'm gonna convert this raw text into valid JSON."
if not self.llm.supports_function_calling(): # type: ignore[union-attr]
schema_dict = generate_model_description(TrainingTaskEvaluation)
output_schema: str = (
get_i18n()
.slice("formatted_task_instructions")
.format(output_format=json.dumps(schema_dict, indent=2))
)
instructions = f"{instructions}\n\n{output_schema}"
if not self.llm.supports_function_calling():
model_schema = PydanticSchemaParser(
model=TrainingTaskEvaluation
).get_schema()
instructions = f"{instructions}\n\nThe json should have the following structure, with the following keys:\n{model_schema}"
converter = TrainingConverter(
llm=self.llm,

View File

@@ -15,12 +15,9 @@ logger = logging.getLogger(__name__)
class PlanPerTask(BaseModel):
"""Represents a plan for a specific task."""
task_number: int = Field(
description="The 1-indexed task number this plan corresponds to",
ge=1,
)
task: str = Field(description="The task for which the plan is created")
task: str = Field(..., description="The task for which the plan is created")
plan: str = Field(
...,
description="The step by step plan on how the agents can execute their tasks using the available tools with mastery",
)

View File

@@ -0,0 +1,103 @@
from typing import Any, Union, get_args, get_origin
from pydantic import BaseModel, Field
class PydanticSchemaParser(BaseModel):
model: type[BaseModel] = Field(..., description="The Pydantic model to parse.")
def get_schema(self) -> str:
"""Public method to get the schema of a Pydantic model.
Returns:
String representation of the model schema.
"""
return "{\n" + self._get_model_schema(self.model) + "\n}"
def _get_model_schema(self, model: type[BaseModel], depth: int = 0) -> str:
"""Recursively get the schema of a Pydantic model, handling nested models and lists.
Args:
model: The Pydantic model to process.
depth: The current depth of recursion for indentation purposes.
Returns:
A string representation of the model schema.
"""
indent: str = " " * 4 * depth
lines: list[str] = [
f"{indent} {field_name}: {self._get_field_type_for_annotation(field.annotation, depth + 1)}"
for field_name, field in model.model_fields.items()
]
return ",\n".join(lines)
def _format_list_type(self, list_item_type: Any, depth: int) -> str:
"""Format a List type, handling nested models if necessary.
Args:
list_item_type: The type of items in the list.
depth: The current depth of recursion for indentation purposes.
Returns:
A string representation of the List type.
"""
if isinstance(list_item_type, type) and issubclass(list_item_type, BaseModel):
nested_schema = self._get_model_schema(list_item_type, depth + 1)
nested_indent = " " * 4 * depth
return f"List[\n{nested_indent}{{\n{nested_schema}\n{nested_indent}}}\n{nested_indent}]"
return f"List[{list_item_type.__name__}]"
def _format_union_type(self, field_type: Any, depth: int) -> str:
"""Format a Union type, handling Optional and nested types.
Args:
field_type: The Union type to format.
depth: The current depth of recursion for indentation purposes.
Returns:
A string representation of the Union type.
"""
args = get_args(field_type)
if type(None) in args:
# It's an Optional type
non_none_args = [arg for arg in args if arg is not type(None)]
if len(non_none_args) == 1:
inner_type = self._get_field_type_for_annotation(
non_none_args[0], depth
)
return f"Optional[{inner_type}]"
# Union with None and multiple other types
inner_types = ", ".join(
self._get_field_type_for_annotation(arg, depth) for arg in non_none_args
)
return f"Optional[Union[{inner_types}]]"
# General Union type
inner_types = ", ".join(
self._get_field_type_for_annotation(arg, depth) for arg in args
)
return f"Union[{inner_types}]"
def _get_field_type_for_annotation(self, annotation: Any, depth: int) -> str:
"""Recursively get the string representation of a field's type annotation.
Args:
annotation: The type annotation to process.
depth: The current depth of recursion for indentation purposes.
Returns:
A string representation of the type annotation.
"""
origin: Any = get_origin(annotation)
if origin is list:
list_item_type = get_args(annotation)[0]
return self._format_list_type(list_item_type, depth)
if origin is dict:
key_type, value_type = get_args(annotation)
return f"Dict[{key_type.__name__}, {value_type.__name__}]"
if origin is Union:
return self._format_union_type(annotation, depth)
if isinstance(annotation, type) and issubclass(annotation, BaseModel):
nested_schema = self._get_model_schema(annotation, depth)
nested_indent = " " * 4 * depth
return f"{annotation.__name__}\n{nested_indent}{{\n{nested_schema}\n{nested_indent}}}"
return annotation.__name__

View File

@@ -1,245 +0,0 @@
"""Utilities for generating JSON schemas from Pydantic models.
This module provides functions for converting Pydantic models to JSON schemas
suitable for use with LLMs and tool definitions.
"""
from collections.abc import Callable
from copy import deepcopy
from typing import Any
from pydantic import BaseModel
def resolve_refs(schema: dict[str, Any]) -> dict[str, Any]:
"""Recursively resolve all local $refs in the given JSON Schema using $defs as the source.
This is needed because Pydantic generates $ref-based schemas that
some consumers (e.g. LLMs, tool frameworks) don't handle well.
Args:
schema: JSON Schema dict that may contain "$refs" and "$defs".
Returns:
A new schema dictionary with all local $refs replaced by their definitions.
"""
defs = schema.get("$defs", {})
schema_copy = deepcopy(schema)
def _resolve(node: Any) -> Any:
if isinstance(node, dict):
ref = node.get("$ref")
if isinstance(ref, str) and ref.startswith("#/$defs/"):
def_name = ref.replace("#/$defs/", "")
if def_name in defs:
return _resolve(deepcopy(defs[def_name]))
raise KeyError(f"Definition '{def_name}' not found in $defs.")
return {k: _resolve(v) for k, v in node.items()}
if isinstance(node, list):
return [_resolve(i) for i in node]
return node
return _resolve(schema_copy) # type: ignore[no-any-return]
def add_key_in_dict_recursively(
d: dict[str, Any], key: str, value: Any, criteria: Callable[[dict[str, Any]], bool]
) -> dict[str, Any]:
"""Recursively adds a key/value pair to all nested dicts matching `criteria`.
Args:
d: The dictionary to modify.
key: The key to add.
value: The value to add.
criteria: A function that returns True for dicts that should receive the key.
Returns:
The modified dictionary.
"""
if isinstance(d, dict):
if criteria(d) and key not in d:
d[key] = value
for v in d.values():
add_key_in_dict_recursively(v, key, value, criteria)
elif isinstance(d, list):
for i in d:
add_key_in_dict_recursively(i, key, value, criteria)
return d
def fix_discriminator_mappings(schema: dict[str, Any]) -> dict[str, Any]:
"""Replace '#/$defs/...' references in discriminator.mapping with just the model name.
Args:
schema: JSON schema dictionary.
Returns:
Modified schema with fixed discriminator mappings.
"""
output = schema.get("properties", {}).get("output")
if not output:
return schema
disc = output.get("discriminator")
if not disc or "mapping" not in disc:
return schema
disc["mapping"] = {k: v.split("/")[-1] for k, v in disc["mapping"].items()}
return schema
def add_const_to_oneof_variants(schema: dict[str, Any]) -> dict[str, Any]:
"""Add const fields to oneOf variants for discriminated unions.
The json_schema_to_pydantic library requires each oneOf variant to have
a const field for the discriminator property. This function adds those
const fields based on the discriminator mapping.
Args:
schema: JSON Schema dict that may contain discriminated unions
Returns:
Modified schema with const fields added to oneOf variants
"""
def _process_oneof(node: dict[str, Any]) -> dict[str, Any]:
"""Process a single node that might contain a oneOf with discriminator."""
if not isinstance(node, dict):
return node
if "oneOf" in node and "discriminator" in node:
discriminator = node["discriminator"]
property_name = discriminator.get("propertyName")
mapping = discriminator.get("mapping", {})
if property_name and mapping:
one_of_variants = node.get("oneOf", [])
for variant in one_of_variants:
if isinstance(variant, dict) and "properties" in variant:
variant_title = variant.get("title", "")
matched_disc_value = None
for disc_value, schema_name in mapping.items():
if variant_title == schema_name or variant_title.endswith(
schema_name
):
matched_disc_value = disc_value
break
if matched_disc_value is not None:
props = variant["properties"]
if property_name in props:
props[property_name]["const"] = matched_disc_value
for key, value in node.items():
if isinstance(value, dict):
node[key] = _process_oneof(value)
elif isinstance(value, list):
node[key] = [
_process_oneof(item) if isinstance(item, dict) else item
for item in value
]
return node
return _process_oneof(deepcopy(schema))
def convert_oneof_to_anyof(schema: dict[str, Any]) -> dict[str, Any]:
"""Convert oneOf to anyOf for OpenAI compatibility.
OpenAI's Structured Outputs support anyOf better than oneOf.
This recursively converts all oneOf occurrences to anyOf.
Args:
schema: JSON schema dictionary.
Returns:
Modified schema with anyOf instead of oneOf.
"""
if isinstance(schema, dict):
if "oneOf" in schema:
schema["anyOf"] = schema.pop("oneOf")
for value in schema.values():
if isinstance(value, dict):
convert_oneof_to_anyof(value)
elif isinstance(value, list):
for item in value:
if isinstance(item, dict):
convert_oneof_to_anyof(item)
return schema
def ensure_all_properties_required(schema: dict[str, Any]) -> dict[str, Any]:
"""Ensure all properties are in the required array for OpenAI strict mode.
OpenAI's strict structured outputs require all properties to be listed
in the required array. This recursively updates all objects to include
all their properties in required.
Args:
schema: JSON schema dictionary.
Returns:
Modified schema with all properties marked as required.
"""
if isinstance(schema, dict):
if schema.get("type") == "object" and "properties" in schema:
properties = schema["properties"]
if properties:
schema["required"] = list(properties.keys())
for value in schema.values():
if isinstance(value, dict):
ensure_all_properties_required(value)
elif isinstance(value, list):
for item in value:
if isinstance(item, dict):
ensure_all_properties_required(item)
return schema
def generate_model_description(model: type[BaseModel]) -> dict[str, Any]:
"""Generate JSON schema description of a Pydantic model.
This function takes a Pydantic model class and returns its JSON schema,
which includes full type information, discriminators, and all metadata.
The schema is dereferenced to inline all $ref references for better LLM understanding.
Args:
model: A Pydantic model class.
Returns:
A JSON schema dictionary representation of the model.
"""
json_schema = model.model_json_schema(ref_template="#/$defs/{model}")
json_schema = add_key_in_dict_recursively(
json_schema,
key="additionalProperties",
value=False,
criteria=lambda d: d.get("type") == "object"
and "additionalProperties" not in d,
)
json_schema = resolve_refs(json_schema)
json_schema.pop("$defs", None)
json_schema = fix_discriminator_mappings(json_schema)
json_schema = convert_oneof_to_anyof(json_schema)
json_schema = ensure_all_properties_required(json_schema)
return {
"type": "json_schema",
"json_schema": {
"name": model.__name__,
"strict": True,
"schema": json_schema,
},
}

View File

@@ -383,4 +383,94 @@ class TestAsyncTaskOutput:
assert result.description == "Test description"
assert result.expected_output == "Test expected"
assert result.raw == "Test result"
assert result.agent == "Test Agent"
assert result.agent == "Test Agent"
class TestThreadedAsyncExecution:
"""Tests for threaded async task execution (execute_async with Future)."""
@patch("crewai.Agent.execute_task")
def test_execute_async_basic(
self, mock_execute: MagicMock, test_agent: Agent
) -> None:
"""Test basic threaded async task execution."""
mock_execute.return_value = "Async task result"
task = Task(
description="Test task description",
expected_output="Test expected output",
agent=test_agent,
)
future = task.execute_async()
result = future.result(timeout=5)
assert result is not None
assert isinstance(result, TaskOutput)
assert result.raw == "Async task result"
assert result.agent == "Test Agent"
mock_execute.assert_called_once()
@patch("crewai.Agent.execute_task")
def test_execute_async_exception_completes_future(
self, mock_execute: MagicMock, test_agent: Agent
) -> None:
"""Test that execute_async properly completes the Future when an exception occurs.
This is a regression test for GitHub issue #4072 where an async task that
errors would keep its thread alive because the Future was never completed.
"""
mock_execute.side_effect = ValueError("Something happened here")
task = Task(
description="Test task description",
expected_output="Test expected output",
agent=test_agent,
)
future = task.execute_async()
with pytest.raises(ValueError) as exc_info:
future.result(timeout=5)
assert "Something happened here" in str(exc_info.value)
@patch("crewai.Agent.execute_task")
def test_execute_async_exception_sets_end_time(
self, mock_execute: MagicMock, test_agent: Agent
) -> None:
"""Test that execute_async sets end_time even when an exception occurs."""
mock_execute.side_effect = RuntimeError("Test error")
task = Task(
description="Test task description",
expected_output="Test expected output",
agent=test_agent,
)
future = task.execute_async()
with pytest.raises(RuntimeError):
future.result(timeout=5)
assert task.end_time is not None
@patch("crewai.Agent.execute_task")
def test_execute_async_exception_does_not_hang(
self, mock_execute: MagicMock, test_agent: Agent
) -> None:
"""Test that execute_async does not hang when an exception occurs.
This test verifies that the Future is properly completed with an exception,
allowing future.result() to return immediately instead of blocking forever.
"""
mock_execute.side_effect = Exception("Task execution failed")
task = Task(
description="Test task description",
expected_output="Test expected output",
agent=test_agent,
)
future = task.execute_async()
with pytest.raises(Exception) as exc_info:
future.result(timeout=1)
assert "Task execution failed" in str(exc_info.value)

View File

@@ -1727,24 +1727,3 @@ def test_task_output_includes_messages():
assert hasattr(task2_output, "messages")
assert isinstance(task2_output.messages, list)
assert len(task2_output.messages) > 0
def test_async_execution_fails():
researcher = Agent(
role="Researcher",
goal="Make the best research and analysis on content about AI and AI agents",
backstory="You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and is now working on doing research and analysis for a new customer.",
allow_delegation=False,
)
task = Task(
description="Give me a list of 5 interesting ideas to explore for na article, what makes them unique and interesting.",
expected_output="Bullet point list of 5 interesting ideas.",
async_execution=True,
agent=researcher,
)
with patch.object(Task, "_execute_core", side_effect=RuntimeError("boom!")):
with pytest.raises(RuntimeError, match="boom!"):
execution = task.execute_async(agent=researcher)
execution.result()

View File

@@ -17,11 +17,10 @@ def test_creating_a_tool_using_annotation():
# Assert all the right attributes were defined
assert my_tool.name == "Name of my tool"
assert "Tool Name: Name of my tool" in my_tool.description
assert "Tool Arguments:" in my_tool.description
assert '"question"' in my_tool.description
assert '"type": "string"' in my_tool.description
assert "Tool Description: Clear description for what this tool is useful for" in my_tool.description
assert (
my_tool.description
== "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, your agent will need this information to use it."
)
assert my_tool.args_schema.model_json_schema()["properties"] == {
"question": {"title": "Question", "type": "string"}
}
@@ -32,9 +31,10 @@ def test_creating_a_tool_using_annotation():
converted_tool = my_tool.to_structured_tool()
assert converted_tool.name == "Name of my tool"
assert "Tool Name: Name of my tool" in converted_tool.description
assert "Tool Arguments:" in converted_tool.description
assert '"question"' in converted_tool.description
assert (
converted_tool.description
== "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, your agent will need this information to use it."
)
assert converted_tool.args_schema.model_json_schema()["properties"] == {
"question": {"title": "Question", "type": "string"}
}
@@ -56,11 +56,10 @@ def test_creating_a_tool_using_baseclass():
# Assert all the right attributes were defined
assert my_tool.name == "Name of my tool"
assert "Tool Name: Name of my tool" in my_tool.description
assert "Tool Arguments:" in my_tool.description
assert '"question"' in my_tool.description
assert '"type": "string"' in my_tool.description
assert "Tool Description: Clear description for what this tool is useful for" in my_tool.description
assert (
my_tool.description
== "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, your agent will need this information to use it."
)
assert my_tool.args_schema.model_json_schema()["properties"] == {
"question": {"title": "Question", "type": "string"}
}
@@ -69,9 +68,10 @@ def test_creating_a_tool_using_baseclass():
converted_tool = my_tool.to_structured_tool()
assert converted_tool.name == "Name of my tool"
assert "Tool Name: Name of my tool" in converted_tool.description
assert "Tool Arguments:" in converted_tool.description
assert '"question"' in converted_tool.description
assert (
converted_tool.description
== "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, your agent will need this information to use it."
)
assert converted_tool.args_schema.model_json_schema()["properties"] == {
"question": {"title": "Question", "type": "string"}
}

View File

@@ -107,20 +107,25 @@ def test_tool_usage_render():
rendered = tool_usage._render()
# Check that the rendered output contains the expected tool information
# Updated checks to match the actual output
assert "Tool Name: Random Number Generator" in rendered
assert "Tool Arguments:" in rendered
assert (
"'min_value': {'description': 'The minimum value of the range (inclusive)', 'type': 'int'}"
in rendered
)
assert (
"'max_value': {'description': 'The maximum value of the range (inclusive)', 'type': 'int'}"
in rendered
)
assert (
"Tool Description: Generates a random number within a specified range"
in rendered
)
# Check that the JSON schema format is used (proper JSON schema types)
assert '"min_value"' in rendered
assert '"max_value"' in rendered
assert '"type": "integer"' in rendered
assert '"description": "The minimum value of the range (inclusive)"' in rendered
assert '"description": "The maximum value of the range (inclusive)"' in rendered
assert (
"Tool Name: Random Number Generator\nTool Arguments: {'min_value': {'description': 'The minimum value of the range (inclusive)', 'type': 'int'}, 'max_value': {'description': 'The maximum value of the range (inclusive)', 'type': 'int'}}\nTool Description: Generates a random number within a specified range"
in rendered
)
def test_validate_tool_input_booleans_and_none():

View File

@@ -1,3 +1,4 @@
from unittest import mock
from unittest.mock import MagicMock, patch
from crewai.utilities.converter import ConverterError
@@ -43,26 +44,26 @@ def test_evaluate_training_data(converter_mock):
)
assert result == function_return_value
# Verify the converter was called with correct arguments
converter_mock.assert_called_once()
call_kwargs = converter_mock.call_args.kwargs
assert call_kwargs["llm"] == original_agent.llm
assert call_kwargs["model"] == TrainingTaskEvaluation
assert "Iteration: data1" in call_kwargs["text"]
assert "Iteration: data2" in call_kwargs["text"]
instructions = call_kwargs["instructions"]
assert "I'm gonna convert this raw text into valid JSON." in instructions
assert "OpenAPI schema" in instructions
assert '"type": "json_schema"' in instructions
assert '"name": "TrainingTaskEvaluation"' in instructions
assert '"suggestions"' in instructions
assert '"quality"' in instructions
assert '"final_summary"' in instructions
converter_mock.return_value.to_pydantic.assert_called_once()
converter_mock.assert_has_calls(
[
mock.call(
llm=original_agent.llm,
text="Assess the quality of the training data based on the llm output, human feedback , and llm "
"output improved result.\n\nIteration: data1\nInitial Output:\nInitial output 1\n\nHuman Feedback:\nHuman feedback "
"1\n\nImproved Output:\nImproved output 1\n\n------------------------------------------------\n\nIteration: data2\nInitial Output:\nInitial output 2\n\nHuman "
"Feedback:\nHuman feedback 2\n\nImproved Output:\nImproved output 2\n\n------------------------------------------------\n\nPlease provide:\n- Provide "
"a list of clear, actionable instructions derived from the Human Feedbacks to enhance the Agent's "
"performance. Analyze the differences between Initial Outputs and Improved Outputs to generate specific "
"action items for future tasks. Ensure all key and specificpoints from the human feedback are "
"incorporated into these instructions.\n- A score from 0 to 10 evaluating on completion, quality, and "
"overall performance from the improved output to the initial output based on the human feedback\n",
model=TrainingTaskEvaluation,
instructions="I'm gonna convert this raw text into valid JSON.\n\nThe json should have the "
"following structure, with the following keys:\n{\n suggestions: List[str],\n quality: float,\n final_summary: str\n}",
),
mock.call().to_pydantic(),
]
)
@patch("crewai.utilities.converter.Converter.to_pydantic")

View File

@@ -16,6 +16,7 @@ from crewai.utilities.converter import (
handle_partial_json,
validate_model,
)
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
from pydantic import BaseModel
import pytest

View File

@@ -1,11 +1,7 @@
"""Tests for the planning handler module."""
from unittest.mock import MagicMock, patch
from unittest.mock import patch
import pytest
from crewai.agent import Agent
from crewai.crew import Crew
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
from crewai.task import Task
from crewai.tasks.task_output import TaskOutput
@@ -17,7 +13,7 @@ from crewai.utilities.planning_handler import (
)
class TestInternalCrewPlanner:
class InternalCrewPlanner:
@pytest.fixture
def crew_planner(self):
tasks = [
@@ -53,9 +49,9 @@ class TestInternalCrewPlanner:
def test_handle_crew_planning(self, crew_planner):
list_of_plans_per_task = [
PlanPerTask(task_number=1, task="Task1", plan="Plan 1"),
PlanPerTask(task_number=2, task="Task2", plan="Plan 2"),
PlanPerTask(task_number=3, task="Task3", plan="Plan 3"),
PlanPerTask(task="Task1", plan="Plan 1"),
PlanPerTask(task="Task2", plan="Plan 2"),
PlanPerTask(task="Task3", plan="Plan 3"),
]
with patch.object(Task, "execute_sync") as execute:
execute.return_value = TaskOutput(
@@ -101,12 +97,12 @@ class TestInternalCrewPlanner:
# Knowledge field should not be present when empty
assert '"agent_knowledge"' not in tasks_summary
@patch("crewai.knowledge.knowledge.Knowledge.add_sources")
@patch("crewai.knowledge.storage.knowledge_storage.KnowledgeStorage")
def test_create_tasks_summary_with_knowledge_and_tools(
self, mock_storage, mock_add_sources
):
@patch("crewai.knowledge.storage.knowledge_storage.chromadb")
def test_create_tasks_summary_with_knowledge_and_tools(self, mock_chroma):
"""Test task summary generation with both knowledge and tools present."""
# Mock ChromaDB collection
mock_collection = mock_chroma.return_value.get_or_create_collection.return_value
mock_collection.add.return_value = None
# Create mock tools with proper string descriptions and structured tool support
class MockTool(BaseTool):
@@ -170,9 +166,7 @@ class TestInternalCrewPlanner:
description="Description",
agent="agent",
pydantic=PlannerTaskPydanticOutput(
list_of_plans_per_task=[
PlanPerTask(task_number=1, task="Task1", plan="Plan 1")
]
list_of_plans_per_task=[PlanPerTask(task="Task1", plan="Plan 1")]
),
)
result = crew_planner_different_llm._handle_crew_planning()
@@ -183,181 +177,3 @@ class TestInternalCrewPlanner:
crew_planner_different_llm.tasks
)
execute.assert_called_once()
def test_plan_per_task_requires_task_number(self):
"""Test that PlanPerTask model requires task_number field."""
with pytest.raises(ValueError):
PlanPerTask(task="Task1", plan="Plan 1")
def test_plan_per_task_with_task_number(self):
"""Test PlanPerTask model with task_number field."""
plan = PlanPerTask(task_number=5, task="Task5", plan="Plan for task 5")
assert plan.task_number == 5
assert plan.task == "Task5"
assert plan.plan == "Plan for task 5"
class TestCrewPlanningIntegration:
"""Tests for Crew._handle_crew_planning integration with task_number matching."""
def test_crew_planning_with_out_of_order_plans(self):
"""Test that plans are correctly matched to tasks even when returned out of order.
This test verifies the fix for issue #3953 where plans returned by the LLM
in a different order than the tasks would be incorrectly assigned.
"""
agent1 = Agent(role="Agent 1", goal="Goal 1", backstory="Backstory 1")
agent2 = Agent(role="Agent 2", goal="Goal 2", backstory="Backstory 2")
agent3 = Agent(role="Agent 3", goal="Goal 3", backstory="Backstory 3")
task1 = Task(
description="First task description",
expected_output="Output 1",
agent=agent1,
)
task2 = Task(
description="Second task description",
expected_output="Output 2",
agent=agent2,
)
task3 = Task(
description="Third task description",
expected_output="Output 3",
agent=agent3,
)
crew = Crew(
agents=[agent1, agent2, agent3],
tasks=[task1, task2, task3],
planning=True,
)
out_of_order_plans = [
PlanPerTask(task_number=3, task="Task 3", plan=" [PLAN FOR TASK 3]"),
PlanPerTask(task_number=1, task="Task 1", plan=" [PLAN FOR TASK 1]"),
PlanPerTask(task_number=2, task="Task 2", plan=" [PLAN FOR TASK 2]"),
]
mock_planner_result = PlannerTaskPydanticOutput(
list_of_plans_per_task=out_of_order_plans
)
with patch.object(
CrewPlanner, "_handle_crew_planning", return_value=mock_planner_result
):
crew._handle_crew_planning()
assert "[PLAN FOR TASK 1]" in task1.description
assert "[PLAN FOR TASK 2]" in task2.description
assert "[PLAN FOR TASK 3]" in task3.description
assert "[PLAN FOR TASK 3]" not in task1.description
assert "[PLAN FOR TASK 1]" not in task2.description
assert "[PLAN FOR TASK 2]" not in task3.description
def test_crew_planning_with_missing_plan(self):
"""Test that missing plans are handled gracefully with a warning."""
agent1 = Agent(role="Agent 1", goal="Goal 1", backstory="Backstory 1")
agent2 = Agent(role="Agent 2", goal="Goal 2", backstory="Backstory 2")
task1 = Task(
description="First task description",
expected_output="Output 1",
agent=agent1,
)
task2 = Task(
description="Second task description",
expected_output="Output 2",
agent=agent2,
)
crew = Crew(
agents=[agent1, agent2],
tasks=[task1, task2],
planning=True,
)
original_task1_desc = task1.description
original_task2_desc = task2.description
incomplete_plans = [
PlanPerTask(task_number=1, task="Task 1", plan=" [PLAN FOR TASK 1]"),
]
mock_planner_result = PlannerTaskPydanticOutput(
list_of_plans_per_task=incomplete_plans
)
with patch.object(
CrewPlanner, "_handle_crew_planning", return_value=mock_planner_result
):
crew._handle_crew_planning()
assert "[PLAN FOR TASK 1]" in task1.description
assert task2.description == original_task2_desc
def test_crew_planning_preserves_original_description(self):
"""Test that planning appends to the original task description."""
agent = Agent(role="Agent 1", goal="Goal 1", backstory="Backstory 1")
task = Task(
description="Original task description",
expected_output="Output 1",
agent=agent,
)
crew = Crew(
agents=[agent],
tasks=[task],
planning=True,
)
plans = [
PlanPerTask(task_number=1, task="Task 1", plan=" - Additional plan steps"),
]
mock_planner_result = PlannerTaskPydanticOutput(list_of_plans_per_task=plans)
with patch.object(
CrewPlanner, "_handle_crew_planning", return_value=mock_planner_result
):
crew._handle_crew_planning()
assert "Original task description" in task.description
assert "Additional plan steps" in task.description
def test_crew_planning_with_duplicate_task_numbers(self):
"""Test that duplicate task numbers use the first plan and log a warning."""
agent = Agent(role="Agent 1", goal="Goal 1", backstory="Backstory 1")
task = Task(
description="Task description",
expected_output="Output 1",
agent=agent,
)
crew = Crew(
agents=[agent],
tasks=[task],
planning=True,
)
# Two plans with the same task_number - should use the first one
duplicate_plans = [
PlanPerTask(task_number=1, task="Task 1", plan=" [FIRST PLAN]"),
PlanPerTask(task_number=1, task="Task 1", plan=" [SECOND PLAN]"),
]
mock_planner_result = PlannerTaskPydanticOutput(
list_of_plans_per_task=duplicate_plans
)
with patch.object(
CrewPlanner, "_handle_crew_planning", return_value=mock_planner_result
):
crew._handle_crew_planning()
# Should use the first plan, not the second
assert "[FIRST PLAN]" in task.description
assert "[SECOND PLAN]" not in task.description

View File

@@ -0,0 +1,94 @@
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import pytest
from pydantic import BaseModel, Field
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
def test_simple_model():
class SimpleModel(BaseModel):
field1: int
field2: str
parser = PydanticSchemaParser(model=SimpleModel)
schema = parser.get_schema()
expected_schema = """{
field1: int,
field2: str
}"""
assert schema.strip() == expected_schema.strip()
def test_nested_model():
class NestedModel(BaseModel):
nested_field: int
class ParentModel(BaseModel):
parent_field: str
nested: NestedModel
parser = PydanticSchemaParser(model=ParentModel)
schema = parser.get_schema()
expected_schema = """{
parent_field: str,
nested: NestedModel
{
nested_field: int
}
}"""
assert schema.strip() == expected_schema.strip()
def test_model_with_list():
class ListModel(BaseModel):
list_field: List[int]
parser = PydanticSchemaParser(model=ListModel)
schema = parser.get_schema()
expected_schema = """{
list_field: List[int]
}"""
assert schema.strip() == expected_schema.strip()
def test_model_with_optional_field():
class OptionalModel(BaseModel):
optional_field: Optional[str]
parser = PydanticSchemaParser(model=OptionalModel)
schema = parser.get_schema()
expected_schema = """{
optional_field: Optional[str]
}"""
assert schema.strip() == expected_schema.strip()
def test_model_with_union():
class UnionModel(BaseModel):
union_field: Union[int, str]
parser = PydanticSchemaParser(model=UnionModel)
schema = parser.get_schema()
expected_schema = """{
union_field: Union[int, str]
}"""
assert schema.strip() == expected_schema.strip()
def test_model_with_dict():
class DictModel(BaseModel):
dict_field: Dict[str, int]
parser = PydanticSchemaParser(model=DictModel)
schema = parser.get_schema()
expected_schema = """{
dict_field: Dict[str, int]
}"""
assert schema.strip() == expected_schema.strip()

View File

@@ -1,3 +1,3 @@
"""CrewAI development tools."""
__version__ = "1.7.1"
__version__ = "1.7.0"

View File

@@ -323,17 +323,13 @@ def cli() -> None:
"--dry-run", is_flag=True, help="Show what would be done without making changes"
)
@click.option("--no-push", is_flag=True, help="Don't push changes to remote")
@click.option(
"--no-commit", is_flag=True, help="Don't commit changes (just update files)"
)
def bump(version: str, dry_run: bool, no_push: bool, no_commit: bool) -> None:
def bump(version: str, dry_run: bool, no_push: bool) -> None:
"""Bump version across all packages in lib/.
Args:
version: New version to set (e.g., 1.0.0, 1.0.0a1).
dry_run: Show what would be done without making changes.
no_push: Don't push changes to remote.
no_commit: Don't commit changes (just update files).
"""
try:
# Check prerequisites
@@ -401,60 +397,51 @@ def bump(version: str, dry_run: bool, no_push: bool, no_commit: bool) -> None:
else:
console.print("[dim][DRY RUN][/dim] Would run: uv sync")
if no_commit:
console.print("\nSkipping git operations (--no-commit flag set)")
branch_name = f"feat/bump-version-{version}"
if not dry_run:
console.print(f"\nCreating branch {branch_name}...")
run_command(["git", "checkout", "-b", branch_name])
console.print("[green]✓[/green] Branch created")
console.print("\nCommitting changes...")
run_command(["git", "add", "."])
run_command(["git", "commit", "-m", f"feat: bump versions to {version}"])
console.print("[green]✓[/green] Changes committed")
if not no_push:
console.print("\nPushing branch...")
run_command(["git", "push", "-u", "origin", branch_name])
console.print("[green]✓[/green] Branch pushed")
else:
branch_name = f"feat/bump-version-{version}"
if not dry_run:
console.print(f"\nCreating branch {branch_name}...")
run_command(["git", "checkout", "-b", branch_name])
console.print("[green]✓[/green] Branch created")
console.print(f"[dim][DRY RUN][/dim] Would create branch: {branch_name}")
console.print(
f"[dim][DRY RUN][/dim] Would commit: feat: bump versions to {version}"
)
if not no_push:
console.print(f"[dim][DRY RUN][/dim] Would push branch: {branch_name}")
console.print("\nCommitting changes...")
run_command(["git", "add", "."])
run_command(
["git", "commit", "-m", f"feat: bump versions to {version}"]
)
console.print("[green]✓[/green] Changes committed")
if not no_push:
console.print("\nPushing branch...")
run_command(["git", "push", "-u", "origin", branch_name])
console.print("[green]✓[/green] Branch pushed")
else:
console.print(
f"[dim][DRY RUN][/dim] Would create branch: {branch_name}"
)
console.print(
f"[dim][DRY RUN][/dim] Would commit: feat: bump versions to {version}"
)
if not no_push:
console.print(
f"[dim][DRY RUN][/dim] Would push branch: {branch_name}"
)
if not dry_run and not no_push:
console.print("\nCreating pull request...")
run_command(
[
"gh",
"pr",
"create",
"--base",
"main",
"--title",
f"feat: bump versions to {version}",
"--body",
"",
]
)
console.print("[green]✓[/green] Pull request created")
elif dry_run:
console.print(
f"[dim][DRY RUN][/dim] Would create PR: feat: bump versions to {version}"
)
else:
console.print("\nSkipping PR creation (--no-push flag set)")
if not dry_run and not no_push:
console.print("\nCreating pull request...")
run_command(
[
"gh",
"pr",
"create",
"--base",
"main",
"--title",
f"feat: bump versions to {version}",
"--body",
"",
]
)
console.print("[green]✓[/green] Pull request created")
elif dry_run:
console.print(
f"[dim][DRY RUN][/dim] Would create PR: feat: bump versions to {version}"
)
else:
console.print("\nSkipping PR creation (--no-push flag set)")
console.print(f"\n[green]✓[/green] Version bump to {version} complete!")