Compare commits

..

27 Commits

Author SHA1 Message Date
Greyson Lalonde
5b5aa309a0 refactor: replace DisclosureLevel enum with Literal and Final constants 2026-03-12 10:14:30 -04:00
Greyson Lalonde
6a2e5b5906 fix: deduplicate coerce_skill_paths and activate pre-loaded METADATA skills 2026-03-12 01:11:01 -04:00
Greyson LaLonde
10c65272f2 Merge branch 'main' into gl/feat/agent-skills 2026-03-12 01:00:45 -04:00
Greyson LaLonde
20321f3ef8 refactor: improve skill models with ConfigDict, Final, and Field descriptions 2026-03-10 19:07:15 -04:00
Greyson LaLonde
7f7fe840d9 fix: guard set_skills against serialized crew dict during agent copy 2026-03-10 18:14:22 -04:00
Greyson LaLonde
8b024fffd8 fix: inject skill context in Agent.kickoff() via _prepare_kickoff 2026-03-10 17:31:52 -04:00
Greyson LaLonde
b0909dcc02 chore: log skill body char excess 2026-03-10 17:24:47 -04:00
Greyson LaLonde
1a519adea9 fix: correct crew_skills type annotation and activate pre-loaded Skills 2026-03-09 02:18:01 -04:00
Greyson LaLonde
083231c9fa address PR review: add skill logging, body size warning, align Crew.skills type 2026-03-09 02:10:15 -04:00
Greyson LaLonde
7bca12ab2a add logging to skill loader and body size warning to parser 2026-03-09 02:07:13 -04:00
Greyson LaLonde
d6519db75a merge: resolve conflict in unified_memory.py with main 2026-03-09 02:01:46 -04:00
João Moura
22504c4a91 Merge branch 'main' into gl/feat/agent-skills 2026-03-08 14:07:56 -07:00
Greyson Lalonde
c2eca17619 perf: skip redundant skill resolution when already resolved 2026-03-06 01:08:03 -05:00
Greyson Lalonde
01c1bf4bcc fix: improve skills typing, path validation, and event tracing 2026-03-06 00:07:23 -05:00
Greyson Lalonde
d84a7f32bd chore: dedup files 2026-03-05 21:12:16 -05:00
Greyson Lalonde
2baebb935a fix: use Path for pydantic validation 2026-03-05 21:08:30 -05:00
Greyson Lalonde
15f7a2ccd8 chore: add docs translations 2026-03-05 20:41:31 -05:00
Greyson Lalonde
a940623672 chore: cleanup dead code, docs 2026-03-05 20:37:32 -05:00
Greyson Lalonde
bb126d54e5 chore: add docs 2026-03-05 20:19:12 -05:00
Greyson Lalonde
00d3d435e2 feat: support str for paths 2026-03-05 20:17:09 -05:00
Greyson Lalonde
fe8060f719 fix: frontmatter parser match 2026-03-05 20:09:58 -05:00
Greyson Lalonde
4627c345c9 chore: linter on previous files 2026-03-05 19:56:12 -05:00
Greyson Lalonde
218084d625 Merge branch 'main' into gl/feat/agent-skills 2026-03-05 19:52:10 -05:00
Greyson Lalonde
69240c013d test: add skills unit and integration tests 2026-03-05 19:33:31 -05:00
Greyson Lalonde
4758da71e9 feat: integrate skill discovery and activation into agent execution 2026-03-05 19:33:02 -05:00
Greyson Lalonde
4200d94161 feat: add skill lifecycle events 2026-03-05 19:32:17 -05:00
Greyson Lalonde
b2a2e667bf feat: add skills models, parser, and validation 2026-03-05 19:30:20 -05:00
43 changed files with 2002 additions and 111 deletions

View File

@@ -148,6 +148,7 @@
"en/concepts/flows",
"en/concepts/production-architecture",
"en/concepts/knowledge",
"en/concepts/skills",
"en/concepts/llms",
"en/concepts/files",
"en/concepts/processes",
@@ -607,6 +608,7 @@
"en/concepts/flows",
"en/concepts/production-architecture",
"en/concepts/knowledge",
"en/concepts/skills",
"en/concepts/llms",
"en/concepts/files",
"en/concepts/processes",
@@ -1089,6 +1091,7 @@
"pt-BR/concepts/flows",
"pt-BR/concepts/production-architecture",
"pt-BR/concepts/knowledge",
"pt-BR/concepts/skills",
"pt-BR/concepts/llms",
"pt-BR/concepts/files",
"pt-BR/concepts/processes",
@@ -1526,6 +1529,7 @@
"pt-BR/concepts/flows",
"pt-BR/concepts/production-architecture",
"pt-BR/concepts/knowledge",
"pt-BR/concepts/skills",
"pt-BR/concepts/llms",
"pt-BR/concepts/files",
"pt-BR/concepts/processes",
@@ -1993,6 +1997,7 @@
"ko/concepts/flows",
"ko/concepts/production-architecture",
"ko/concepts/knowledge",
"ko/concepts/skills",
"ko/concepts/llms",
"ko/concepts/files",
"ko/concepts/processes",
@@ -2442,6 +2447,7 @@
"ko/concepts/flows",
"ko/concepts/production-architecture",
"ko/concepts/knowledge",
"ko/concepts/skills",
"ko/concepts/llms",
"ko/concepts/files",
"ko/concepts/processes",

116
docs/en/concepts/skills.mdx Normal file
View File

@@ -0,0 +1,116 @@
---
title: Skills
description: Filesystem-based skill packages that inject context into agent prompts.
icon: bolt
mode: "wide"
---
## Overview
Skills are self-contained directories that provide agents with domain-specific instructions, references, and assets. Each skill is defined by a `SKILL.md` file with YAML frontmatter and a markdown body.
Skills use **progressive disclosure** — metadata is loaded first, full instructions only when activated, and resource catalogs only when needed.
## Directory Structure
```
my-skill/
├── SKILL.md # Required — frontmatter + instructions
├── scripts/ # Optional — executable scripts
├── references/ # Optional — reference documents
└── assets/ # Optional — static files (configs, data)
```
The directory name must match the `name` field in `SKILL.md`.
## SKILL.md Format
```markdown
---
name: my-skill
description: Short description of what this skill does and when to use it.
license: Apache-2.0 # optional
compatibility: crewai>=0.1.0 # optional
metadata: # optional
author: your-name
version: "1.0"
allowed-tools: web-search file-read # optional, space-delimited
---
Instructions for the agent go here. This markdown body is injected
into the agent's prompt when the skill is activated.
```
### Frontmatter Fields
| Field | Required | Constraints |
| :-------------- | :------- | :----------------------------------------------------------------------- |
| `name` | Yes | 1–64 chars. Lowercase alphanumeric and hyphens. No leading/trailing/consecutive hyphens. Must match directory name. |
| `description` | Yes | 1–1024 chars. Describes what the skill does and when to use it. |
| `license` | No | License name or reference to a bundled license file. |
| `compatibility` | No | Max 500 chars. Environment requirements (products, packages, network). |
| `metadata` | No | Arbitrary string key-value mapping. |
| `allowed-tools` | No | Space-delimited list of pre-approved tools. Experimental. |
## Usage
### Agent-level Skills
Pass skill directory paths to an agent:
```python
from crewai import Agent
agent = Agent(
role="Researcher",
goal="Find relevant information",
backstory="An expert researcher.",
skills=["./skills"], # discovers all skills in this directory
)
```
### Crew-level Skills
Skill paths on a crew are merged into every agent:
```python
from crewai import Crew
crew = Crew(
agents=[agent],
tasks=[task],
skills=["./skills"],
)
```
### Pre-loaded Skills
You can also pass `Skill` objects directly:
```python
from pathlib import Path
from crewai.skills import discover_skills, activate_skill
skills = discover_skills(Path("./skills"))
activated = [activate_skill(s) for s in skills]
agent = Agent(
role="Researcher",
goal="Find relevant information",
backstory="An expert researcher.",
skills=activated,
)
```
## Disclosure Levels
Skills load progressively through three levels:
| Level | What's loaded | When |
| :--------------- | :------------------------------------------------ | :----------------- |
| `METADATA` | Name, description, frontmatter fields | `discover_skills()` |
| `INSTRUCTIONS` | Full SKILL.md body text | `activate_skill()` |
| `RESOURCES` | File listings from scripts/, references/, assets/ | `load_resources()` |
During normal agent execution, skills are automatically discovered and activated (promoted to `INSTRUCTIONS`). Use `load_resources()` only when you need to inspect available files programmatically.

115
docs/ko/concepts/skills.mdx Normal file
View File

@@ -0,0 +1,115 @@
---
title: 스킬
description: 에이전트 프롬프트에 컨텍스트를 주입하는 파일 시스템 기반 스킬 패키지.
icon: bolt
mode: "wide"
---
## 개요
스킬은 에이전트에게 도메인별 지침, 참조 자료, 에셋을 제공하는 자체 포함 디렉터리입니다. 각 스킬은 YAML 프론트매터와 마크다운 본문이 포함된 `SKILL.md` 파일로 정의됩니다.
스킬은 **점진적 공개**를 사용합니다 — 메타데이터가 먼저 로드되고, 활성화 시에만 전체 지침이 로드되며, 필요할 때만 리소스 카탈로그가 로드됩니다.
## 디렉터리 구조
```
my-skill/
├── SKILL.md # 필수 — 프론트매터 + 지침
├── scripts/ # 선택 — 실행 가능한 스크립트
├── references/ # 선택 — 참조 문서
└── assets/ # 선택 — 정적 파일 (설정, 데이터)
```
디렉터리 이름은 `SKILL.md`의 `name` 필드와 일치해야 합니다.
## SKILL.md 형식
```markdown
---
name: my-skill
description: 이 스킬이 무엇을 하고 언제 사용하는지에 대한 간단한 설명.
license: Apache-2.0 # 선택
compatibility: crewai>=0.1.0 # 선택
metadata: # 선택
author: your-name
version: "1.0"
allowed-tools: web-search file-read # 선택, 공백으로 구분
---
에이전트를 위한 지침이 여기에 들어갑니다. 이 마크다운 본문은
스킬이 활성화되면 에이전트의 프롬프트에 주입됩니다.
```
### 프론트매터 필드
| 필드 | 필수 | 제약 조건 |
| :-------------- | :----- | :----------------------------------------------------------------------- |
| `name` | 예 | 1–64자. 소문자 영숫자와 하이픈. 선행/후행/연속 하이픈 불가. 디렉터리 이름과 일치 필수. |
| `description` | 예 | 1–1024자. 스킬이 무엇을 하고 언제 사용하는지 설명. |
| `license` | 아니오 | 라이선스 이름 또는 번들된 라이선스 파일 참조. |
| `compatibility` | 아니오 | 최대 500자. 환경 요구 사항 (제품, 패키지, 네트워크). |
| `metadata` | 아니오 | 임의의 문자열 키-값 매핑. |
| `allowed-tools` | 아니오 | 공백으로 구분된 사전 승인 도구 목록. 실험적. |
## 사용법
### 에이전트 레벨 스킬
에이전트에 스킬 디렉터리 경로를 전달합니다:
```python
from crewai import Agent
agent = Agent(
role="Researcher",
goal="Find relevant information",
backstory="An expert researcher.",
skills=["./skills"], # 이 디렉터리의 모든 스킬을 검색
)
```
### 크루 레벨 스킬
크루의 스킬 경로는 모든 에이전트에 병합됩니다:
```python
from crewai import Crew
crew = Crew(
agents=[agent],
tasks=[task],
skills=["./skills"],
)
```
### 사전 로드된 스킬
`Skill` 객체를 직접 전달할 수도 있습니다:
```python
from pathlib import Path
from crewai.skills import discover_skills, activate_skill
skills = discover_skills(Path("./skills"))
activated = [activate_skill(s) for s in skills]
agent = Agent(
role="Researcher",
goal="Find relevant information",
backstory="An expert researcher.",
skills=activated,
)
```
## 공개 레벨
스킬은 세 단계를 통해 점진적으로 로드됩니다:
| 레벨 | 로드되는 내용 | 시점 |
| :--------------- | :------------------------------------------------ | :----------------- |
| `METADATA` | 이름, 설명, 프론트매터 필드 | `discover_skills()` |
| `INSTRUCTIONS` | 전체 SKILL.md 본문 텍스트 | `activate_skill()` |
| `RESOURCES` | scripts/, references/, assets/의 파일 목록 | `load_resources()` |
일반적인 에이전트 실행 중에 스킬은 자동으로 검색되고 활성화됩니다 (`INSTRUCTIONS`로 승격). 프로그래밍 방식으로 사용 가능한 파일을 검사해야 하는 경우에만 `load_resources()`를 사용하세요.

View File

@@ -0,0 +1,115 @@
---
title: Skills
description: Pacotes de skills baseados em sistema de arquivos que injetam contexto nos prompts dos agentes.
icon: bolt
mode: "wide"
---
## Visão Geral
Skills são diretórios autocontidos que fornecem aos agentes instruções, referências e assets específicos de domínio. Cada skill é definida por um arquivo `SKILL.md` com frontmatter YAML e um corpo em markdown.
Skills usam **divulgação progressiva** — metadados são carregados primeiro, instruções completas apenas quando ativadas, e catálogos de recursos apenas quando necessário.
## Estrutura de Diretório
```
my-skill/
├── SKILL.md # Obrigatório — frontmatter + instruções
├── scripts/ # Opcional — scripts executáveis
├── references/ # Opcional — documentos de referência
└── assets/ # Opcional — arquivos estáticos (configs, dados)
```
O nome do diretório deve corresponder ao campo `name` no `SKILL.md`.
## Formato do SKILL.md
```markdown
---
name: my-skill
description: Descrição curta do que esta skill faz e quando usá-la.
license: Apache-2.0 # opcional
compatibility: crewai>=0.1.0 # opcional
metadata: # opcional
author: your-name
version: "1.0"
allowed-tools: web-search file-read # opcional, delimitado por espaços
---
Instruções para o agente vão aqui. Este corpo em markdown é injetado
no prompt do agente quando a skill é ativada.
```
### Campos do Frontmatter
| Campo | Obrigatório | Restrições |
| :-------------- | :---------- | :----------------------------------------------------------------------- |
| `name` | Sim | 1–64 chars. Alfanumérico minúsculo e hifens. Sem hifens iniciais/finais/consecutivos. Deve corresponder ao nome do diretório. |
| `description` | Sim | 1–1024 chars. Descreve o que a skill faz e quando usá-la. |
| `license` | Não | Nome da licença ou referência a um arquivo de licença incluído. |
| `compatibility` | Não | Máx 500 chars. Requisitos de ambiente (produtos, pacotes, rede). |
| `metadata` | Não | Mapeamento arbitrário de chave-valor string. |
| `allowed-tools` | Não | Lista de ferramentas pré-aprovadas delimitada por espaços. Experimental. |
## Uso
### Skills no Nível do Agente
Passe caminhos de diretório de skills para um agente:
```python
from crewai import Agent
agent = Agent(
role="Researcher",
goal="Find relevant information",
backstory="An expert researcher.",
skills=["./skills"], # descobre todas as skills neste diretório
)
```
### Skills no Nível do Crew
Caminhos de skills no crew são mesclados em todos os agentes:
```python
from crewai import Crew
crew = Crew(
agents=[agent],
tasks=[task],
skills=["./skills"],
)
```
### Skills Pré-carregadas
Você também pode passar objetos `Skill` diretamente:
```python
from pathlib import Path
from crewai.skills import discover_skills, activate_skill
skills = discover_skills(Path("./skills"))
activated = [activate_skill(s) for s in skills]
agent = Agent(
role="Researcher",
goal="Find relevant information",
backstory="An expert researcher.",
skills=activated,
)
```
## Níveis de Divulgação
Skills carregam progressivamente através de três níveis:
| Nível | O que é carregado | Quando |
| :--------------- | :------------------------------------------------ | :------------------ |
| `METADATA` | Nome, descrição, campos do frontmatter | `discover_skills()` |
| `INSTRUCTIONS` | Texto completo do corpo do SKILL.md | `activate_skill()` |
| `RESOURCES` | Listagem de arquivos de scripts/, references/, assets/ | `load_resources()` |
Durante a execução normal do agente, skills são automaticamente descobertas e ativadas (promovidas para `INSTRUCTIONS`). Use `load_resources()` apenas quando precisar inspecionar arquivos disponíveis programaticamente.

View File

@@ -42,6 +42,7 @@ dependencies = [
"mcp~=1.26.0",
"uv~=0.9.13",
"aiosqlite~=0.21.0",
"pyyaml~=6.0",
"lancedb>=0.29.2",
]

View File

@@ -2,6 +2,7 @@ from __future__ import annotations
import asyncio
from collections.abc import Callable, Coroutine, Sequence
from pathlib import Path
import shutil
import subprocess
import time
@@ -24,6 +25,7 @@ from typing_extensions import Self
from crewai.agent.utils import (
ahandle_knowledge_retrieval,
append_skill_context,
apply_training_data,
build_task_prompt_with_schema,
format_task_with_context,
@@ -63,6 +65,8 @@ from crewai.mcp import MCPServerConfig
from crewai.mcp.tool_resolver import MCPToolResolver
from crewai.rag.embeddings.types import EmbedderConfig
from crewai.security.fingerprint import Fingerprint
from crewai.skills.loader import activate_skill, discover_skills
from crewai.skills.models import INSTRUCTIONS, Skill as SkillModel
from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.utilities.agent_utils import (
get_tool_names,
@@ -264,6 +268,8 @@ class Agent(BaseAgent):
if self.allow_code_execution:
self._validate_docker_installation()
self.set_skills()
return self
def _setup_agent_executor(self) -> None:
@@ -289,6 +295,52 @@ class Agent(BaseAgent):
except (TypeError, ValueError) as e:
raise ValueError(f"Invalid Knowledge Configuration: {e!s}") from e
def set_skills(self) -> None:
"""Resolve skill paths and activate skills to INSTRUCTIONS level.

Path entries trigger discovery and activation. Pre-loaded Skill objects
below INSTRUCTIONS level are activated. Crew-level skills are merged in
after the agent's own entries, so agent skills win name collisions.
"""
# Local import — presumably avoids a circular agent/crew dependency; confirm.
from crewai.crew import Crew
# Crew-level entries, if any. The isinstance(self.crew, Crew) check guards
# against self.crew being a serialized dict (e.g. during agent copy), and
# the list check guards against non-list skills values.
crew_skills: list[Path | SkillModel] | None = (
self.crew.skills
if isinstance(self.crew, Crew) and isinstance(self.crew.skills, list)
else None
)
# Nothing to resolve anywhere: bail out early.
if not self.skills and not crew_skills:
return
# Work is needed only when some entry is a Path (requires discovery) or a
# Skill still below the INSTRUCTIONS disclosure level (requires activation).
needs_work = self.skills and any(
isinstance(s, Path)
or (isinstance(s, SkillModel) and s.disclosure_level < INSTRUCTIONS)
for s in self.skills
)
# Fast path: agent skills are already fully activated and there are no
# crew skills to merge in.
if not needs_work and not crew_skills:
return
# Deduplicate by skill name; the first occurrence wins. Agent entries are
# processed before crew entries, so agent skills take precedence.
seen: set[str] = set()
resolved: list[Path | SkillModel] = []
items: list[Path | SkillModel] = list(self.skills) if self.skills else []
if crew_skills:
items.extend(crew_skills)
for item in items:
if isinstance(item, Path):
# Paths are search roots: discover every skill beneath them, then
# activate each newly-seen skill to INSTRUCTIONS level.
discovered = discover_skills(item, source=self)
for skill in discovered:
if skill.name not in seen:
seen.add(skill.name)
resolved.append(activate_skill(skill, source=self))
elif isinstance(item, SkillModel):
# Pre-loaded Skill objects are activated directly (activate_skill is
# also called on already-activated skills — presumably a no-op; confirm).
if item.name not in seen:
seen.add(item.name)
resolved.append(activate_skill(item, source=self))
# Replace with the resolved list, or None when nothing was resolved.
self.skills = resolved if resolved else None
def _is_any_available_memory(self) -> bool:
"""Check if unified memory is available (agent or crew)."""
if getattr(self, "memory", None):
@@ -405,6 +457,8 @@ class Agent(BaseAgent):
self.crew.query_knowledge if self.crew else lambda *a, **k: None,
)
task_prompt = append_skill_context(self, task_prompt)
prepare_tools(self, tools, task)
task_prompt = apply_training_data(self, task_prompt)
@@ -638,6 +692,8 @@ class Agent(BaseAgent):
self, task, task_prompt, knowledge_config
)
task_prompt = append_skill_context(self, task_prompt)
prepare_tools(self, tools, task)
task_prompt = apply_training_data(self, task_prompt)
@@ -1299,6 +1355,8 @@ class Agent(BaseAgent):
),
)
formatted_messages = append_skill_context(self, formatted_messages)
# Build the input dict for the executor
inputs: dict[str, Any] = {
"input": formatted_messages,

View File

@@ -203,6 +203,30 @@ def _combine_knowledge_context(agent: Agent) -> str:
return agent_ctx + separator + crew_ctx
def append_skill_context(agent: Agent, task_prompt: str) -> str:
    """Append activated skill context sections to the task prompt.

    Args:
        agent: The agent whose optional ``skills`` list supplies the context.
        task_prompt: The prompt assembled so far.

    Returns:
        The prompt with one context section per ``Skill`` instance appended,
        or the prompt unchanged when the agent has no skill objects.
    """
    if not agent.skills:
        return task_prompt

    # Deferred imports keep the skills package out of the module import path.
    from crewai.skills.loader import format_skill_context
    from crewai.skills.models import Skill

    # Only fully-loaded Skill objects contribute context; raw Path entries
    # (not yet discovered/activated) are ignored here.
    sections: list[str] = []
    for entry in agent.skills:
        if isinstance(entry, Skill):
            sections.append(format_skill_context(entry))

    if not sections:
        return task_prompt
    return task_prompt + "\n\n" + "\n\n".join(sections)
def apply_training_data(agent: Agent, task_prompt: str) -> str:
"""Apply training data to the task prompt.

View File

@@ -4,6 +4,7 @@ from abc import ABC, abstractmethod
from collections.abc import Callable
from copy import copy as shallow_copy
from hashlib import md5
from pathlib import Path
import re
from typing import Any, Final, Literal
import uuid
@@ -29,6 +30,8 @@ from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.mcp.config import MCPServerConfig
from crewai.rag.embeddings.types import EmbedderConfig
from crewai.security.security_config import SecurityConfig
from crewai.skills.models import Skill
from crewai.skills.validation import coerce_skill_paths as _coerce_skill_paths
from crewai.tools.base_tool import BaseTool, Tool
from crewai.utilities.config import process_config
from crewai.utilities.i18n import I18N, get_i18n
@@ -213,6 +216,17 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
"If not set, falls back to crew memory."
),
)
skills: list[Path | Skill] | None = Field(
default=None,
description="Agent Skills. Accepts paths for discovery or pre-loaded Skill objects.",
min_length=1,
)
@field_validator("skills", mode="before")
@classmethod
def coerce_skill_paths(cls, v: list[Any] | None) -> list[Path | Skill] | None:
"""Coerce string entries to Path objects.

Runs before field validation; delegates to the shared
``crewai.skills.validation.coerce_skill_paths`` helper, which converts
string entries to ``Path`` objects (other entry kinds are presumably
passed through — confirm against the helper).
"""
return _coerce_skill_paths(v)
@model_validator(mode="before")
@classmethod
@@ -496,3 +510,6 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
def set_knowledge(self, crew_embedder: EmbedderConfig | None = None) -> None:
pass
def set_skills(self) -> None:
pass

View File

@@ -895,7 +895,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
ToolUsageStartedEvent,
)
args_dict, parse_error = parse_tool_call_args(func_args, func_name, call_id, original_tool)
args_dict, parse_error = parse_tool_call_args(
func_args, func_name, call_id, original_tool
)
if parse_error is not None:
return parse_error

View File

@@ -182,15 +182,24 @@ def log_tasks_outputs() -> None:
@crewai.command()
@click.option("-m", "--memory", is_flag=True, help="Reset MEMORY")
@click.option(
"-l", "--long", is_flag=True, hidden=True,
"-l",
"--long",
is_flag=True,
hidden=True,
help="[Deprecated: use --memory] Reset memory",
)
@click.option(
"-s", "--short", is_flag=True, hidden=True,
"-s",
"--short",
is_flag=True,
hidden=True,
help="[Deprecated: use --memory] Reset memory",
)
@click.option(
"-e", "--entities", is_flag=True, hidden=True,
"-e",
"--entities",
is_flag=True,
hidden=True,
help="[Deprecated: use --memory] Reset memory",
)
@click.option("-kn", "--knowledge", is_flag=True, help="Reset KNOWLEDGE storage")
@@ -218,7 +227,13 @@ def reset_memories(
# Treat legacy flags as --memory with a deprecation warning
if long or short or entities:
legacy_used = [
f for f, v in [("--long", long), ("--short", short), ("--entities", entities)] if v
f
for f, v in [
("--long", long),
("--short", short),
("--entities", entities),
]
if v
]
click.echo(
f"Warning: {', '.join(legacy_used)} {'is' if len(legacy_used) == 1 else 'are'} "
@@ -238,9 +253,7 @@ def reset_memories(
"Please specify at least one memory type to reset using the appropriate flags."
)
return
reset_memories_command(
memory, knowledge, agent_knowledge, kickoff_outputs, all
)
reset_memories_command(memory, knowledge, agent_knowledge, kickoff_outputs, all)
except Exception as e:
click.echo(f"An error occurred while resetting memories: {e}", err=True)

View File

@@ -125,13 +125,19 @@ class MemoryTUI(App[None]):
from crewai.memory.storage.lancedb_storage import LanceDBStorage
from crewai.memory.unified_memory import Memory
storage = LanceDBStorage(path=storage_path) if storage_path else LanceDBStorage()
storage = (
LanceDBStorage(path=storage_path) if storage_path else LanceDBStorage()
)
embedder = None
if embedder_config is not None:
from crewai.rag.embeddings.factory import build_embedder
embedder = build_embedder(embedder_config)
self._memory = Memory(storage=storage, embedder=embedder) if embedder else Memory(storage=storage)
self._memory = (
Memory(storage=storage, embedder=embedder)
if embedder
else Memory(storage=storage)
)
except Exception as e:
self._init_error = str(e)
@@ -200,11 +206,7 @@ class MemoryTUI(App[None]):
if len(record.content) > 80
else record.content
)
label = (
f"{date_str} "
f"[bold]{record.importance:.1f}[/] "
f"{preview}"
)
label = f"{date_str} [bold]{record.importance:.1f}[/] {preview}"
option_list.add_option(label)
def _populate_recall_list(self) -> None:
@@ -220,9 +222,7 @@ class MemoryTUI(App[None]):
else m.record.content
)
label = (
f"[bold]\\[{m.score:.2f}][/] "
f"{preview} "
f"[dim]scope={m.record.scope}[/]"
f"[bold]\\[{m.score:.2f}][/] {preview} [dim]scope={m.record.scope}[/]"
)
option_list.add_option(label)
@@ -251,8 +251,7 @@ class MemoryTUI(App[None]):
lines.append(f"[dim]Scope:[/] [bold]{record.scope}[/]")
lines.append(f"[dim]Importance:[/] [bold]{record.importance:.2f}[/]")
lines.append(
f"[dim]Created:[/] "
f"{record.created_at.strftime('%Y-%m-%d %H:%M:%S')}"
f"[dim]Created:[/] {record.created_at.strftime('%Y-%m-%d %H:%M:%S')}"
)
lines.append(
f"[dim]Last accessed:[/] "
@@ -362,17 +361,11 @@ class MemoryTUI(App[None]):
panel = self.query_one("#info-panel", Static)
panel.loading = True
try:
scope = (
self._selected_scope
if self._selected_scope != "/"
else None
)
scope = self._selected_scope if self._selected_scope != "/" else None
loop = asyncio.get_event_loop()
matches = await loop.run_in_executor(
None,
lambda: self._memory.recall(
query, scope=scope, limit=10, depth="deep"
),
lambda: self._memory.recall(query, scope=scope, limit=10, depth="deep"),
)
self._recall_matches = matches or []
self._view_mode = "recall"

View File

@@ -95,9 +95,7 @@ def reset_memories_command(
continue
if memory:
_reset_flow_memory(flow)
click.echo(
f"[Flow ({flow_name})] Memory has been reset."
)
click.echo(f"[Flow ({flow_name})] Memory has been reset.")
except subprocess.CalledProcessError as e:
click.echo(f"An error occurred while resetting the memories: {e}", err=True)

View File

@@ -442,9 +442,7 @@ def get_flows(flow_path: str = "main.py") -> list[Flow]:
for search_path in search_paths:
for root, dirs, files in os.walk(search_path):
dirs[:] = [
d
for d in dirs
if d not in _SKIP_DIRS and not d.startswith(".")
d for d in dirs if d not in _SKIP_DIRS and not d.startswith(".")
]
if flow_path in files and "cli/templates" not in root:
file_os_path = os.path.join(root, flow_path)
@@ -464,9 +462,7 @@ def get_flows(flow_path: str = "main.py") -> list[Flow]:
for attr_name in dir(module):
module_attr = getattr(module, attr_name)
try:
if flow_instance := get_flow_instance(
module_attr
):
if flow_instance := get_flow_instance(module_attr):
flow_instances.append(flow_instance)
except Exception: # noqa: S112
continue

View File

@@ -6,6 +6,7 @@ from concurrent.futures import Future
from copy import copy as shallow_copy
from hashlib import md5
import json
from pathlib import Path
import re
from typing import (
TYPE_CHECKING,
@@ -88,6 +89,8 @@ from crewai.rag.embeddings.types import EmbedderConfig
from crewai.rag.types import SearchResult
from crewai.security.fingerprint import Fingerprint
from crewai.security.security_config import SecurityConfig
from crewai.skills.models import Skill
from crewai.skills.validation import coerce_skill_paths as _coerce_skill_paths
from crewai.task import Task
from crewai.tasks.conditional_task import ConditionalTask
from crewai.tasks.task_output import TaskOutput
@@ -291,6 +294,17 @@ class Crew(FlowTrackable, BaseModel):
default=None,
description="Knowledge for the crew.",
)
skills: list[Path | Skill] | None = Field(
default=None,
description="Skill search paths or pre-loaded Skill objects applied to all agents in the crew.",
)
@field_validator("skills", mode="before")
@classmethod
def coerce_skill_paths(cls, v: list[Any] | None) -> list[Path | Skill] | None:
"""Coerce string entries to Path objects, pass through Skill instances.

Runs before field validation; delegates to the shared
``crewai.skills.validation.coerce_skill_paths`` helper so Crew and
BaseAgent normalize their ``skills`` fields identically.
"""
return _coerce_skill_paths(v)
security_config: SecurityConfig = Field(
default_factory=SecurityConfig,
description="Security configuration for the crew, including fingerprinting.",
@@ -362,7 +376,7 @@ class Crew(FlowTrackable, BaseModel):
if self.embedder is not None:
from crewai.rag.embeddings.factory import build_embedder
embedder = build_embedder(self.embedder)
embedder = build_embedder(cast(dict[str, Any], self.embedder))
self._memory = Memory(embedder=embedder)
elif self.memory:
# User passed a Memory / MemoryScope / MemorySlice instance
@@ -1410,9 +1424,7 @@ class Crew(FlowTrackable, BaseModel):
return self._merge_tools(tools, cast(list[BaseTool], code_tools))
return tools
def _add_memory_tools(
self, tools: list[BaseTool], memory: Any
) -> list[BaseTool]:
def _add_memory_tools(self, tools: list[BaseTool], memory: Any) -> list[BaseTool]:
"""Add recall and remember tools when memory is available.
Args:

View File

@@ -70,6 +70,7 @@ def setup_agents(
for agent in agents:
agent.crew = crew
agent.set_knowledge(crew_embedder=embedder)
agent.set_skills()
if not agent.function_calling_llm: # type: ignore[attr-defined]
agent.function_calling_llm = function_calling_llm # type: ignore[attr-defined]
if not agent.step_callback: # type: ignore[attr-defined]

View File

@@ -88,6 +88,14 @@ from crewai.events.types.reasoning_events import (
AgentReasoningStartedEvent,
ReasoningEvent,
)
from crewai.events.types.skill_events import (
SkillActivatedEvent,
SkillDiscoveryCompletedEvent,
SkillDiscoveryStartedEvent,
SkillEvent,
SkillLoadFailedEvent,
SkillLoadedEvent,
)
from crewai.events.types.task_events import (
TaskCompletedEvent,
TaskEvaluationEvent,
@@ -186,6 +194,12 @@ __all__ = [
"MethodExecutionFinishedEvent",
"MethodExecutionStartedEvent",
"ReasoningEvent",
"SkillActivatedEvent",
"SkillDiscoveryCompletedEvent",
"SkillDiscoveryStartedEvent",
"SkillEvent",
"SkillLoadFailedEvent",
"SkillLoadedEvent",
"TaskCompletedEvent",
"TaskEvaluationEvent",
"TaskFailedEvent",

View File

@@ -0,0 +1,62 @@
"""Skill lifecycle events for the Agent Skills standard.
Events emitted during skill discovery, loading, and activation.
"""
from __future__ import annotations
from pathlib import Path
from typing import Any
from crewai.events.base_events import BaseEvent
class SkillEvent(BaseEvent):
"""Base event for skill operations.

Carries the skill's identity plus optional agent/task provenance shared
by all skill lifecycle events.
"""
# Name of the skill involved; empty string by default.
skill_name: str = ""
# Filesystem path of the skill directory, when known.
skill_path: Path | None = None
# Originating agent, if the operation ran in an agent context.
from_agent: Any | None = None
# Originating task, if the operation ran in a task context.
from_task: Any | None = None
def __init__(self, **data: Any) -> None:
super().__init__(**data)
# Propagate agent/task provenance from the raw event data — helpers
# presumably inherited from BaseEvent; confirm their contract there.
self._set_agent_params(data)
self._set_task_params(data)
class SkillDiscoveryStartedEvent(SkillEvent):
"""Event emitted when skill discovery begins."""
type: str = "skill_discovery_started"
# Directory being scanned for skills.
search_path: Path
class SkillDiscoveryCompletedEvent(SkillEvent):
"""Event emitted when skill discovery completes."""
type: str = "skill_discovery_completed"
# Directory that was scanned.
search_path: Path
# Number of skills found under search_path.
skills_found: int
# Names of the discovered skills.
skill_names: list[str]
class SkillLoadedEvent(SkillEvent):
"""Event emitted when a skill is loaded at metadata level."""
type: str = "skill_loaded"
# Numeric disclosure level — 1 presumably corresponds to the METADATA
# constant in crewai.skills.models; confirm they stay in sync.
disclosure_level: int = 1
class SkillActivatedEvent(SkillEvent):
"""Event emitted when a skill is activated (promoted to instructions level)."""
type: str = "skill_activated"
# Numeric disclosure level — 2 presumably corresponds to the INSTRUCTIONS
# constant in crewai.skills.models; confirm they stay in sync.
disclosure_level: int = 2
class SkillLoadFailedEvent(SkillEvent):
"""Event emitted when skill loading fails."""
type: str = "skill_load_failed"
# Human-readable description of the load failure.
error: str

View File

@@ -729,7 +729,11 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
max_workers = min(8, len(runnable_tool_calls))
with ThreadPoolExecutor(max_workers=max_workers) as pool:
future_to_idx = {
pool.submit(contextvars.copy_context().run, self._execute_single_native_tool_call, tool_call): idx
pool.submit(
contextvars.copy_context().run,
self._execute_single_native_tool_call,
tool_call,
): idx
for idx, tool_call in enumerate(runnable_tool_calls)
}
ordered_results: list[dict[str, Any] | None] = [None] * len(

View File

@@ -34,6 +34,7 @@ class ConsoleProvider:
```python
from crewai.flow.async_feedback import ConsoleProvider
@human_feedback(
message="Review this:",
provider=ConsoleProvider(),
@@ -46,6 +47,7 @@ class ConsoleProvider:
```python
from crewai.flow import Flow, start
class MyFlow(Flow):
@start()
def gather_info(self):

View File

@@ -188,7 +188,7 @@ def human_feedback(
metadata: dict[str, Any] | None = None,
provider: HumanFeedbackProvider | None = None,
learn: bool = False,
learn_source: str = "hitl"
learn_source: str = "hitl",
) -> Callable[[F], F]:
"""Decorator for Flow methods that require human feedback.
@@ -328,9 +328,7 @@ def human_feedback(
"""Recall past HITL lessons and use LLM to pre-review the output."""
try:
query = f"human feedback lessons for {func.__name__}: {method_output!s}"
matches = flow_instance.memory.recall(
query, source=learn_source
)
matches = flow_instance.memory.recall(query, source=learn_source)
if not matches:
return method_output
@@ -341,7 +339,10 @@ def human_feedback(
lessons=lessons,
)
messages = [
{"role": "system", "content": _get_hitl_prompt("hitl_pre_review_system")},
{
"role": "system",
"content": _get_hitl_prompt("hitl_pre_review_system"),
},
{"role": "user", "content": prompt},
]
if getattr(llm_inst, "supports_function_calling", lambda: False)():
@@ -366,7 +367,10 @@ def human_feedback(
feedback=raw_feedback,
)
messages = [
{"role": "system", "content": _get_hitl_prompt("hitl_distill_system")},
{
"role": "system",
"content": _get_hitl_prompt("hitl_distill_system"),
},
{"role": "user", "content": prompt},
]
@@ -487,7 +491,11 @@ def human_feedback(
result = _process_feedback(self, method_output, raw_feedback)
# Distill: extract lessons from output + feedback, store in memory
if learn and getattr(self, "memory", None) is not None and raw_feedback.strip():
if (
learn
and getattr(self, "memory", None) is not None
and raw_feedback.strip()
):
_distill_and_store_lessons(self, method_output, raw_feedback)
return result
@@ -507,7 +515,11 @@ def human_feedback(
result = _process_feedback(self, method_output, raw_feedback)
# Distill: extract lessons from output + feedback, store in memory
if learn and getattr(self, "memory", None) is not None and raw_feedback.strip():
if (
learn
and getattr(self, "memory", None) is not None
and raw_feedback.strip()
):
_distill_and_store_lessons(self, method_output, raw_feedback)
return result
@@ -534,7 +546,7 @@ def human_feedback(
metadata=metadata,
provider=provider,
learn=learn,
learn_source=learn_source
learn_source=learn_source,
)
wrapper.__is_flow_method__ = True

View File

@@ -308,7 +308,9 @@ def analyze_for_save(
return MemoryAnalysis.model_validate(response)
except Exception as e:
_logger.warning(
"Memory save analysis failed, using defaults: %s", e, exc_info=False,
"Memory save analysis failed, using defaults: %s",
e,
exc_info=False,
)
return _SAVE_DEFAULTS
@@ -366,6 +368,8 @@ def analyze_for_consolidation(
return ConsolidationPlan.model_validate(response)
except Exception as e:
_logger.warning(
"Consolidation analysis failed, defaulting to insert: %s", e, exc_info=False,
"Consolidation analysis failed, defaulting to insert: %s",
e,
exc_info=False,
)
return _CONSOLIDATION_DEFAULT

View File

@@ -164,7 +164,11 @@ class EncodingFlow(Flow[EncodingState]):
def parallel_find_similar(self) -> None:
"""Search storage for similar records, concurrently for all active items."""
items = list(self.state.items)
active = [(i, item) for i, item in enumerate(items) if not item.dropped and item.embedding]
active = [
(i, item)
for i, item in enumerate(items)
if not item.dropped and item.embedding
]
if not active:
return
@@ -186,7 +190,9 @@ class EncodingFlow(Flow[EncodingState]):
item.top_similarity = float(raw[0][1]) if raw else 0.0
else:
with ThreadPoolExecutor(max_workers=min(len(active), 8)) as pool:
futures = [(i, item, pool.submit(_search_one, item)) for i, item in active]
futures = [
(i, item, pool.submit(_search_one, item)) for i, item in active
]
for _, item, future in futures:
raw = future.result()
item.similar_records = [r for r, _ in raw]
@@ -251,23 +257,33 @@ class EncodingFlow(Flow[EncodingState]):
self._apply_defaults(item)
consol_futures[i] = pool.submit(
analyze_for_consolidation,
item.content, list(item.similar_records), self._llm,
item.content,
list(item.similar_records),
self._llm,
)
elif not fields_provided and not has_similar:
# Group C: field resolution only
save_futures[i] = pool.submit(
analyze_for_save,
item.content, existing_scopes, existing_categories, self._llm,
item.content,
existing_scopes,
existing_categories,
self._llm,
)
else:
# Group D: both in parallel
save_futures[i] = pool.submit(
analyze_for_save,
item.content, existing_scopes, existing_categories, self._llm,
item.content,
existing_scopes,
existing_categories,
self._llm,
)
consol_futures[i] = pool.submit(
analyze_for_consolidation,
item.content, list(item.similar_records), self._llm,
item.content,
list(item.similar_records),
self._llm,
)
# Collect field-resolution results
@@ -339,7 +355,9 @@ class EncodingFlow(Flow[EncodingState]):
# similar_records overlap). Collect one action per record_id, first wins.
# Also build a map from record_id to the original MemoryRecord for updates.
dedup_deletes: set[str] = set() # record_ids to delete
dedup_updates: dict[str, tuple[int, str]] = {} # record_id -> (item_idx, new_content)
dedup_updates: dict[
str, tuple[int, str]
] = {} # record_id -> (item_idx, new_content)
all_similar: dict[str, MemoryRecord] = {} # record_id -> MemoryRecord
for i, item in enumerate(items):
@@ -350,13 +368,24 @@ class EncodingFlow(Flow[EncodingState]):
all_similar[r.id] = r
for action in item.plan.actions:
rid = action.record_id
if action.action == "delete" and rid not in dedup_deletes and rid not in dedup_updates:
if (
action.action == "delete"
and rid not in dedup_deletes
and rid not in dedup_updates
):
dedup_deletes.add(rid)
elif action.action == "update" and action.new_content and rid not in dedup_deletes and rid not in dedup_updates:
elif (
action.action == "update"
and action.new_content
and rid not in dedup_deletes
and rid not in dedup_updates
):
dedup_updates[rid] = (i, action.new_content)
# --- Batch re-embed all update contents in ONE call ---
update_list = list(dedup_updates.items()) # [(record_id, (item_idx, new_content)), ...]
update_list = list(
dedup_updates.items()
) # [(record_id, (item_idx, new_content)), ...]
update_embeddings: list[list[float]] = []
if update_list:
update_contents = [content for _, (_, content) in update_list]
@@ -377,16 +406,21 @@ class EncodingFlow(Flow[EncodingState]):
if item.dropped or item.plan is None:
continue
if item.plan.insert_new:
to_insert.append((i, MemoryRecord(
content=item.content,
scope=item.resolved_scope,
categories=item.resolved_categories,
metadata=item.resolved_metadata,
importance=item.resolved_importance,
embedding=item.embedding if item.embedding else None,
source=item.resolved_source,
private=item.resolved_private,
)))
to_insert.append(
(
i,
MemoryRecord(
content=item.content,
scope=item.resolved_scope,
categories=item.resolved_categories,
metadata=item.resolved_metadata,
importance=item.resolved_importance,
embedding=item.embedding if item.embedding else None,
source=item.resolved_source,
private=item.resolved_private,
),
)
)
# All storage mutations under one lock so no other pipeline can
# interleave and cause version conflicts. The lock is reentrant

View File

@@ -103,13 +103,12 @@ class RecallFlow(Flow[RecallState]):
)
# Post-filter by time cutoff
if self.state.time_cutoff and raw:
raw = [
(r, s) for r, s in raw if r.created_at >= self.state.time_cutoff
]
raw = [(r, s) for r, s in raw if r.created_at >= self.state.time_cutoff]
# Privacy filter
if not self.state.include_private and raw:
raw = [
(r, s) for r, s in raw
(r, s)
for r, s in raw
if not r.private or r.source == self.state.source
]
return scope, raw
@@ -130,16 +129,17 @@ class RecallFlow(Flow[RecallState]):
top_composite, _ = compute_composite_score(
results[0][0], results[0][1], self._config
)
findings.append({
"scope": scope,
"results": results,
"top_score": top_composite,
})
findings.append(
{
"scope": scope,
"results": results,
"top_score": top_composite,
}
)
else:
with ThreadPoolExecutor(max_workers=min(len(tasks), 4)) as pool:
futures = {
pool.submit(_search_one, emb, sc): (emb, sc)
for emb, sc in tasks
pool.submit(_search_one, emb, sc): (emb, sc) for emb, sc in tasks
}
for future in as_completed(futures):
scope, results = future.result()
@@ -147,16 +147,16 @@ class RecallFlow(Flow[RecallState]):
top_composite, _ = compute_composite_score(
results[0][0], results[0][1], self._config
)
findings.append({
"scope": scope,
"results": results,
"top_score": top_composite,
})
findings.append(
{
"scope": scope,
"results": results,
"top_score": top_composite,
}
)
self.state.chunk_findings = findings
self.state.confidence = max(
(f["top_score"] for f in findings), default=0.0
)
self.state.confidence = max((f["top_score"] for f in findings), default=0.0)
return findings
# ------------------------------------------------------------------
@@ -210,12 +210,16 @@ class RecallFlow(Flow[RecallState]):
# Parse time_filter into a datetime cutoff
if analysis.time_filter:
try:
self.state.time_cutoff = datetime.fromisoformat(analysis.time_filter)
self.state.time_cutoff = datetime.fromisoformat(
analysis.time_filter
)
except ValueError:
pass
# Batch-embed all sub-queries in ONE call
queries = analysis.recall_queries if analysis.recall_queries else [self.state.query]
queries = (
analysis.recall_queries if analysis.recall_queries else [self.state.query]
)
queries = queries[:3]
embeddings = embed_texts(self._embedder, queries)
pairs: list[tuple[str, list[float]]] = [
@@ -296,17 +300,21 @@ class RecallFlow(Flow[RecallState]):
response = self._llm.call([{"role": "user", "content": prompt}])
if isinstance(response, str) and "missing" in response.lower():
self.state.evidence_gaps.append(response[:200])
enhanced.append({
"scope": finding["scope"],
"extraction": response,
"results": finding["results"],
})
enhanced.append(
{
"scope": finding["scope"],
"extraction": response,
"results": finding["results"],
}
)
except Exception:
enhanced.append({
"scope": finding["scope"],
"extraction": "",
"results": finding["results"],
})
enhanced.append(
{
"scope": finding["scope"],
"extraction": "",
"results": finding["results"],
}
)
self.state.chunk_findings = enhanced
return enhanced

View File

@@ -0,0 +1,40 @@
"""Agent Skills standard implementation for crewAI.
Provides filesystem-based skill packaging with progressive disclosure.
"""
from crewai.skills.loader import (
activate_skill,
discover_skills,
format_skill_context,
load_resources,
)
from crewai.skills.models import (
INSTRUCTIONS,
METADATA,
RESOURCES,
DisclosureLevel,
ResourceDirName,
Skill,
SkillFrontmatter,
)
from crewai.skills.parser import SkillParseError, parse_skill_md
from crewai.skills.validation import coerce_skill_paths
__all__ = [
"INSTRUCTIONS",
"METADATA",
"RESOURCES",
"DisclosureLevel",
"ResourceDirName",
"Skill",
"SkillFrontmatter",
"SkillParseError",
"activate_skill",
"coerce_skill_paths",
"discover_skills",
"format_skill_context",
"load_resources",
"parse_skill_md",
]

View File

@@ -0,0 +1,184 @@
"""Filesystem discovery and progressive loading for Agent Skills.
Provides functions to discover skills in directories, activate them
for agent use, and format skill context for prompt injection.
"""
from __future__ import annotations
import logging
from pathlib import Path
from typing import TYPE_CHECKING
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.skill_events import (
SkillActivatedEvent,
SkillDiscoveryCompletedEvent,
SkillDiscoveryStartedEvent,
SkillLoadFailedEvent,
SkillLoadedEvent,
)
from crewai.skills.models import INSTRUCTIONS, RESOURCES, Skill
from crewai.skills.parser import (
SKILL_FILENAME,
load_skill_instructions,
load_skill_metadata,
load_skill_resources,
)
if TYPE_CHECKING:
from crewai.agents.agent_builder.base_agent import BaseAgent
_logger = logging.getLogger(__name__)
def discover_skills(
    search_path: Path,
    source: BaseAgent | None = None,
) -> list[Skill]:
    """Scan a directory for skill directories containing SKILL.md.

    Each subdirectory of ``search_path`` holding a SKILL.md file is parsed
    at METADATA disclosure level. Skills that fail to load are logged and
    skipped so one bad skill cannot abort discovery.

    Args:
        search_path: Directory to scan for skill subdirectories.
        source: Optional event source (agent or crew) for event emission.

    Returns:
        List of Skill instances at METADATA level, ordered by directory name.

    Raises:
        FileNotFoundError: If ``search_path`` is missing or not a directory.
    """
    if not search_path.is_dir():
        raise FileNotFoundError(
            f"Skill search path does not exist or is not a directory: {search_path}"
        )

    def _emit(build) -> None:
        # Events are constructed lazily and dispatched only when a
        # source (agent or crew) was supplied by the caller.
        if source is not None:
            crewai_event_bus.emit(source, event=build())

    discovered: list[Skill] = []
    _emit(
        lambda: SkillDiscoveryStartedEvent(
            from_agent=source,
            search_path=search_path,
        )
    )
    for candidate in sorted(search_path.iterdir()):
        # Only directories that ship a SKILL.md are considered skills.
        if not candidate.is_dir() or not (candidate / SKILL_FILENAME).is_file():
            continue
        try:
            loaded = load_skill_metadata(candidate)
        except Exception as e:
            # Best-effort: record the failure and keep scanning.
            _logger.warning("Failed to load skill from %s: %s", candidate, e)
            _emit(
                lambda: SkillLoadFailedEvent(
                    from_agent=source,
                    skill_name=candidate.name,
                    skill_path=candidate,
                    error=str(e),
                )
            )
        else:
            discovered.append(loaded)
            _emit(
                lambda: SkillLoadedEvent(
                    from_agent=source,
                    skill_name=loaded.name,
                    skill_path=loaded.path,
                    disclosure_level=loaded.disclosure_level,
                )
            )
    _emit(
        lambda: SkillDiscoveryCompletedEvent(
            from_agent=source,
            search_path=search_path,
            skills_found=len(discovered),
            skill_names=[s.name for s in discovered],
        )
    )
    return discovered
def activate_skill(
    skill: Skill,
    source: BaseAgent | None = None,
) -> Skill:
    """Promote a skill to INSTRUCTIONS disclosure level.

    Idempotent: a skill already at or above INSTRUCTIONS is returned as-is.

    Args:
        skill: Skill to activate.
        source: Optional event source for event emission.

    Returns:
        Skill at INSTRUCTIONS level or higher.
    """
    if skill.disclosure_level >= INSTRUCTIONS:
        # Already activated (or fully loaded) — nothing to do.
        return skill
    promoted = load_skill_instructions(skill)
    if source is not None:
        crewai_event_bus.emit(
            source,
            event=SkillActivatedEvent(
                from_agent=source,
                skill_name=promoted.name,
                skill_path=promoted.path,
                disclosure_level=promoted.disclosure_level,
            ),
        )
    return promoted
def load_resources(skill: Skill) -> Skill:
    """Promote a skill to RESOURCES disclosure level.

    Thin public wrapper over the parser-level loader, which catalogs the
    skill's resource directories.

    Args:
        skill: Skill to promote.

    Returns:
        Skill at RESOURCES level.
    """
    return load_skill_resources(skill)
def format_skill_context(skill: Skill) -> str:
    """Format skill information for agent prompt injection.

    At METADATA level: returns name and description only.
    At INSTRUCTIONS level or above: returns the full SKILL.md body, and at
    RESOURCES level additionally lists cataloged resource files.

    Args:
        skill: The skill to format.

    Returns:
        Formatted skill context string.
    """
    header = f"## Skill: {skill.name}"
    # Without loaded instructions, only the metadata summary is available.
    if skill.disclosure_level < INSTRUCTIONS or not skill.instructions:
        return f"{header}\n{skill.description}"
    lines = [header, skill.description, "", skill.instructions]
    if skill.disclosure_level >= RESOURCES and skill.resource_files:
        lines.append("")
        lines.append("### Available Resources")
        for directory, names in sorted(skill.resource_files.items()):
            if names:
                lines.append(f"- **{directory}/**: {', '.join(names)}")
    return "\n".join(lines)

View File

@@ -0,0 +1,175 @@
"""Pydantic data models for the Agent Skills standard.
Defines DisclosureLevel, SkillFrontmatter, and Skill models for
progressive disclosure of skill information.
"""
from __future__ import annotations
from pathlib import Path
from typing import Annotated, Any, Final, Literal
from pydantic import BaseModel, ConfigDict, Field, model_validator
from crewai.skills.validation import (
MAX_SKILL_NAME_LENGTH,
MIN_SKILL_NAME_LENGTH,
SKILL_NAME_PATTERN,
)
# Upper bound enforced on SkillFrontmatter.description length.
MAX_DESCRIPTION_LENGTH: Final[int] = 1024
# Names of the resource subdirectories a skill may ship next to SKILL.md.
ResourceDirName = Literal["scripts", "references", "assets"]
# Progressive disclosure is modeled as an ordered int literal (1 < 2 < 3)
# so levels can be compared with >= in the loader/parser promotion checks.
DisclosureLevel = Annotated[
    Literal[1, 2, 3], "Progressive disclosure levels for skill loading."
]
# Level 1: only frontmatter metadata (name, description) is loaded.
METADATA: Final[
    Annotated[
        DisclosureLevel, "Only frontmatter metadata is loaded (name, description)."
    ]
] = 1
# Level 2: the full SKILL.md body is loaded into Skill.instructions.
INSTRUCTIONS: Final[Annotated[DisclosureLevel, "Full SKILL.md body is loaded."]] = 2
# Level 3: resource directories are cataloged into Skill.resource_files.
RESOURCES: Final[
    Annotated[
        DisclosureLevel,
        "Resource directories (scripts, references, assets) are cataloged.",
    ]
] = 3
class SkillFrontmatter(BaseModel):
    """YAML frontmatter from a SKILL.md file.

    Attributes:
        name: Unique skill identifier (1-64 chars, lowercase alphanumeric + hyphens).
        description: Human-readable description (1-1024 chars).
        license: Optional license name or reference.
        compatibility: Optional compatibility information (max 500 chars).
        metadata: Optional additional metadata as string key-value pairs.
        allowed_tools: Optional space-delimited list of pre-approved tools.
    """

    # frozen: frontmatter is immutable once parsed.
    # populate_by_name: accepts both the "allowed-tools" alias and the
    # Python field name "allowed_tools".
    model_config = ConfigDict(frozen=True, populate_by_name=True)

    name: str = Field(
        min_length=MIN_SKILL_NAME_LENGTH,
        max_length=MAX_SKILL_NAME_LENGTH,
        pattern=SKILL_NAME_PATTERN,
    )
    description: str = Field(min_length=1, max_length=MAX_DESCRIPTION_LENGTH)
    license: str | None = Field(
        default=None,
        description="SPDX license identifier or free-text license reference, e.g. 'MIT', 'Apache-2.0'.",
    )
    compatibility: str | None = Field(
        default=None,
        max_length=500,
        description="Version or platform constraints for the skill, e.g. 'crewai >= 0.80'.",
    )
    metadata: dict[str, str] | None = Field(
        default=None,
        description="Arbitrary string key-value pairs for custom skill metadata.",
    )
    allowed_tools: list[str] | None = Field(
        default=None,
        alias="allowed-tools",
        description="Pre-approved tool names the skill may use, parsed from a space-delimited string in frontmatter.",
    )

    @model_validator(mode="before")
    @classmethod
    def parse_allowed_tools(cls, values: Any) -> Any:
        """Parse a space-delimited allowed-tools string into a list.

        Accepts either the frontmatter alias key ("allowed-tools") or the
        Python field name ("allowed_tools").
        """
        # A mode="before" validator may receive non-dict input (pydantic
        # passes the raw value through, e.g. an existing model instance).
        # Pass it along untouched so pydantic raises a proper
        # ValidationError instead of this validator crashing on .get().
        if not isinstance(values, dict):
            return values
        raw = values.get("allowed-tools") or values.get("allowed_tools")
        if isinstance(raw, str):
            values["allowed-tools"] = raw.split()
        return values
class Skill(BaseModel):
    """A loaded Agent Skill with progressive disclosure support.

    Attributes:
        frontmatter: Parsed YAML frontmatter.
        instructions: Full SKILL.md body text (populated at INSTRUCTIONS level).
        path: Filesystem path to the skill directory.
        disclosure_level: Current disclosure level of the skill.
        resource_files: Cataloged resource files (populated at RESOURCES level).
    """

    frontmatter: SkillFrontmatter = Field(
        description="Parsed YAML frontmatter from SKILL.md.",
    )
    instructions: str | None = Field(
        default=None,
        description="Full SKILL.md body text, populated at INSTRUCTIONS level.",
    )
    path: Path = Field(
        description="Filesystem path to the skill directory.",
    )
    disclosure_level: DisclosureLevel = Field(
        default=METADATA,
        description="Current progressive disclosure level of the skill.",
    )
    resource_files: dict[ResourceDirName, list[str]] | None = Field(
        default=None,
        description="Cataloged resource files by directory, populated at RESOURCES level.",
    )

    @property
    def name(self) -> str:
        """Skill name, delegated to the frontmatter."""
        return self.frontmatter.name

    @property
    def description(self) -> str:
        """Skill description, delegated to the frontmatter."""
        return self.frontmatter.description

    @property
    def scripts_dir(self) -> Path:
        """Location of the skill's scripts directory."""
        return self.path / "scripts"

    @property
    def references_dir(self) -> Path:
        """Location of the skill's references directory."""
        return self.path / "references"

    @property
    def assets_dir(self) -> Path:
        """Location of the skill's assets directory."""
        return self.path / "assets"

    def with_disclosure_level(
        self,
        level: DisclosureLevel,
        instructions: str | None = None,
        resource_files: dict[ResourceDirName, list[str]] | None = None,
    ) -> Skill:
        """Create a new Skill at a different disclosure level.

        Args:
            level: The new disclosure level.
            instructions: Optional instructions body text.
            resource_files: Optional cataloged resource files.

        Returns:
            A new Skill instance at the specified disclosure level.
        """
        # Fall back to the current values when no override is supplied.
        body = self.instructions if instructions is None else instructions
        catalog = self.resource_files if resource_files is None else resource_files
        return Skill(
            frontmatter=self.frontmatter,
            instructions=body,
            path=self.path,
            disclosure_level=level,
            resource_files=catalog,
        )

View File

@@ -0,0 +1,194 @@
"""SKILL.md file parsing for the Agent Skills standard.
Parses YAML frontmatter and markdown body from SKILL.md files,
and provides progressive loading functions for skill data.
"""
from __future__ import annotations
import logging
from pathlib import Path
import re
from typing import Any, Final
import yaml
from crewai.skills.models import (
INSTRUCTIONS,
METADATA,
RESOURCES,
ResourceDirName,
Skill,
SkillFrontmatter,
)
from crewai.skills.validation import validate_directory_name
_logger = logging.getLogger(__name__)
# Canonical skill definition filename inside each skill directory.
SKILL_FILENAME: Final[str] = "SKILL.md"
# Matches a closing '---' delimiter on its own line (trailing spaces/tabs allowed).
_CLOSING_DELIMITER: Final[re.Pattern[str]] = re.compile(r"\n---[ \t]*(?:\n|$)")
# Body size above which a context-window warning is logged (not a hard limit).
_MAX_BODY_CHARS: Final[int] = 50_000
class SkillParseError(ValueError):
    """Error raised when SKILL.md parsing fails.

    Raised for missing or malformed frontmatter delimiters, invalid YAML,
    or frontmatter that is not a YAML mapping.
    """
def parse_frontmatter(content: str) -> tuple[dict[str, Any], str]:
    """Split SKILL.md content into frontmatter dict and body text.

    Args:
        content: Raw SKILL.md file content.

    Returns:
        Tuple of (frontmatter dict, body text), both stripped of
        surrounding whitespace.

    Raises:
        SkillParseError: If frontmatter delimiters are missing or YAML is invalid.
    """
    if not content.startswith("---"):
        raise SkillParseError("SKILL.md must start with '---' frontmatter delimiter")
    # Look for the closing delimiter after the opening "---".
    closing = _CLOSING_DELIMITER.search(content, pos=3)
    if closing is None:
        raise SkillParseError("SKILL.md missing closing '---' frontmatter delimiter")
    raw_yaml = content[3 : closing.start()].strip()
    body_text = content[closing.end() :].strip()
    try:
        parsed = yaml.safe_load(raw_yaml)
    except yaml.YAMLError as e:
        raise SkillParseError(f"Invalid YAML in frontmatter: {e}") from e
    if not isinstance(parsed, dict):
        raise SkillParseError("Frontmatter must be a YAML mapping")
    return parsed, body_text
def parse_skill_md(path: Path) -> tuple[SkillFrontmatter, str]:
    """Read and parse a SKILL.md file.

    Args:
        path: Path to the SKILL.md file.

    Returns:
        Tuple of (SkillFrontmatter, body text).

    Raises:
        FileNotFoundError: If the file does not exist.
        SkillParseError: If parsing fails.
    """
    raw = path.read_text(encoding="utf-8")
    meta, body = parse_frontmatter(raw)
    # Field-level validation (name pattern, lengths) happens here.
    return SkillFrontmatter(**meta), body
def load_skill_metadata(skill_dir: Path) -> Skill:
    """Load a skill at METADATA disclosure level.

    Parses SKILL.md frontmatter only and validates the directory name; the
    body is read but not retained at this level.

    Args:
        skill_dir: Path to the skill directory.

    Returns:
        Skill instance at METADATA level.

    Raises:
        FileNotFoundError: If SKILL.md is missing.
        SkillParseError: If parsing fails.
        ValueError: If directory name doesn't match skill name.
    """
    fm, body_text = parse_skill_md(skill_dir / SKILL_FILENAME)
    validate_directory_name(skill_dir, fm.name)
    if len(body_text) > _MAX_BODY_CHARS:
        # Oversized bodies are allowed but flagged for context-window cost.
        _logger.warning(
            "SKILL.md body for '%s' is %d chars (threshold: %d). "
            "Large bodies may consume significant context window when injected into prompts.",
            fm.name,
            len(body_text),
            _MAX_BODY_CHARS,
        )
    return Skill(
        frontmatter=fm,
        path=skill_dir,
        disclosure_level=METADATA,
    )
def load_skill_instructions(skill: Skill) -> Skill:
    """Promote a skill to INSTRUCTIONS disclosure level.

    Re-reads SKILL.md to capture the full body text. Idempotent for skills
    already at or above INSTRUCTIONS.

    Args:
        skill: Skill at METADATA level.

    Returns:
        New Skill instance at INSTRUCTIONS level.
    """
    if skill.disclosure_level >= INSTRUCTIONS:
        return skill
    _, body_text = parse_skill_md(skill.path / SKILL_FILENAME)
    if len(body_text) > _MAX_BODY_CHARS:
        # Oversized bodies are allowed but flagged for context-window cost.
        _logger.warning(
            "SKILL.md body for '%s' is %d chars (threshold: %d). "
            "Large bodies may consume significant context window when injected into prompts.",
            skill.name,
            len(body_text),
            _MAX_BODY_CHARS,
        )
    return skill.with_disclosure_level(level=INSTRUCTIONS, instructions=body_text)
def load_skill_resources(skill: Skill) -> Skill:
    """Promote a skill to RESOURCES disclosure level.

    Catalogs files under the scripts, references, and assets directories,
    first loading instructions if the skill is still at METADATA level.
    Idempotent for skills already at RESOURCES.

    Args:
        skill: Skill at any level.

    Returns:
        New Skill instance at RESOURCES level.
    """
    if skill.disclosure_level >= RESOURCES:
        return skill
    if skill.disclosure_level < INSTRUCTIONS:
        # Instructions must be loaded before resources are cataloged.
        skill = load_skill_instructions(skill)
    catalog: dict[ResourceDirName, list[str]] = {}
    named_dirs: list[tuple[ResourceDirName, Path]] = [
        ("scripts", skill.scripts_dir),
        ("references", skill.references_dir),
        ("assets", skill.assets_dir),
    ]
    for label, directory in named_dirs:
        if not directory.is_dir():
            continue
        # Record every file recursively, as paths relative to its resource dir.
        catalog[label] = sorted(
            str(entry.relative_to(directory))
            for entry in directory.rglob("*")
            if entry.is_file()
        )
    return skill.with_disclosure_level(
        level=RESOURCES,
        instructions=skill.instructions,
        resource_files=catalog,
    )

View File

@@ -0,0 +1,45 @@
"""Validation functions for Agent Skills specification constraints.
Validates skill names and directory structures per the Agent Skills standard.
"""
from __future__ import annotations
from pathlib import Path
import re
from typing import Any, Final
# Skill-name length bounds per the Agent Skills specification.
MAX_SKILL_NAME_LENGTH: Final[int] = 64
MIN_SKILL_NAME_LENGTH: Final[int] = 1
# Lowercase alphanumeric segments separated by single hyphens, e.g. "my-skill".
SKILL_NAME_PATTERN: Final[re.Pattern[str]] = re.compile(r"^[a-z0-9]+(?:-[a-z0-9]+)*$")
def coerce_skill_paths(v: list[Any] | None) -> list[Any] | None:
"""Coerce string entries to Path objects, pass through other types.
Args:
v: List of skill paths or Skill objects, or None.
Returns:
The list with string entries converted to Path objects, or None.
"""
if not v:
return v
return [Path(item) if isinstance(item, str) else item for item in v]
def validate_directory_name(skill_dir: Path, skill_name: str) -> None:
    """Validate that a directory name matches the skill name.

    Args:
        skill_dir: Path to the skill directory.
        skill_name: The declared skill name from frontmatter.

    Raises:
        ValueError: If the directory name does not match the skill name.
    """
    actual = skill_dir.name
    if actual == skill_name:
        return
    raise ValueError(
        f"Directory name '{actual}' does not match skill name '{skill_name}'"
    )

View File

@@ -100,7 +100,12 @@ class I18N(BaseModel):
def retrieve(
self,
kind: Literal[
"slices", "errors", "tools", "reasoning", "hierarchical_manager_agent", "memory"
"slices",
"errors",
"tools",
"reasoning",
"hierarchical_manager_agent",
"memory",
],
key: str,
) -> str:

View File

@@ -657,7 +657,10 @@ def _json_schema_to_pydantic_field(
A tuple of (type, Field) for use with create_model.
"""
type_ = _json_schema_to_pydantic_type(
json_schema, root_schema, name_=name.title(), enrich_descriptions=enrich_descriptions
json_schema,
root_schema,
name_=name.title(),
enrich_descriptions=enrich_descriptions,
)
is_required = name in required
@@ -806,7 +809,10 @@ def _json_schema_to_pydantic_type(
if ref:
ref_schema = _resolve_ref(ref, root_schema)
return _json_schema_to_pydantic_type(
ref_schema, root_schema, name_=name_, enrich_descriptions=enrich_descriptions
ref_schema,
root_schema,
name_=name_,
enrich_descriptions=enrich_descriptions,
)
enum_values = json_schema.get("enum")
@@ -835,12 +841,16 @@ def _json_schema_to_pydantic_type(
if all_of_schemas:
if len(all_of_schemas) == 1:
return _json_schema_to_pydantic_type(
all_of_schemas[0], root_schema, name_=name_,
all_of_schemas[0],
root_schema,
name_=name_,
enrich_descriptions=enrich_descriptions,
)
merged = _merge_all_of_schemas(all_of_schemas, root_schema)
return _json_schema_to_pydantic_type(
merged, root_schema, name_=name_,
merged,
root_schema,
name_=name_,
enrich_descriptions=enrich_descriptions,
)
@@ -858,7 +868,9 @@ def _json_schema_to_pydantic_type(
items_schema = json_schema.get("items")
if items_schema:
item_type = _json_schema_to_pydantic_type(
items_schema, root_schema, name_=name_,
items_schema,
root_schema,
name_=name_,
enrich_descriptions=enrich_descriptions,
)
return list[item_type] # type: ignore[valid-type]
@@ -870,7 +882,8 @@ def _json_schema_to_pydantic_type(
if json_schema_.get("title") is None:
json_schema_["title"] = name_ or "DynamicModel"
return create_model_from_schema(
json_schema_, root_schema=root_schema,
json_schema_,
root_schema=root_schema,
enrich_descriptions=enrich_descriptions,
)
return dict

View File

View File

@@ -0,0 +1,4 @@
---
name: Invalid--Name
description: This skill has an invalid name.
---

View File

@@ -0,0 +1,4 @@
---
name: minimal-skill
description: A minimal skill with only required fields.
---

View File

@@ -0,0 +1,22 @@
---
name: valid-skill
description: A complete test skill with all optional directories.
license: Apache-2.0
compatibility: crewai>=0.1.0
metadata:
author: test
version: "1.0"
allowed-tools: web-search file-read
---
## Instructions
This skill provides comprehensive instructions for the agent.
### Usage
Follow these steps to use the skill effectively.
### Notes
Additional context for the agent.

View File

@@ -0,0 +1 @@
{"key": "value"}

View File

@@ -0,0 +1,3 @@
# Reference Guide
This is a reference document for the skill.

View File

@@ -0,0 +1,2 @@
#!/bin/bash
echo "setup"

View File

@@ -0,0 +1,78 @@
"""Integration tests for the skills system."""
from pathlib import Path
import pytest
from crewai.skills.loader import activate_skill, discover_skills, format_skill_context
from crewai.skills.models import INSTRUCTIONS, METADATA
def _create_skill_dir(parent: Path, name: str, body: str = "Body.") -> Path:
"""Helper to create a skill directory with SKILL.md."""
skill_dir = parent / name
skill_dir.mkdir()
(skill_dir / "SKILL.md").write_text(
f"---\nname: {name}\ndescription: Skill {name}\n---\n{body}"
)
return skill_dir
class TestSkillDiscoveryAndActivation:
    """End-to-end tests for discover + activate workflow."""

    def test_discover_and_activate(self, tmp_path: Path) -> None:
        """A discovered skill starts at METADATA and activates to INSTRUCTIONS."""
        _create_skill_dir(tmp_path, "my-skill", body="Use this skill.")
        found = discover_skills(tmp_path)
        assert len(found) == 1
        assert found[0].disclosure_level == METADATA
        active = activate_skill(found[0])
        assert active.disclosure_level == INSTRUCTIONS
        assert active.instructions == "Use this skill."
        rendered = format_skill_context(active)
        assert "## Skill: my-skill" in rendered
        assert "Use this skill." in rendered

    def test_filter_by_skill_names(self, tmp_path: Path) -> None:
        """Discovery results can be post-filtered by name."""
        for name in ("alpha", "beta", "gamma"):
            _create_skill_dir(tmp_path, name)
        discovered = discover_skills(tmp_path)
        wanted = {"alpha", "gamma"}
        assert {s.name for s in discovered if s.name in wanted} == {"alpha", "gamma"}

    def test_full_fixture_skill(self) -> None:
        """The checked-in valid-skill fixture parses with all optional fields."""
        fixtures = Path(__file__).parent / "fixtures"
        if not (fixtures / "valid-skill").exists():
            pytest.skip("Fixture not found")
        matching = [s for s in discover_skills(fixtures) if s.name == "valid-skill"]
        assert len(matching) == 1
        skill = matching[0]
        assert skill.frontmatter.license == "Apache-2.0"
        assert skill.frontmatter.allowed_tools == ["web-search", "file-read"]
        active = activate_skill(skill)
        assert "Instructions" in (active.instructions or "")

    def test_multiple_search_paths(self, tmp_path: Path) -> None:
        """Skills from several roots can be merged into one list."""
        collected = []
        for sub in ("a", "b"):
            root = tmp_path / sub
            root.mkdir()
            _create_skill_dir(root, f"skill-{sub}")
            collected.extend(discover_skills(root))
        assert {s.name for s in collected} == {"skill-a", "skill-b"}

View File

@@ -0,0 +1,161 @@
"""Tests for skills/loader.py."""
from pathlib import Path
import pytest
from crewai.skills.loader import (
activate_skill,
discover_skills,
format_skill_context,
load_resources,
)
from crewai.skills.models import INSTRUCTIONS, METADATA, RESOURCES, Skill, SkillFrontmatter
from crewai.skills.parser import load_skill_metadata
def _create_skill_dir(parent: Path, name: str, body: str = "Body.") -> Path:
"""Helper to create a skill directory with SKILL.md."""
skill_dir = parent / name
skill_dir.mkdir()
(skill_dir / "SKILL.md").write_text(
f"---\nname: {name}\ndescription: Skill {name}\n---\n{body}"
)
return skill_dir
class TestDiscoverSkills:
    """Tests for discover_skills."""

    def test_finds_valid_skills(self, tmp_path: Path) -> None:
        """Every valid skill directory is discovered."""
        for name in ("alpha", "beta"):
            _create_skill_dir(tmp_path, name)
        assert {s.name for s in discover_skills(tmp_path)} == {"alpha", "beta"}

    def test_skips_dirs_without_skill_md(self, tmp_path: Path) -> None:
        """Directories lacking a SKILL.md are ignored."""
        _create_skill_dir(tmp_path, "valid")
        (tmp_path / "no-skill").mkdir()
        found = discover_skills(tmp_path)
        assert len(found) == 1
        assert found[0].name == "valid"

    def test_skips_invalid_skills(self, tmp_path: Path) -> None:
        """A skill whose directory/name mismatch fails validation is skipped."""
        _create_skill_dir(tmp_path, "good-skill")
        bad = tmp_path / "bad-skill"
        bad.mkdir()
        (bad / "SKILL.md").write_text("---\nname: Wrong-Name\ndescription: bad\n---\n")
        assert len(discover_skills(tmp_path)) == 1

    def test_empty_directory(self, tmp_path: Path) -> None:
        """An empty search path yields no skills."""
        assert discover_skills(tmp_path) == []

    def test_nonexistent_path(self, tmp_path: Path) -> None:
        """A missing search path raises FileNotFoundError."""
        with pytest.raises(FileNotFoundError):
            discover_skills(tmp_path / "nonexistent")

    def test_sorted_by_name(self, tmp_path: Path) -> None:
        """Discovery order follows the sorted directory names."""
        for name in ("zebra", "alpha"):
            _create_skill_dir(tmp_path, name)
        assert [s.name for s in discover_skills(tmp_path)] == ["alpha", "zebra"]
class TestActivateSkill:
    """Tests for activate_skill."""

    def test_promotes_to_instructions(self, tmp_path: Path) -> None:
        """Activation loads the SKILL.md body at INSTRUCTIONS level."""
        _create_skill_dir(tmp_path, "my-skill", body="Instructions.")
        metadata_skill = load_skill_metadata(tmp_path / "my-skill")
        active = activate_skill(metadata_skill)
        assert active.disclosure_level == INSTRUCTIONS
        assert active.instructions == "Instructions."

    def test_idempotent(self, tmp_path: Path) -> None:
        """Re-activating returns the same object, not a new copy."""
        _create_skill_dir(tmp_path, "my-skill")
        metadata_skill = load_skill_metadata(tmp_path / "my-skill")
        active = activate_skill(metadata_skill)
        assert activate_skill(active) is active
class TestLoadResources:
    """Tests for load_resources."""

    def test_promotes_to_resources(self, tmp_path: Path) -> None:
        """A skill with a scripts directory reaches RESOURCES level."""
        skill_dir = _create_skill_dir(tmp_path, "my-skill")
        scripts = skill_dir / "scripts"
        scripts.mkdir()
        (scripts / "run.sh").write_text("#!/bin/bash")
        promoted = load_resources(load_skill_metadata(skill_dir))
        assert promoted.disclosure_level == RESOURCES
class TestFormatSkillContext:
    """Tests for format_skill_context."""

    def test_metadata_level(self, tmp_path: Path) -> None:
        """Metadata-level context includes the header and the description."""
        meta = SkillFrontmatter(name="test-skill", description="A skill")
        rendered = format_skill_context(
            Skill(frontmatter=meta, path=tmp_path, disclosure_level=METADATA)
        )
        assert "## Skill: test-skill" in rendered
        assert "A skill" in rendered

    def test_instructions_level(self, tmp_path: Path) -> None:
        """Instructions-level context appends the loaded instructions text."""
        meta = SkillFrontmatter(name="test-skill", description="A skill")
        rendered = format_skill_context(
            Skill(
                frontmatter=meta,
                path=tmp_path,
                disclosure_level=INSTRUCTIONS,
                instructions="Do these things.",
            )
        )
        assert "## Skill: test-skill" in rendered
        assert "Do these things." in rendered

    def test_no_instructions_at_instructions_level(self, tmp_path: Path) -> None:
        """Without instructions text only the metadata header is rendered."""
        meta = SkillFrontmatter(name="test-skill", description="A skill")
        rendered = format_skill_context(
            Skill(
                frontmatter=meta,
                path=tmp_path,
                disclosure_level=INSTRUCTIONS,
                instructions=None,
            )
        )
        assert rendered == "## Skill: test-skill\nA skill"

    def test_resources_level(self, tmp_path: Path) -> None:
        """Resources-level context lists files grouped under each directory."""
        meta = SkillFrontmatter(name="test-skill", description="A skill")
        rendered = format_skill_context(
            Skill(
                frontmatter=meta,
                path=tmp_path,
                disclosure_level=RESOURCES,
                instructions="Do things.",
                resource_files={
                    "scripts": ["run.sh"],
                    "assets": ["data.json", "config.yaml"],
                },
            )
        )
        assert "### Available Resources" in rendered
        assert "**assets/**: data.json, config.yaml" in rendered
        assert "**scripts/**: run.sh" in rendered

    def test_resources_level_empty_files(self, tmp_path: Path) -> None:
        """An empty resource catalog omits the resources section entirely."""
        meta = SkillFrontmatter(name="test-skill", description="A skill")
        rendered = format_skill_context(
            Skill(
                frontmatter=meta,
                path=tmp_path,
                disclosure_level=RESOURCES,
                instructions="Do things.",
                resource_files={},
            )
        )
        assert "### Available Resources" not in rendered

View File

@@ -0,0 +1,91 @@
"""Tests for skills/models.py."""
from pathlib import Path
import pytest
from crewai.skills.models import (
INSTRUCTIONS,
METADATA,
RESOURCES,
Skill,
SkillFrontmatter,
)
class TestDisclosureLevel:
    """Tests for DisclosureLevel constants."""

    def test_ordering(self) -> None:
        """Levels are strictly increasing: metadata < instructions < resources."""
        assert METADATA < INSTRUCTIONS < RESOURCES

    def test_values(self) -> None:
        """The constants keep their documented integer values."""
        assert (METADATA, INSTRUCTIONS, RESOURCES) == (1, 2, 3)
class TestSkillFrontmatter:
    """Tests for SkillFrontmatter model."""

    def test_required_fields(self) -> None:
        """Only name and description are required; optional fields default to None."""
        fm = SkillFrontmatter(name="my-skill", description="A test skill")
        assert fm.name == "my-skill"
        assert fm.description == "A test skill"
        assert fm.license is None
        assert fm.metadata is None
        assert fm.allowed_tools is None

    def test_all_fields(self) -> None:
        """All optional fields round-trip through the model unchanged."""
        fm = SkillFrontmatter(
            name="web-search",
            description="Search the web",
            license="Apache-2.0",
            compatibility="crewai>=0.1.0",
            metadata={"author": "test"},
            allowed_tools=["browser"],
        )
        assert fm.license == "Apache-2.0"
        assert fm.metadata == {"author": "test"}
        assert fm.allowed_tools == ["browser"]

    def test_frozen(self) -> None:
        """The model is frozen: attribute assignment raises.

        Asserting ValueError (the parent class of pydantic's
        ValidationError) is precise without importing pydantic here;
        the previous bare Exception would also pass on unrelated errors.
        """
        fm = SkillFrontmatter(name="my-skill", description="desc")
        with pytest.raises(ValueError):
            fm.name = "other"  # type: ignore[misc]

    def test_invalid_name_rejected(self) -> None:
        """Names violating the skill-name pattern are rejected at construction."""
        with pytest.raises(ValueError):
            SkillFrontmatter(name="Invalid--Name", description="bad")
class TestSkill:
    """Tests for Skill model."""

    def test_properties(self, tmp_path: Path) -> None:
        """Name/description proxy the frontmatter; level defaults to METADATA."""
        meta = SkillFrontmatter(name="test-skill", description="desc")
        subject = Skill(frontmatter=meta, path=tmp_path / "test-skill")
        assert (subject.name, subject.description) == ("test-skill", "desc")
        assert subject.disclosure_level == METADATA

    def test_resource_dirs(self, tmp_path: Path) -> None:
        """Resource directory properties resolve relative to the skill path."""
        root = tmp_path / "test-skill"
        root.mkdir()
        subject = Skill(
            frontmatter=SkillFrontmatter(name="test-skill", description="desc"),
            path=root,
        )
        assert subject.scripts_dir == root / "scripts"
        assert subject.references_dir == root / "references"
        assert subject.assets_dir == root / "assets"

    def test_with_disclosure_level(self, tmp_path: Path) -> None:
        """Promotion returns a new instance and leaves the original untouched."""
        meta = SkillFrontmatter(name="test-skill", description="desc")
        original = Skill(frontmatter=meta, path=tmp_path)
        promoted = original.with_disclosure_level(
            INSTRUCTIONS,
            instructions="Do this.",
        )
        assert promoted.disclosure_level == INSTRUCTIONS
        assert promoted.instructions == "Do this."
        assert original.disclosure_level == METADATA

View File

@@ -0,0 +1,167 @@
"""Tests for skills/parser.py."""
from pathlib import Path
import pytest
from crewai.skills.models import INSTRUCTIONS, METADATA, RESOURCES
from crewai.skills.parser import (
SkillParseError,
load_skill_instructions,
load_skill_metadata,
load_skill_resources,
parse_frontmatter,
parse_skill_md,
)
class TestParseFrontmatter:
    """Tests for parse_frontmatter."""

    def test_valid_frontmatter_and_body(self) -> None:
        """Frontmatter keys and the trimmed body are both returned."""
        raw = "---\nname: test\ndescription: A test\n---\n\nBody text here."
        meta, body = parse_frontmatter(raw)
        assert (meta["name"], meta["description"]) == ("test", "A test")
        assert body == "Body text here."

    def test_empty_body(self) -> None:
        """A document with nothing after the fence yields an empty body."""
        meta, body = parse_frontmatter("---\nname: test\ndescription: A test\n---")
        assert meta["name"] == "test"
        assert body == ""

    def test_missing_opening_delimiter(self) -> None:
        """Content without a leading --- fence is rejected."""
        with pytest.raises(SkillParseError, match="must start with"):
            parse_frontmatter("name: test\n---\nBody")

    def test_missing_closing_delimiter(self) -> None:
        """An unterminated frontmatter fence is rejected."""
        with pytest.raises(SkillParseError, match="missing closing"):
            parse_frontmatter("---\nname: test\n")

    def test_invalid_yaml(self) -> None:
        """Malformed YAML inside the fence is rejected."""
        with pytest.raises(SkillParseError, match="Invalid YAML"):
            parse_frontmatter("---\n: :\n bad: [yaml\n---\nBody")

    def test_triple_dash_in_body(self) -> None:
        """A --- appearing inside the body is not treated as a fence."""
        raw = "---\nname: test\ndescription: desc\n---\n\nBody with --- inside."
        _, body = parse_frontmatter(raw)
        assert "---" in body

    def test_inline_triple_dash_in_yaml_value(self) -> None:
        """A --- embedded in a quoted YAML value is preserved verbatim."""
        raw = '---\nname: test\ndescription: "Use---carefully"\n---\n\nBody.'
        meta, body = parse_frontmatter(raw)
        assert meta["description"] == "Use---carefully"
        assert body == "Body."

    def test_unicode_content(self) -> None:
        """Non-ASCII text survives in both frontmatter and body."""
        raw = "---\nname: test\ndescription: Beschreibung\n---\n\nUnicode: \u00e4\u00f6\u00fc\u00df"
        meta, body = parse_frontmatter(raw)
        assert meta["description"] == "Beschreibung"
        assert "\u00e4\u00f6\u00fc\u00df" in body

    def test_non_mapping_frontmatter(self) -> None:
        """Frontmatter that parses to a YAML list (not a mapping) is rejected."""
        with pytest.raises(SkillParseError, match="must be a YAML mapping"):
            parse_frontmatter("---\n- item1\n- item2\n---\nBody")
class TestParseSkillMd:
    """Tests for parse_skill_md."""

    def test_valid_file(self, tmp_path: Path) -> None:
        """Frontmatter and body are both parsed from a SKILL.md file."""
        target = tmp_path / "SKILL.md"
        target.write_text(
            "---\nname: my-skill\ndescription: desc\n---\nInstructions here."
        )
        meta, body = parse_skill_md(target)
        assert meta.name == "my-skill"
        assert body == "Instructions here."

    def test_file_not_found(self, tmp_path: Path) -> None:
        """A missing file surfaces as FileNotFoundError."""
        missing = tmp_path / "nonexistent" / "SKILL.md"
        with pytest.raises(FileNotFoundError):
            parse_skill_md(missing)
class TestLoadSkillMetadata:
    """Tests for load_skill_metadata."""

    def test_valid_skill(self, tmp_path: Path) -> None:
        """A well-formed directory loads at METADATA with no instructions yet."""
        root = tmp_path / "my-skill"
        root.mkdir()
        (root / "SKILL.md").write_text(
            "---\nname: my-skill\ndescription: Test skill\n---\nBody"
        )
        loaded = load_skill_metadata(root)
        assert loaded.name == "my-skill"
        assert loaded.disclosure_level == METADATA
        assert loaded.instructions is None

    def test_directory_name_mismatch(self, tmp_path: Path) -> None:
        """A frontmatter name differing from the directory name is rejected."""
        root = tmp_path / "wrong-name"
        root.mkdir()
        (root / "SKILL.md").write_text(
            "---\nname: my-skill\ndescription: Test skill\n---\n"
        )
        with pytest.raises(ValueError, match="does not match"):
            load_skill_metadata(root)
class TestLoadSkillInstructions:
    """Tests for load_skill_instructions."""

    @staticmethod
    def _make_skill_dir(tmp_path: Path, markdown: str) -> Path:
        """Create a my-skill directory containing *markdown* as its SKILL.md."""
        root = tmp_path / "my-skill"
        root.mkdir()
        (root / "SKILL.md").write_text(markdown)
        return root

    def test_promotes_to_instructions(self, tmp_path: Path) -> None:
        """Promotion raises the level and captures the SKILL.md body."""
        root = self._make_skill_dir(
            tmp_path, "---\nname: my-skill\ndescription: Test\n---\nFull body."
        )
        promoted = load_skill_instructions(load_skill_metadata(root))
        assert promoted.disclosure_level == INSTRUCTIONS
        assert promoted.instructions == "Full body."

    def test_idempotent(self, tmp_path: Path) -> None:
        """Promoting an already-promoted skill returns the same object."""
        root = self._make_skill_dir(
            tmp_path, "---\nname: my-skill\ndescription: Test\n---\nBody."
        )
        promoted = load_skill_instructions(load_skill_metadata(root))
        assert load_skill_instructions(promoted) is promoted
class TestLoadSkillResources:
    """Tests for load_skill_resources."""

    def test_catalogs_resources(self, tmp_path: Path) -> None:
        """Files under resource directories are catalogued by directory name."""
        root = tmp_path / "my-skill"
        root.mkdir()
        (root / "SKILL.md").write_text(
            "---\nname: my-skill\ndescription: Test\n---\nBody."
        )
        for dirname, filename, content in (
            ("scripts", "run.sh", "#!/bin/bash"),
            ("assets", "data.json", "{}"),
        ):
            (root / dirname).mkdir()
            (root / dirname / filename).write_text(content)
        full = load_skill_resources(load_skill_metadata(root))
        assert full.disclosure_level == RESOURCES
        assert full.instructions == "Body."
        assert full.resource_files is not None
        assert "scripts" in full.resource_files
        assert "run.sh" in full.resource_files["scripts"]
        assert "assets" in full.resource_files
        assert "data.json" in full.resource_files["assets"]

    def test_no_resource_dirs(self, tmp_path: Path) -> None:
        """A skill with no resource directories yields an empty catalog."""
        root = tmp_path / "my-skill"
        root.mkdir()
        (root / "SKILL.md").write_text(
            "---\nname: my-skill\ndescription: Test\n---\nBody."
        )
        full = load_skill_resources(load_skill_metadata(root))
        assert full.resource_files == {}

View File

@@ -0,0 +1,93 @@
"""Tests for skills validation."""
from pathlib import Path
import pytest
from crewai.skills.models import SkillFrontmatter
from crewai.skills.validation import (
MAX_SKILL_NAME_LENGTH,
validate_directory_name,
)
def _make(name: str) -> SkillFrontmatter:
    """Build a SkillFrontmatter carrying *name* and a placeholder description."""
    return SkillFrontmatter(description="desc", name=name)
class TestSkillNameValidation:
    """Tests for skill name constraints via SkillFrontmatter."""

    @staticmethod
    def _accepts(candidate: str) -> None:
        """Assert that *candidate* passes name validation unchanged."""
        assert _make(candidate).name == candidate

    @staticmethod
    def _rejects(candidate: str) -> None:
        """Assert that *candidate* fails name validation."""
        with pytest.raises(ValueError):
            _make(candidate)

    def test_simple_name(self) -> None:
        self._accepts("web-search")

    def test_single_word(self) -> None:
        self._accepts("search")

    def test_numeric(self) -> None:
        self._accepts("tool3")

    def test_all_digits(self) -> None:
        self._accepts("123")

    def test_single_char(self) -> None:
        self._accepts("a")

    def test_max_length(self) -> None:
        self._accepts("a" * MAX_SKILL_NAME_LENGTH)

    def test_multi_hyphen_segments(self) -> None:
        self._accepts("my-cool-skill")

    def test_empty_raises(self) -> None:
        self._rejects("")

    def test_too_long_raises(self) -> None:
        self._rejects("a" * (MAX_SKILL_NAME_LENGTH + 1))

    def test_uppercase_raises(self) -> None:
        self._rejects("MySkill")

    def test_leading_hyphen_raises(self) -> None:
        self._rejects("-skill")

    def test_trailing_hyphen_raises(self) -> None:
        self._rejects("skill-")

    def test_consecutive_hyphens_raises(self) -> None:
        self._rejects("my--skill")

    def test_underscore_raises(self) -> None:
        self._rejects("my_skill")

    def test_space_raises(self) -> None:
        self._rejects("my skill")

    def test_special_chars_raises(self) -> None:
        self._rejects("skill@v1")
class TestValidateDirectoryName:
    """Tests for validate_directory_name."""

    def test_matching_names(self, tmp_path: Path) -> None:
        """A directory named after the skill validates without raising."""
        target = tmp_path / "my-skill"
        target.mkdir()
        validate_directory_name(target, "my-skill")

    def test_mismatched_names(self, tmp_path: Path) -> None:
        """A directory whose name differs from the skill name is rejected."""
        target = tmp_path / "other-name"
        target.mkdir()
        with pytest.raises(ValueError, match="does not match"):
            validate_directory_name(target, "my-skill")

2
uv.lock generated
View File

@@ -1115,6 +1115,7 @@ dependencies = [
{ name = "pydantic-settings" },
{ name = "pyjwt" },
{ name = "python-dotenv" },
{ name = "pyyaml" },
{ name = "regex" },
{ name = "textual" },
{ name = "tokenizers" },
@@ -1222,6 +1223,7 @@ requires-dist = [
{ name = "pydantic-settings", specifier = "~=2.10.1" },
{ name = "pyjwt", specifier = ">=2.9.0,<3" },
{ name = "python-dotenv", specifier = "~=1.1.1" },
{ name = "pyyaml", specifier = "~=6.0" },
{ name = "qdrant-client", extras = ["fastembed"], marker = "extra == 'qdrant'", specifier = "~=1.14.3" },
{ name = "regex", specifier = "~=2026.1.15" },
{ name = "textual", specifier = ">=7.5.0" },