From 4867dced0e8ef81e3e991565f6699fef512f9ad2 Mon Sep 17 00:00:00 2001
From: tonykipkemboi
Date: Wed, 20 Aug 2025 10:51:51 -0400
Subject: [PATCH] docs: fix API Reference OpenAPI sources and redirects;
 clarify training data usage; add Mermaid diagram; correct CLI usage and notes

---
 docs/docs.json                | 22 +++++++--
 docs/en/concepts/training.mdx | 85 ++++++++++++++++++++++++++++++-----
 2 files changed, 94 insertions(+), 13 deletions(-)

diff --git a/docs/docs.json b/docs/docs.json
index 907c9d174..f68eba3f1 100644
--- a/docs/docs.json
+++ b/docs/docs.json
@@ -345,7 +345,7 @@
       },
       {
         "group": "Endpoints",
-        "openapi": "https://raw.githubusercontent.com/crewAIInc/crewAI/main/docs/enterprise-api.en.yaml"
+        "openapi": "enterprise-api.en.yaml"
       }
     ]
   },
@@ -684,7 +684,7 @@
       },
       {
         "group": "Endpoints",
-        "openapi": "https://raw.githubusercontent.com/crewAIInc/crewAI/main/docs/enterprise-api.pt-BR.yaml"
+        "openapi": "enterprise-api.pt-BR.yaml"
      }
     ]
   },
@@ -1030,7 +1030,7 @@
       },
       {
         "group": "Endpoints",
-        "openapi": "https://raw.githubusercontent.com/crewAIInc/crewAI/main/docs/enterprise-api.ko.yaml"
+        "openapi": "enterprise-api.ko.yaml"
       }
     ]
   },
@@ -1081,6 +1081,10 @@
     "indexing": "all"
   },
   "redirects": [
+    {
+      "source": "/api-reference",
+      "destination": "/en/api-reference/introduction"
+    },
     {
       "source": "/introduction",
       "destination": "/en/introduction"
@@ -1133,6 +1137,18 @@
     {
       "source": "/api-reference/:path*",
       "destination": "/en/api-reference/:path*"
     },
+    {
+      "source": "/en/api-reference",
+      "destination": "/en/api-reference/introduction"
+    },
+    {
+      "source": "/pt-BR/api-reference",
+      "destination": "/pt-BR/api-reference/introduction"
+    },
+    {
+      "source": "/ko/api-reference",
+      "destination": "/ko/api-reference/introduction"
+    },
     {
       "source": "/examples/:path*",
       "destination": "/en/examples/:path*"

diff --git a/docs/en/concepts/training.mdx b/docs/en/concepts/training.mdx
index a6a3fd447..a70506ab7 100644
--- a/docs/en/concepts/training.mdx
+++ b/docs/en/concepts/training.mdx
@@ -21,13 +21,17 @@ To use the training feature, follow these steps:
 3. Run the following command:
 
 ```shell
-crewai train -n <n_iterations> (optional)
+crewai train -n <n_iterations> -f <filename.pkl>
 ```
 
 Replace `<n_iterations>` with the desired number of training iterations and `<filename>` with the appropriate filename ending with `.pkl`.
 
-### Training Your Crew Programmatically
+<Note>
+If you omit `-f`, the output defaults to `trained_agents_data.pkl` in the current working directory. You can pass an absolute path to control where the file is written.
+</Note>
+
+### Training your Crew programmatically
 
 To train your crew programmatically, use the following steps:
 
@@ -51,19 +55,65 @@
 except Exception as e:
     raise Exception(f"An error occurred while training the crew: {e}")
 ```
 
-### Key Points to Note
-
-- **Positive Integer Requirement:** Ensure that the number of iterations (`n_iterations`) is a positive integer. The code will raise a `ValueError` if this condition is not met.
-- **Filename Requirement:** Ensure that the filename ends with `.pkl`. The code will raise a `ValueError` if this condition is not met.
-- **Error Handling:** The code handles subprocess errors and unexpected exceptions, providing error messages to the user.
+## How trained data is used by agents
 
+CrewAI uses the training artifacts in two ways: during training to incorporate your human feedback, and after training to guide agents with consolidated suggestions.
 
-It is important to note that the training process may take some time, depending on the complexity of your agents and will also require your feedback on each iteration.
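+
+For example, once a training run has finished, you can open the consolidated artifact and see exactly what your agents will load. This is a minimal inspection sketch using plain `pickle`; it is not part of the CrewAI API and assumes the dict shape described under "File summary" below:
+
+```python
+import pickle
+from pathlib import Path
+
+# Inspect the consolidated training artifact in the current working directory.
+# Assumed shape (see "File summary" below):
+#   agent_role -> {"suggestions": [str], "quality": float, "final_summary": str}
+path = Path("trained_agents_data.pkl")
+if path.exists():
+    with path.open("rb") as f:
+        trained = pickle.load(f)
+    for role, result in trained.items():
+        print(f"{role}: quality={result.get('quality')}")
+        for tip in result.get("suggestions", []):
+            print(f"  - {tip}")
+else:
+    print("No trained_agents_data.pkl found in the current directory.")
+```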
+### Training data flow
+
-Once the training is complete, your agents will be equipped with enhanced capabilities and knowledge, ready to tackle complex tasks and provide more consistent and valuable insights.
-
+```mermaid
+flowchart TD
+    A["Start training<br/>CLI: crewai train -n <n_iterations> -f <filename.pkl><br/>or Python: crew.train(...)"] --> B["Setup training mode<br/>- task.human_input = true<br/>- disable delegation<br/>- init training_data.pkl + trained file"]
+
-Remember to regularly update and retrain your agents to ensure they stay up-to-date with the latest information and advancements in the field.
-
+    subgraph "Iterations"
+        direction LR
+        C["Iteration i<br/>initial_output"] --> D["User human_feedback"]
+        D --> E["improved_output"]
+        E --> F["Append to training_data.pkl<br/>by agent_id and iteration"]
+    end
+
-Happy training with CrewAI! 🚀
+    B --> C
+    F --> G{"More iterations?"}
+    G -- "Yes" --> C
+    G -- "No" --> H["Evaluate per agent<br/>aggregate iterations"]
+
+    H --> I["Consolidate<br/>suggestions[] + quality + final_summary"]
+    I --> J["Save by agent role to trained file<br/>(default: trained_agents_data.pkl)"]
+
+    J --> K["Normal (non-training) runs"]
+    K --> L["Auto-load suggestions<br/>from trained_agents_data.pkl"]
+    L --> M["Append to prompt<br/>for consistent improvements"]
+```
+
+### During training runs
+
+- On each iteration, the system records for every agent:
+  - `initial_output`: the agent's first answer
+  - `human_feedback`: your inline feedback when prompted
+  - `improved_output`: the agent's follow-up answer after feedback
+- This data is stored in a working file named `training_data.pkl`, keyed by the agent's internal ID and iteration.
+- While training is active, the agent automatically appends your prior human feedback to its prompt, enforcing those instructions on subsequent attempts within the training session.
+  Training is interactive: tasks set `human_input = true`, so running in a non-interactive environment will block on user input.
+
+### After training completes
+
+- When `train(...)` finishes, CrewAI evaluates the collected training data per agent and produces a consolidated result containing:
+  - `suggestions`: clear, actionable instructions distilled from your feedback and from the difference between the initial and improved outputs
+  - `quality`: a 0–10 score capturing the improvement
+  - `final_summary`: a step-by-step set of action items for future tasks
+- These consolidated results are saved to the filename you pass to `train(...)` (the CLI default is `trained_agents_data.pkl`). Entries are keyed by the agent's `role` so they can be applied across sessions.
+- During normal (non-training) execution, each agent automatically loads its consolidated `suggestions` and appends them to the task prompt as mandatory instructions. This gives you consistent improvements without changing your agent definitions.
+
+### File summary
+
+- `training_data.pkl` (ephemeral, per-session):
+  - Structure: `agent_id -> { iteration_number: { initial_output, human_feedback, improved_output } }`
+  - Purpose: capture raw data and human feedback during training
+  - Location: saved in the current working directory (CWD)
+- `trained_agents_data.pkl` (or your custom filename):
+  - Structure: `agent_role -> { suggestions: string[], quality: number, final_summary: string }`
+  - Purpose: persist consolidated guidance for future runs
+  - Location: written to the CWD by default; use `-f` to set a custom (including absolute) path
 
 ## Small Language Model Considerations
@@ -129,3 +179,18 @@ Happy training with CrewAI! 🚀
+
+### Key Points to Note
+
+- **Positive Integer Requirement:** The number of iterations (`n_iterations`) must be a positive integer; otherwise the code raises a `ValueError`.
+- **Filename Requirement:** The filename must end with `.pkl`; otherwise the code raises a `ValueError`.
+- **Error Handling:** The code handles subprocess errors and unexpected exceptions, reporting error messages to the user.
+- Trained guidance is applied at prompt time; it does not modify your Python/YAML agent configuration.
+- Agents automatically load trained suggestions from a file named `trained_agents_data.pkl` located in the current working directory. If you trained to a different filename, either rename it to `trained_agents_data.pkl` before running (see the sketch below), or adjust the loader in code.
+- You can change the output filename when calling `crewai train` with `-f/--filename`. Absolute paths are supported if you want to save outside the CWD.
+
+The training process may take some time, depending on the complexity of your agents, and it will also require your feedback at each iteration.
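+
+As a concrete illustration of the rename step mentioned above, this is a minimal sketch; the helper is not part of the CrewAI API, and `my_crew_training.pkl` is a hypothetical filename you might have passed to `-f`:
+
+```python
+import shutil
+from pathlib import Path
+
+# Agents auto-load guidance from trained_agents_data.pkl in the CWD, so copy a
+# custom-named training artifact to the default name before a normal run.
+custom = Path("my_crew_training.pkl")  # hypothetical output of: crewai train -f my_crew_training.pkl
+if custom.exists():
+    shutil.copy(custom, "trained_agents_data.pkl")
+```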
+
+Once the training is complete, your agents will be equipped with enhanced capabilities and knowledge, ready to tackle complex tasks and provide more consistent and valuable insights.
+
+Remember to regularly update and retrain your agents to ensure they stay up-to-date with the latest information and advancements in the field.