From c8ec03424a30d5721aad6d75ac8cfe3c54314699 Mon Sep 17 00:00:00 2001
From: leopardracer <136604165+leopardracer@users.noreply.github.com>
Date: Tue, 6 May 2025 22:07:57 +0300
Subject: [PATCH] Fix typos in documentation and configuration files (#2712)

* Update test_lite_agent_structured_output.yaml

* Update install_crew.py

* Update llms.mdx

---------

Co-authored-by: Lucas Gomide
---
 docs/concepts/llms.mdx                                  | 2 +-
 src/crewai/cli/install_crew.py                          | 2 +-
 tests/cassettes/test_lite_agent_structured_output.yaml  | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/concepts/llms.mdx b/docs/concepts/llms.mdx
index cefc2705a..643ebfe16 100644
--- a/docs/concepts/llms.mdx
+++ b/docs/concepts/llms.mdx
@@ -378,7 +378,7 @@ In this section, you'll find detailed examples that help you select, configure,
 | microsoft/phi-3-medium-4k-instruct | 4,096 tokens | Lightweight, state-of-the-art open LLM with strong math and logical reasoning skills. |
 | microsoft/phi-3-medium-128k-instruct | 128K tokens | Lightweight, state-of-the-art open LLM with strong math and logical reasoning skills. |
 | microsoft/phi-3.5-mini-instruct | 128K tokens | Lightweight multilingual LLM powering AI applications in latency bound, memory/compute constrained environments |
-| microsoft/phi-3.5-moe-instruct | 128K tokens | Advanced LLM based on Mixture of Experts architecure to deliver compute efficient content generation |
+| microsoft/phi-3.5-moe-instruct | 128K tokens | Advanced LLM based on Mixture of Experts architecture to deliver compute efficient content generation |
 | microsoft/kosmos-2 | 1,024 tokens | Groundbreaking multimodal model designed to understand and reason about visual elements in images. |
 | microsoft/phi-3-vision-128k-instruct | 128k tokens | Cutting-edge open multimodal model exceling in high-quality reasoning from images. |
 | microsoft/phi-3.5-vision-instruct | 128k tokens | Cutting-edge open multimodal model exceling in high-quality reasoning from images. |
diff --git a/src/crewai/cli/install_crew.py b/src/crewai/cli/install_crew.py
index 9491932f1..bd0f35879 100644
--- a/src/crewai/cli/install_crew.py
+++ b/src/crewai/cli/install_crew.py
@@ -4,7 +4,7 @@ import click
 
 
 # Be mindful about changing this.
-# on some enviorments we don't use this command but instead uv sync directly
+# on some environments we don't use this command but instead uv sync directly
 # so if you expect this to support more things you will need to replicate it there
 # ask @joaomdmoura if you are unsure
 def install_crew(proxy_options: list[str]) -> None:
diff --git a/tests/cassettes/test_lite_agent_structured_output.yaml b/tests/cassettes/test_lite_agent_structured_output.yaml
index de3885cdd..86718712f 100644
--- a/tests/cassettes/test_lite_agent_structured_output.yaml
+++ b/tests/cassettes/test_lite_agent_structured_output.yaml
@@ -16,7 +16,7 @@ interactions:
       answer MUST contain all the information requested in the following format:
      {\n \"summary\": str,\n \"confidence\": int\n}\n\nIMPORTANT: Ensure the
      final output does not include any code block markers like ```json or ```python."},
      {"role": "user",
-     "content": "What is the population of Tokyo? Return your strucutred output in
+     "content": "What is the population of Tokyo? Return your structured output in
      JSON format with the following fields: summary, confidence"}], "model": "gpt-4o-mini",
      "stop": []}'
    headers: