Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-08 15:48:29 +00:00)

Merge branch 'main' into feat/remove-langchain
.github/workflows/linter.yml (2 lines changed)

@@ -6,7 +6,7 @@ jobs:
   lint:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

       - name: Install Requirements
         run: |
.github/workflows/mkdocs.yml (8 lines changed)

@@ -13,10 +13,10 @@ jobs:

     steps:
       - name: Checkout code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4

       - name: Setup Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: '3.10'

@@ -25,7 +25,7 @@ jobs:
         run: echo "::set-output name=hash::$(sha256sum requirements-doc.txt | awk '{print $1}')"

       - name: Setup cache
-        uses: actions/cache@v3
+        uses: actions/cache@v4
         with:
           key: mkdocs-material-${{ steps.req-hash.outputs.hash }}
           path: .cache

@@ -42,4 +42,4 @@ jobs:
           GH_TOKEN: ${{ secrets.GH_TOKEN }}

       - name: Build and deploy MkDocs
-        run: mkdocs gh-deploy --force
+        run: mkdocs gh-deploy --force
.github/workflows/security-checker.yml (2 lines changed)

@@ -11,7 +11,7 @@ jobs:
         uses: actions/checkout@v4

       - name: Set up Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: "3.11.9"
.github/workflows/type-checker.yml (2 lines changed)

@@ -14,7 +14,7 @@ jobs:
         uses: actions/checkout@v4

       - name: Setup Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: "3.11.9"
@@ -121,7 +121,7 @@ researcher:
     You're a seasoned researcher with a knack for uncovering the latest
     developments in {topic}. Known for your ability to find the most relevant
     information and present it in a clear and concise manner.
-
+
 reporting_analyst:
   role: >
     {topic} Reporting Analyst

@@ -205,7 +205,7 @@ class LatestAiDevelopmentCrew():
             tasks=self.tasks, # Automatically created by the @task decorator
             process=Process.sequential,
             verbose=True,
-        )
+        )
 ```

 **main.py**

@@ -357,7 +357,7 @@ uv run pytest .
 ### Running static type checks

 ```bash
-uvx mypy
+uvx mypy src
 ```

 ### Packaging
@@ -11,10 +11,12 @@ from crewai.agents.crew_agent_executor import CrewAgentExecutor
 from crewai.cli.constants import ENV_VARS
 from crewai.llm import LLM
 from crewai.memory.contextual.contextual_memory import ContextualMemory
+from crewai.task import Task
 from crewai.tools import BaseTool
 from crewai.tools.agent_tools.agent_tools import AgentTools
 from crewai.utilities import Converter, Prompts
 from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
+from crewai.utilities.converter import generate_model_description
 from crewai.utilities.token_counter_callback import TokenCalcHandler
 from crewai.utilities.training_handler import CrewTrainingHandler
@@ -124,7 +126,7 @@ class Agent(BaseAgent):
     @model_validator(mode="after")
     def post_init_setup(self):
         self.agent_ops_agent_name = self.role
-        unnacepted_attributes = [
+        unaccepted_attributes = [
             "AWS_ACCESS_KEY_ID",
             "AWS_SECRET_ACCESS_KEY",
             "AWS_REGION_NAME",
@@ -158,28 +160,23 @@ class Agent(BaseAgent):
             for provider, env_vars in ENV_VARS.items():
                 if provider == set_provider:
                     for env_var in env_vars:
-                        if env_var["key_name"] in unnacepted_attributes:
-                            continue
-                        # Check if the environment variable is set
-                        if "key_name" in env_var:
-                            env_value = os.environ.get(env_var["key_name"])
+                        key_name = env_var.get("key_name")
+                        if key_name and key_name not in unaccepted_attributes:
+                            env_value = os.environ.get(key_name)
                             if env_value:
                                 # Map key names containing "API_KEY" to "api_key"
                                 key_name = (
-                                    "api_key"
-                                    if "API_KEY" in env_var["key_name"]
-                                    else env_var["key_name"]
+                                    "api_key" if "API_KEY" in key_name else key_name
                                 )
                                 # Map key names containing "API_BASE" to "api_base"
                                 key_name = (
-                                    "api_base"
-                                    if "API_BASE" in env_var["key_name"]
-                                    else key_name
+                                    "api_base" if "API_BASE" in key_name else key_name
                                 )
                                 # Map key names containing "API_VERSION" to "api_version"
                                 key_name = (
                                     "api_version"
-                                    if "API_VERSION" in env_var["key_name"]
+                                    if "API_VERSION" in key_name
                                     else key_name
                                 )
                                 llm_params[key_name] = env_value
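Taken together, the rewritten loop normalizes provider environment variables into LLM constructor kwargs. A standalone sketch of that mapping, with illustrative stand-ins for the real `ENV_VARS` constants:

```python
import os

# Illustrative stand-ins; the real entries live in crewai.cli.constants.ENV_VARS.
env_vars = [
    {"key_name": "OPENAI_API_KEY"},
    {"key_name": "OPENAI_API_BASE"},
    {"key_name": "OPENAI_API_VERSION"},
]
unaccepted_attributes = [
    "AWS_ACCESS_KEY_ID",
    "AWS_SECRET_ACCESS_KEY",
    "AWS_REGION_NAME",
]

llm_params = {}
for env_var in env_vars:
    key_name = env_var.get("key_name")
    if key_name and key_name not in unaccepted_attributes:
        env_value = os.environ.get(key_name)
        if env_value:
            # e.g. "OPENAI_API_KEY" -> "api_key", "OPENAI_API_BASE" -> "api_base"
            key_name = "api_key" if "API_KEY" in key_name else key_name
            key_name = "api_base" if "API_BASE" in key_name else key_name
            key_name = "api_version" if "API_VERSION" in key_name else key_name
            llm_params[key_name] = env_value
```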
@@ -237,7 +234,7 @@ class Agent(BaseAgent):

     def execute_task(
         self,
-        task: Any,
+        task: Task,
         context: Optional[str] = None,
         tools: Optional[List[BaseTool]] = None,
     ) -> str:
@@ -256,6 +253,22 @@ class Agent(BaseAgent):

         task_prompt = task.prompt()

+        # If the task requires output in JSON or Pydantic format,
+        # append specific instructions to the task prompt to ensure
+        # that the final answer does not include any code block markers
+        if task.output_json or task.output_pydantic:
+            # Generate the schema based on the output format
+            if task.output_json:
+                # schema = json.dumps(task.output_json, indent=2)
+                schema = generate_model_description(task.output_json)
+
+            elif task.output_pydantic:
+                schema = generate_model_description(task.output_pydantic)
+
+            task_prompt += "\n" + self.i18n.slice("formatted_task_instructions").format(
+                output_format=schema
+            )
+
         if context:
             task_prompt = self.i18n.slice("task_with_context").format(
                 task=task_prompt, context=context
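For reference, a hedged sketch of how the new branch is exercised from user code; the `ReportModel` class and the task fields here are illustrative, not part of the diff:

```python
from pydantic import BaseModel

from crewai import Task


class ReportModel(BaseModel):  # hypothetical output model
    title: str
    bullet_points: list[str]


# Because output_pydantic is set, execute_task() will now append the
# "formatted_task_instructions" slice, filled with the schema string from
# generate_model_description(ReportModel), to the task prompt.
task = Task(
    description="Summarize the latest AI developments",
    expected_output="A short structured report",
    output_pydantic=ReportModel,
)
```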
@@ -279,9 +279,7 @@ class Task(BaseModel):
         content = (
             json_output
             if json_output
-            else pydantic_output.model_dump_json()
-            if pydantic_output
-            else result
+            else pydantic_output.model_dump_json() if pydantic_output else result
         )
         self._save_file(content)
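The collapsed conditional expression is behavior-preserving; spelled out as plain statements it reads:

```python
# Equivalent control flow to the one-line conditional above:
if json_output:
    content = json_output
elif pydantic_output:
    content = pydantic_output.model_dump_json()
else:
    content = result
```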
@@ -11,7 +11,7 @@
     "role_playing": "You are {role}. {backstory}\nYour personal goal is: {goal}",
     "tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nUse the following format:\n\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple python dictionary, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n\nOnce all necessary information is gathered:\n\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n",
     "no_tools": "\nTo give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
-    "format": "I MUST either use a tool (use one at time) OR give my best final answer not both at the same time. To Use the following format:\n\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n... (this Thought/Action/Action Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n ",
+    "format": "I MUST either use a tool (use one at time) OR give my best final answer not both at the same time. To Use the following format:\n\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n... (this Thought/Action/Action Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n",
     "final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfy the expect criteria, use the EXACT format below:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n",
     "format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected format I must follow:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n",
     "task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",

@@ -21,7 +21,8 @@
     "summarizer_system_message": "You are a helpful assistant that summarizes text.",
     "sumamrize_instruction": "Summarize the following text, make sure to include all the important information: {group}",
     "summary": "This is a summary of our conversation so far:\n{merged_summary}",
-    "manager_request": "Your best answer to your coworker asking you this, accounting for the context shared."
+    "manager_request": "Your best answer to your coworker asking you this, accounting for the context shared.",
+    "formatted_task_instructions": "Ensure your final answer contains only the content in the following format: {output_format}\n\nEnsure the final output does not include any code block markers like ```json or ```python."
   },
   "errors": {
     "force_final_answer_error": "You can't keep going, this was the best you could do.\n {formatted_answer.text}",
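A small sketch of how the new slice is consumed at runtime, mirroring the execute_task() change above (the I18N import path is an assumption; the diff itself only shows self.i18n.slice(...)):

```python
from crewai.utilities import I18N  # assumed export path

i18n = I18N()
schema = '{\n  "name": str,\n  "age": int\n}'  # e.g. from generate_model_description
instructions = i18n.slice("formatted_task_instructions").format(output_format=schema)
# -> "Ensure your final answer contains only the content in the following format: ..."
```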
@@ -1,6 +1,6 @@
 import json
 import re
-from typing import Any, Optional, Type, Union
+from typing import Any, Optional, Type, Union, get_args, get_origin

 from pydantic import BaseModel, ValidationError

@@ -214,3 +214,38 @@ def create_converter(
         raise Exception("No output converter found or set.")

     return converter
+
+
+def generate_model_description(model: Type[BaseModel]) -> str:
+    """
+    Generate a string description of a Pydantic model's fields and their types.
+
+    This function takes a Pydantic model class and returns a string that describes
+    the model's fields and their respective types. The description includes handling
+    of complex types such as `Optional`, `List`, and `Dict`, as well as nested Pydantic
+    models.
+    """
+
+    def describe_field(field_type):
+        origin = get_origin(field_type)
+        args = get_args(field_type)
+
+        if origin is Union and type(None) in args:
+            non_none_args = [arg for arg in args if arg is not type(None)]
+            return f"Optional[{describe_field(non_none_args[0])}]"
+        elif origin is list:
+            return f"List[{describe_field(args[0])}]"
+        elif origin is dict:
+            key_type = describe_field(args[0])
+            value_type = describe_field(args[1])
+            return f"Dict[{key_type}, {value_type}]"
+        elif isinstance(field_type, type) and issubclass(field_type, BaseModel):
+            return generate_model_description(field_type)
+        else:
+            return field_type.__name__
+
+    fields = model.__annotations__
+    field_descriptions = [
+        f'"{name}": {describe_field(type_)}' for name, type_ in fields.items()
+    ]
+    return "{\n  " + ",\n  ".join(field_descriptions) + "\n}"
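To illustrate what the new helper emits, a quick usage sketch (the model classes here are invented for the example; note that nested models are inlined without re-indentation, matching the nested-model test below):

```python
from typing import Dict, List, Optional

from pydantic import BaseModel

from crewai.utilities.converter import generate_model_description


class Address(BaseModel):  # illustrative nested model
    city: str


class Person(BaseModel):  # illustrative top-level model
    name: Optional[str]
    tags: List[str]
    scores: Dict[str, int]
    address: Address


print(generate_model_description(Person))
# {
#   "name": Optional[str],
#   "tags": List[str],
#   "scores": Dict[str, int],
#   "address": {
#   "city": str
# }
# }
```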
@@ -1,7 +1,10 @@
 import json
+from typing import Dict, List, Optional
 from unittest.mock import MagicMock, Mock, patch

 import pytest
+from pydantic import BaseModel
+
 from crewai.llm import LLM
 from crewai.utilities.converter import (
     Converter,

@@ -9,12 +12,11 @@ from crewai.utilities.converter import (
     convert_to_model,
     convert_with_instructions,
     create_converter,
+    generate_model_description,
     get_conversion_instructions,
     handle_partial_json,
     validate_model,
 )
-from pydantic import BaseModel
-
 from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
@@ -269,3 +271,45 @@ def test_create_converter_fails_without_agent_or_converter_cls():
         create_converter(
             llm=Mock(), text="Sample", model=SimpleModel, instructions="Convert"
         )
+
+
+def test_generate_model_description_simple_model():
+    description = generate_model_description(SimpleModel)
+    expected_description = '{\n  "name": str,\n  "age": int\n}'
+    assert description == expected_description
+
+
+def test_generate_model_description_nested_model():
+    description = generate_model_description(NestedModel)
+    expected_description = (
+        '{\n  "id": int,\n  "data": {\n  "name": str,\n  "age": int\n}\n}'
+    )
+    assert description == expected_description
+
+
+def test_generate_model_description_optional_field():
+    class ModelWithOptionalField(BaseModel):
+        name: Optional[str]
+        age: int
+
+    description = generate_model_description(ModelWithOptionalField)
+    expected_description = '{\n  "name": Optional[str],\n  "age": int\n}'
+    assert description == expected_description
+
+
+def test_generate_model_description_list_field():
+    class ModelWithListField(BaseModel):
+        items: List[int]
+
+    description = generate_model_description(ModelWithListField)
+    expected_description = '{\n  "items": List[int]\n}'
+    assert description == expected_description
+
+
+def test_generate_model_description_dict_field():
+    class ModelWithDictField(BaseModel):
+        attributes: Dict[str, int]
+
+    description = generate_model_description(ModelWithDictField)
+    expected_description = '{\n  "attributes": Dict[str, int]\n}'
+    assert description == expected_description