mirror of https://github.com/crewAIInc/crewAI.git
synced 2026-01-08 07:38:29 +00:00

Compare commits: lg-fix-age...devin/1746 (1 commit, e128125428)

.github/workflows/linter.yml (vendored, 25 lines changed)
@@ -5,29 +5,12 @@ on: [pull_request]
 jobs:
   lint:
     runs-on: ubuntu-latest
-    env:
-      TARGET_BRANCH: ${{ github.event.pull_request.base.ref }}
     steps:
       - uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Fetch Target Branch
-        run: git fetch origin $TARGET_BRANCH --depth=1
-
-      - name: Install Ruff
-        run: pip install ruff
-
-      - name: Get Changed Python Files
-        id: changed-files
+      - name: Install Requirements
         run: |
-          merge_base=$(git merge-base origin/"$TARGET_BRANCH" HEAD)
-          changed_files=$(git diff --name-only --diff-filter=ACMRTUB "$merge_base" | grep '\.py$' || true)
-          echo "files<<EOF" >> $GITHUB_OUTPUT
-          echo "$changed_files" >> $GITHUB_OUTPUT
-          echo "EOF" >> $GITHUB_OUTPUT
+          pip install ruff
-
-      - name: Run Ruff on Changed Files
-        if: ${{ steps.changed-files.outputs.files != '' }}
-        run: |
-          echo "${{ steps.changed-files.outputs.files }}" | tr " " "\n" | xargs -I{} ruff check "{}"
+
+      - name: Run Ruff Linter
+        run: ruff check
@@ -2,3 +2,8 @@ exclude = [
     "templates",
     "__init__.py",
 ]
+
+[lint]
+select = [
+    "I", # isort rules
+]
@@ -700,11 +700,4 @@ recent_news = SpaceNewsKnowledgeSource(
 - Configure appropriate embedding models
 - Consider using local embedding providers for faster processing
 </Accordion>
-
-<Accordion title="One Time Knowledge">
-  - With the typical file structure provided by CrewAI, knowledge sources are embedded every time kickoff is triggered.
-  - If the knowledge sources are large, this is inefficient and adds latency, since the same data is embedded on each run.
-  - To resolve this, initialize the knowledge parameter directly instead of the knowledge_sources parameter.
-  - See the [GitHub issue](https://github.com/crewAIInc/crewAI/issues/2755) for the full discussion.
-</Accordion>
 </AccordionGroup>
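
A minimal sketch of the pattern the removed accordion describes: build the knowledge store once and pass it via the `knowledge` parameter, rather than handing raw sources to `knowledge_sources` and re-embedding on every kickoff. Module paths follow crewAI's knowledge package; the exact constructor arguments (notably `collection_name` and `Crew(knowledge=...)`) are assumptions based on the accordion text, not a verbatim API.

```python
# Hypothetical sketch (not part of this diff): embed knowledge once, reuse it
# across kickoffs instead of re-embedding via knowledge_sources each time.
from crewai import Agent, Crew, Task
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

source = StringKnowledgeSource(content="Company founded in 2020; HQ in Lisbon.")

# Built once at import time; kickoff() no longer re-embeds the sources.
company_knowledge = Knowledge(
    collection_name="company_facts",  # assumed parameter name
    sources=[source],
)

agent = Agent(
    role="Researcher",
    goal="Answer questions about the company",
    backstory="Knows the company inside out",
)
task = Task(
    description="When was the company founded?",
    expected_output="A year",
    agent=agent,
)
crew = Crew(
    agents=[agent],
    tasks=[task],
    knowledge=company_knowledge,  # instead of knowledge_sources=[source]
)
```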
@@ -20,7 +20,6 @@ from crewai.tools.agent_tools.agent_tools import AgentTools
 from crewai.utilities import Converter, Prompts
 from crewai.utilities.agent_utils import (
     get_tool_names,
-    load_agent_from_repository,
     parse_tools,
     render_text_description_and_args,
 )

@@ -135,16 +134,6 @@ class Agent(BaseAgent):
         default=None,
         description="Knowledge search query for the agent dynamically generated by the agent.",
     )
-    from_repository: Optional[str] = Field(
-        default=None,
-        description="The Agent's role to be used from your repository.",
-    )
-
-    @model_validator(mode="before")
-    def validate_from_repository(cls, v):
-        if v is not None and (from_repository := v.get("from_repository")):
-            return load_agent_from_repository(from_repository) | v
-        return v
-
     @model_validator(mode="after")
     def post_init_setup(self):
@@ -5,5 +5,5 @@ def get_auth_token() -> str:
     """Get the authentication token."""
     access_token = TokenManager().get_token()
     if not access_token:
-        raise Exception("No token found, make sure you are logged in")
+        raise Exception()
     return access_token
@@ -14,7 +14,6 @@ class PlusAPI:

     TOOLS_RESOURCE = "/crewai_plus/api/v1/tools"
     CREWS_RESOURCE = "/crewai_plus/api/v1/crews"
-    AGENTS_RESOURCE = "/crewai_plus/api/v1/agents"

     def __init__(self, api_key: str) -> None:
         self.api_key = api_key

@@ -38,9 +37,6 @@ class PlusAPI:
     def get_tool(self, handle: str):
         return self._make_request("GET", f"{self.TOOLS_RESOURCE}/{handle}")

-    def get_agent(self, handle: str):
-        return self._make_request("GET", f"{self.AGENTS_RESOURCE}/{handle}")
-
     def publish_tool(
         self,
         handle: str,
@@ -52,7 +52,7 @@ from crewai.tools.agent_tools.agent_tools import AgentTools
 from crewai.tools.base_tool import BaseTool, Tool
 from crewai.types.usage_metrics import UsageMetrics
 from crewai.utilities import I18N, FileHandler, Logger, RPMController
-from crewai.utilities.constants import NOT_SPECIFIED, TRAINING_DATA_FILE
+from crewai.utilities.constants import TRAINING_DATA_FILE
 from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator
 from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
 from crewai.utilities.events.crew_events import (

@@ -478,7 +478,7 @@ class Crew(FlowTrackable, BaseModel):
         separated by a synchronous task.
         """
         for i, task in enumerate(self.tasks):
-            if task.async_execution and isinstance(task.context, list):
+            if task.async_execution and task.context:
                 for context_task in task.context:
                     if context_task.async_execution:
                         for j in range(i - 1, -1, -1):

@@ -496,7 +496,7 @@ class Crew(FlowTrackable, BaseModel):
         task_indices = {id(task): i for i, task in enumerate(self.tasks)}

         for task in self.tasks:
-            if isinstance(task.context, list):
+            if task.context:
                 for context_task in task.context:
                     if id(context_task) not in task_indices:
                         continue  # Skip context tasks not in the main tasks list

@@ -1034,14 +1034,11 @@ class Crew(FlowTrackable, BaseModel):
         )
         return cast(List[BaseTool], tools)

-    def _get_context(self, task: Task, task_outputs: List[TaskOutput]) -> str:
-        if not task.context:
-            return ""
-
+    def _get_context(self, task: Task, task_outputs: List[TaskOutput]):
         context = (
-            aggregate_raw_outputs_from_task_outputs(task_outputs)
-            if task.context is NOT_SPECIFIED
-            else aggregate_raw_outputs_from_tasks(task.context)
+            aggregate_raw_outputs_from_tasks(task.context)
+            if task.context
+            else aggregate_raw_outputs_from_task_outputs(task_outputs)
         )
         return context

@@ -1229,7 +1226,7 @@ class Crew(FlowTrackable, BaseModel):
             task_mapping[task.key] = cloned_task

         for cloned_task, original_task in zip(cloned_tasks, self.tasks):
-            if isinstance(original_task.context, list):
+            if original_task.context:
                 cloned_context = [
                     task_mapping[context_task.key]
                     for context_task in original_task.context
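
The `@@ -478` hunk above enforces the rule its docstring mentions: an async task may not take another async task as context unless a synchronous task sits between them in the crew's task list. A hedged sketch of task wiring that satisfies the validator (illustrative only; it mirrors crewAI's public `Task`/`Crew` constructors but is not taken from this diff):

```python
# Sketch of the task-context rule validated above: two async tasks must be
# separated by a synchronous task.
from crewai import Agent, Crew, Task

agent = Agent(role="Writer", goal="Write an article", backstory="Seasoned writer")

research = Task(
    description="Gather notes",
    expected_output="Notes",
    agent=agent,
    async_execution=True,
)
summarize = Task(  # synchronous task separating the two async tasks
    description="Summarize notes",
    expected_output="Summary",
    agent=agent,
    context=[research],
)
draft = Task(
    description="Draft the article",
    expected_output="Draft",
    agent=agent,
    async_execution=True,
    context=[summarize],  # context=[research] here would fail validation
)

crew = Crew(agents=[agent], tasks=[research, summarize, draft])
```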
@@ -5,7 +5,8 @@ import sys
 import threading
 import warnings
 from collections import defaultdict
-from contextlib import contextmanager, redirect_stderr, redirect_stdout
+from contextlib import contextmanager
 from types import SimpleNamespace
 from typing import (
     Any,
     DefaultDict,

@@ -30,6 +31,7 @@ from crewai.utilities.events.llm_events import (
     LLMCallType,
     LLMStreamChunkEvent,
 )
+from crewai.utilities.events.tool_usage_events import ToolExecutionErrorEvent

 with warnings.catch_warnings():
     warnings.simplefilter("ignore", UserWarning)

@@ -43,9 +45,6 @@ with warnings.catch_warnings():
     from litellm.utils import supports_response_schema

-
-import io
-from typing import TextIO
-
 from crewai.llms.base_llm import BaseLLM
 from crewai.utilities.events import crewai_event_bus
 from crewai.utilities.exceptions.context_window_exceeding_exception import (

@@ -55,17 +54,12 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
 load_dotenv()


-class FilteredStream(io.TextIOBase):
-    _lock = None
-
-    def __init__(self, original_stream: TextIO):
+class FilteredStream:
+    def __init__(self, original_stream):
         self._original_stream = original_stream
         self._lock = threading.Lock()

-    def write(self, s: str) -> int:
-        if not self._lock:
-            self._lock = threading.Lock()
-
+    def write(self, s) -> int:
         with self._lock:
             # Filter out extraneous messages from LiteLLM
             if (

@@ -220,11 +214,15 @@ def suppress_warnings():
     )

     # Redirect stdout and stderr
-    with (
-        redirect_stdout(FilteredStream(sys.stdout)),
-        redirect_stderr(FilteredStream(sys.stderr)),
-    ):
+    old_stdout = sys.stdout
+    old_stderr = sys.stderr
+    sys.stdout = FilteredStream(old_stdout)
+    sys.stderr = FilteredStream(old_stderr)
+    try:
         yield
+    finally:
+        sys.stdout = old_stdout
+        sys.stderr = old_stderr


 class Delta(TypedDict):
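
The last hunk above trades `redirect_stdout`/`redirect_stderr` context managers for a manual save-swap-restore of `sys.stdout`/`sys.stderr`. The general shape of that pattern, with `try`/`finally` guaranteeing the original streams come back even if the wrapped block raises, looks roughly like this (a generic sketch with a stubbed-out filter, not the file's actual code):

```python
import sys
from contextlib import contextmanager


class FilteredStream:
    """Pass-through stream wrapper; the real filtering logic is omitted."""

    def __init__(self, original_stream):
        self._original_stream = original_stream

    def write(self, s) -> int:
        # A real implementation would drop unwanted lines here.
        return self._original_stream.write(s)

    def flush(self) -> None:
        self._original_stream.flush()


@contextmanager
def filtered_output():
    # Save the current streams, swap in the wrappers, and restore them in
    # `finally` so an exception inside the block cannot leak the wrappers.
    old_stdout, old_stderr = sys.stdout, sys.stderr
    sys.stdout = FilteredStream(old_stdout)
    sys.stderr = FilteredStream(old_stderr)
    try:
        yield
    finally:
        sys.stdout = old_stdout
        sys.stderr = old_stderr
```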
@@ -2,6 +2,7 @@ import datetime
 import inspect
 import json
 import logging
 import re
 import threading
 import uuid
 from concurrent.futures import Future

@@ -40,7 +41,6 @@ from crewai.tasks.output_format import OutputFormat
 from crewai.tasks.task_output import TaskOutput
 from crewai.tools.base_tool import BaseTool
 from crewai.utilities.config import process_config
-from crewai.utilities.constants import NOT_SPECIFIED
 from crewai.utilities.converter import Converter, convert_to_model
 from crewai.utilities.events import (
     TaskCompletedEvent,

@@ -97,7 +97,7 @@ class Task(BaseModel):
     )
     context: Optional[List["Task"]] = Field(
         description="Other tasks that will have their output used as context for this task.",
-        default=NOT_SPECIFIED,
+        default=None,
     )
     async_execution: Optional[bool] = Field(
         description="Whether the task should be executed asynchronously or not.",

@@ -321,8 +321,11 @@ class Task(BaseModel):
     @model_validator(mode="after")
     def check_tools(self):
         """Check if the tools are set."""
-        if not self.tools and self.agent and self.agent.tools:
-            self.tools.extend(self.agent.tools)
+        if self.agent and self.agent.tools:
+            existing_tool_names = {tool.name for tool in self.tools}
+            for tool in self.agent.tools:
+                if tool.name not in existing_tool_names:
+                    self.tools.append(tool)
         return self

     @model_validator(mode="after")

@@ -643,7 +646,7 @@ class Task(BaseModel):

         cloned_context = (
             [task_mapping[context_task.key] for context_task in self.context]
-            if isinstance(self.context, list)
+            if self.context
             else None
         )
@@ -10,18 +10,6 @@ from contextlib import contextmanager
 from importlib.metadata import version
 from typing import TYPE_CHECKING, Any, Optional

-from opentelemetry import trace
-from opentelemetry.exporter.otlp.proto.http.trace_exporter import (
-    OTLPSpanExporter,
-)
-from opentelemetry.sdk.resources import SERVICE_NAME, Resource
-from opentelemetry.sdk.trace import TracerProvider
-from opentelemetry.sdk.trace.export import (
-    BatchSpanProcessor,
-    SpanExportResult,
-)
-from opentelemetry.trace import Span, Status, StatusCode
-
 from crewai.telemetry.constants import (
     CREWAI_TELEMETRY_BASE_URL,
     CREWAI_TELEMETRY_SERVICE_NAME,

@@ -37,6 +25,18 @@ def suppress_warnings():
     yield


+from opentelemetry import trace  # noqa: E402
+from opentelemetry.exporter.otlp.proto.http.trace_exporter import (
+    OTLPSpanExporter,  # noqa: E402
+)
+from opentelemetry.sdk.resources import SERVICE_NAME, Resource  # noqa: E402
+from opentelemetry.sdk.trace import TracerProvider  # noqa: E402
+from opentelemetry.sdk.trace.export import (  # noqa: E402
+    BatchSpanProcessor,
+    SpanExportResult,
+)
+from opentelemetry.trace import Span, Status, StatusCode  # noqa: E402
+
 if TYPE_CHECKING:
     from crewai.crew import Crew
     from crewai.task import Task

@@ -232,7 +232,7 @@ class Telemetry:
             "agent_key": task.agent.key if task.agent else None,
             "context": (
                 [task.description for task in task.context]
-                if isinstance(task.context, list)
+                if task.context
                 else None
             ),
             "tools_names": [

@@ -748,7 +748,7 @@ class Telemetry:
             "agent_key": task.agent.key if task.agent else None,
             "context": (
                 [task.description for task in task.context]
-                if isinstance(task.context, list)
+                if task.context
                 else None
             ),
             "tools_names": [
@@ -16,7 +16,6 @@ from crewai.tools.base_tool import BaseTool
 from crewai.tools.structured_tool import CrewStructuredTool
 from crewai.tools.tool_types import ToolResult
 from crewai.utilities import I18N, Printer
-from crewai.utilities.errors import AgentRepositoryError
 from crewai.utilities.exceptions.context_window_exceeding_exception import (
     LLMContextLengthExceededException,
 )

@@ -429,41 +428,3 @@ def show_agent_logs(
     printer.print(
         content=f"\033[95m## Final Answer:\033[00m \033[92m\n{formatted_answer.output}\033[00m\n\n"
     )
-
-
-def load_agent_from_repository(from_repository: str) -> Dict[str, Any]:
-    attributes: Dict[str, Any] = {}
-    if from_repository:
-        import importlib
-
-        from crewai.cli.authentication.token import get_auth_token
-        from crewai.cli.plus_api import PlusAPI
-
-        client = PlusAPI(api_key=get_auth_token())
-        response = client.get_agent(from_repository)
-        if response.status_code == 404:
-            raise AgentRepositoryError(
-                f"Agent {from_repository} does not exist, make sure the name is correct or the agent is available on your organization"
-            )
-
-        if response.status_code != 200:
-            raise AgentRepositoryError(
-                f"Agent {from_repository} could not be loaded: {response.text}"
-            )
-
-        agent = response.json()
-        for key, value in agent.items():
-            if key == "tools":
-                attributes[key] = []
-                for tool in value:
-                    try:
-                        module = importlib.import_module("crewai_tools")
-                        tool_class = getattr(module, tool["name"])
-                        attributes[key].append(tool_class())
-                    except Exception as e:
-                        raise AgentRepositoryError(
-                            f"Tool {tool['name']} could not be loaded: {e}"
-                        ) from e
-            else:
-                attributes[key] = value
-    return attributes
@@ -5,14 +5,3 @@ KNOWLEDGE_DIRECTORY = "knowledge"
 MAX_LLM_RETRY = 3
 MAX_FILE_NAME_LENGTH = 255
 EMITTER_COLOR = "bold_blue"
-
-
-class _NotSpecified:
-    def __repr__(self):
-        return "NOT_SPECIFIED"
-
-
-# Sentinel value used to detect when no value has been explicitly provided.
-# Unlike `None`, which might be a valid value from the user, `NOT_SPECIFIED` allows
-# us to distinguish between "not passed at all" and "explicitly passed None" or "[]".
-NOT_SPECIFIED = _NotSpecified()
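
The removed comment describes the sentinel idiom well: a dedicated object lets code tell "argument never passed" apart from "caller explicitly passed None or an empty value", which a plain `None` default collapses. A stand-alone illustration of what the sentinel buys (a generic sketch, independent of crewAI's code):

```python
class _NotSpecified:
    def __repr__(self) -> str:
        return "NOT_SPECIFIED"


NOT_SPECIFIED = _NotSpecified()


def get_context(context=NOT_SPECIFIED):
    # Three distinguishable cases; with `context=None` as the default,
    # the first two would be indistinguishable.
    if context is NOT_SPECIFIED:
        return "caller did not pass the argument"
    if context is None:
        return "caller explicitly passed None"
    return f"caller passed {context!r}"


assert get_context() == "caller did not pass the argument"
assert get_context(None) == "caller explicitly passed None"
assert get_context([]) == "caller passed []"
```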
@@ -1,5 +1,4 @@
 """Error message definitions for CrewAI database operations."""
-
 from typing import Optional

@@ -38,9 +37,3 @@ class DatabaseError:
             The formatted error message
         """
         return template.format(str(error))
-
-
-class AgentRepositoryError(Exception):
-    """Exception raised when an agent repository is not found."""
-
-    ...
@@ -1,6 +1,6 @@
 import re
 from typing import TYPE_CHECKING, List


 if TYPE_CHECKING:
     from crewai.task import Task
     from crewai.tasks.task_output import TaskOutput

@@ -17,11 +17,6 @@ def aggregate_raw_outputs_from_task_outputs(task_outputs: List["TaskOutput"]) ->

 def aggregate_raw_outputs_from_tasks(tasks: List["Task"]) -> str:
     """Generate string context from the tasks."""
-
-    task_outputs = (
-        [task.output for task in tasks if task.output is not None]
-        if isinstance(tasks, list)
-        else []
-    )
+    task_outputs = [task.output for task in tasks if task.output is not None]

     return aggregate_raw_outputs_from_task_outputs(task_outputs)
@@ -2,7 +2,7 @@

 import os
 from unittest import mock
-from unittest.mock import MagicMock, patch
+from unittest.mock import patch

 import pytest

@@ -18,7 +18,6 @@ from crewai.tools import tool
 from crewai.tools.tool_calling import InstructorToolCalling
 from crewai.tools.tool_usage import ToolUsage
 from crewai.utilities import RPMController
-from crewai.utilities.errors import AgentRepositoryError
 from crewai.utilities.events import crewai_event_bus
 from crewai.utilities.events.tool_usage_events import ToolUsageFinishedEvent
@@ -309,7 +308,9 @@ def test_cache_hitting():
     def handle_tool_end(source, event):
         received_events.append(event)

-    with (patch.object(CacheHandler, "read") as read,):
+    with (
+        patch.object(CacheHandler, "read") as read,
+    ):
         read.return_value = "0"
         task = Task(
             description="What is 2 times 6? Ignore correctness and just return the result of the multiplication tool, you must use the tool.",

@@ -1039,7 +1040,7 @@ def test_agent_human_input():
             CrewAgentExecutor,
             "_invoke_loop",
             return_value=AgentFinish(output="Hello", thought="", text=""),
-        ),
+        ) as mock_invoke_loop,
     ):
         # Execute the task
         output = agent.execute_task(task)
@@ -2024,99 +2025,3 @@ def test_get_knowledge_search_query():
         },
     ]
 )
-
-
-@pytest.fixture
-def mock_get_auth_token():
-    with patch(
-        "crewai.cli.authentication.token.get_auth_token", return_value="test_token"
-    ):
-        yield
-
-
-@patch("crewai.cli.plus_api.PlusAPI.get_agent")
-def test_agent_from_repository(mock_get_agent, mock_get_auth_token):
-    from crewai_tools import SerperDevTool
-
-    mock_get_response = MagicMock()
-    mock_get_response.status_code = 200
-    mock_get_response.json.return_value = {
-        "role": "test role",
-        "goal": "test goal",
-        "backstory": "test backstory",
-        "tools": [{"name": "SerperDevTool"}],
-    }
-    mock_get_agent.return_value = mock_get_response
-    agent = Agent(from_repository="test_agent")
-
-    assert agent.role == "test role"
-    assert agent.goal == "test goal"
-    assert agent.backstory == "test backstory"
-    assert len(agent.tools) == 1
-    assert isinstance(agent.tools[0], SerperDevTool)
-
-
-@patch("crewai.cli.plus_api.PlusAPI.get_agent")
-def test_agent_from_repository_override_attributes(mock_get_agent, mock_get_auth_token):
-    from crewai_tools import SerperDevTool
-
-    mock_get_response = MagicMock()
-    mock_get_response.status_code = 200
-    mock_get_response.json.return_value = {
-        "role": "test role",
-        "goal": "test goal",
-        "backstory": "test backstory",
-        "tools": [{"name": "SerperDevTool"}],
-    }
-    mock_get_agent.return_value = mock_get_response
-    agent = Agent(from_repository="test_agent", role="Custom Role")
-
-    assert agent.role == "Custom Role"
-    assert agent.goal == "test goal"
-    assert agent.backstory == "test backstory"
-    assert len(agent.tools) == 1
-    assert isinstance(agent.tools[0], SerperDevTool)
-
-
-@patch("crewai.cli.plus_api.PlusAPI.get_agent")
-def test_agent_from_repository_with_invalid_tools(mock_get_agent, mock_get_auth_token):
-    mock_get_response = MagicMock()
-    mock_get_response.status_code = 200
-    mock_get_response.json.return_value = {
-        "role": "test role",
-        "goal": "test goal",
-        "backstory": "test backstory",
-        "tools": [{"name": "DoesNotExist"}],
-    }
-    mock_get_agent.return_value = mock_get_response
-    with pytest.raises(
-        AgentRepositoryError,
-        match="Tool DoesNotExist could not be loaded: module 'crewai_tools' has no attribute 'DoesNotExist'",
-    ):
-        Agent(from_repository="test_agent")
-
-
-@patch("crewai.cli.plus_api.PlusAPI.get_agent")
-def test_agent_from_repository_internal_error(mock_get_agent, mock_get_auth_token):
-    mock_get_response = MagicMock()
-    mock_get_response.status_code = 500
-    mock_get_response.text = "Internal server error"
-    mock_get_agent.return_value = mock_get_response
-    with pytest.raises(
-        AgentRepositoryError,
-        match="Agent test_agent could not be loaded: Internal server error",
-    ):
-        Agent(from_repository="test_agent")
-
-
-@patch("crewai.cli.plus_api.PlusAPI.get_agent")
-def test_agent_from_repository_agent_not_found(mock_get_agent, mock_get_auth_token):
-    mock_get_response = MagicMock()
-    mock_get_response.status_code = 404
-    mock_get_response.text = "Agent not found"
-    mock_get_agent.return_value = mock_get_response
-    with pytest.raises(
-        AgentRepositoryError,
-        match="Agent test_agent does not exist, make sure the name is correct or the agent is available on your organization",
-    ):
-        Agent(from_repository="test_agent")
tests/cassettes/test_agent_human_input.yaml (new file, 520 lines)
@@ -0,0 +1,520 @@
interactions:
- request:
    body: !!binary |
      CqcXCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkS/hYKEgoQY3Jld2FpLnRl
      bGVtZXRyeRJ5ChBuJJtOdNaB05mOW/p3915eEgj2tkAd3rZcASoQVG9vbCBVc2FnZSBFcnJvcjAB
      OYa7/URvKBUYQUpcFEVvKBUYShoKDmNyZXdhaV92ZXJzaW9uEggKBjAuODYuMEoPCgNsbG0SCAoG
      Z3B0LTRvegIYAYUBAAEAABLJBwoQifhX01E5i+5laGdALAlZBBIIBuGM1aN+OPgqDENyZXcgQ3Jl
      YXRlZDABORVGruBvKBUYQaipwOBvKBUYShoKDmNyZXdhaV92ZXJzaW9uEggKBjAuODYuMEoaCg5w
      eXRob25fdmVyc2lvbhIICgYzLjEyLjdKLgoIY3Jld19rZXkSIgogN2U2NjA4OTg5ODU5YTY3ZWVj
      ODhlZWY3ZmNlODUyMjVKMQoHY3Jld19pZBImCiRiOThiNWEwMC01YTI1LTQxMDctYjQwNS1hYmYz
      MjBhOGYzYThKHAoMY3Jld19wcm9jZXNzEgwKCnNlcXVlbnRpYWxKEQoLY3Jld19tZW1vcnkSAhAA
      ShoKFGNyZXdfbnVtYmVyX29mX3Rhc2tzEgIYAUobChVjcmV3X251bWJlcl9vZl9hZ2VudHMSAhgB
      SuQCCgtjcmV3X2FnZW50cxLUAgrRAlt7ImtleSI6ICIyMmFjZDYxMWU0NGVmNWZhYzA1YjUzM2Q3
      NWU4ODkzYiIsICJpZCI6ICJkNWIyMzM1YS0yMmIyLTQyZWEtYmYwNS03OTc3NmU3MmYzOTIiLCAi
      cm9sZSI6ICJEYXRhIFNjaWVudGlzdCIsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAy
      MCwgIm1heF9ycG0iOiBudWxsLCAiZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAiIiwgImxsbSI6ICJn
      cHQtNG8tbWluaSIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJhbGxvd19jb2RlX2V4
      ZWN1dGlvbj8iOiBmYWxzZSwgIm1heF9yZXRyeV9saW1pdCI6IDIsICJ0b29sc19uYW1lcyI6IFsi
      Z2V0IGdyZWV0aW5ncyJdfV1KkgIKCmNyZXdfdGFza3MSgwIKgAJbeyJrZXkiOiAiYTI3N2IzNGIy
      YzE0NmYwYzU2YzVlMTM1NmU4ZjhhNTciLCAiaWQiOiAiMjJiZWMyMzEtY2QyMS00YzU4LTgyN2Ut
      MDU4MWE4ZjBjMTExIiwgImFzeW5jX2V4ZWN1dGlvbj8iOiBmYWxzZSwgImh1bWFuX2lucHV0PyI6
      IGZhbHNlLCAiYWdlbnRfcm9sZSI6ICJEYXRhIFNjaWVudGlzdCIsICJhZ2VudF9rZXkiOiAiMjJh
      Y2Q2MTFlNDRlZjVmYWMwNWI1MzNkNzVlODg5M2IiLCAidG9vbHNfbmFtZXMiOiBbImdldCBncmVl
      dGluZ3MiXX1degIYAYUBAAEAABKOAgoQ5WYoxRtTyPjge4BduhL0rRIIv2U6rvWALfwqDFRhc2sg
      Q3JlYXRlZDABOX068uBvKBUYQZkv8+BvKBUYSi4KCGNyZXdfa2V5EiIKIDdlNjYwODk4OTg1OWE2
      N2VlYzg4ZWVmN2ZjZTg1MjI1SjEKB2NyZXdfaWQSJgokYjk4YjVhMDAtNWEyNS00MTA3LWI0MDUt
      YWJmMzIwYThmM2E4Si4KCHRhc2tfa2V5EiIKIGEyNzdiMzRiMmMxNDZmMGM1NmM1ZTEzNTZlOGY4
      YTU3SjEKB3Rhc2tfaWQSJgokMjJiZWMyMzEtY2QyMS00YzU4LTgyN2UtMDU4MWE4ZjBjMTExegIY
      AYUBAAEAABKQAQoQXyeDtJDFnyp2Fjk9YEGTpxIIaNE7gbhPNYcqClRvb2wgVXNhZ2UwATkaXTvj
      bygVGEGvx0rjbygVGEoaCg5jcmV3YWlfdmVyc2lvbhIICgYwLjg2LjBKHAoJdG9vbF9uYW1lEg8K
      DUdldCBHcmVldGluZ3NKDgoIYXR0ZW1wdHMSAhgBegIYAYUBAAEAABLVBwoQMWfznt0qwauEzl7T
      UOQxRBII9q+pUS5EdLAqDENyZXcgQ3JlYXRlZDABORONPORvKBUYQSAoS+RvKBUYShoKDmNyZXdh
      aV92ZXJzaW9uEggKBjAuODYuMEoaCg5weXRob25fdmVyc2lvbhIICgYzLjEyLjdKLgoIY3Jld19r
      ZXkSIgogYzMwNzYwMDkzMjY3NjE0NDRkNTdjNzFkMWRhM2YyN2NKMQoHY3Jld19pZBImCiQ3OTQw
      MTkyNS1iOGU5LTQ3MDgtODUzMC00NDhhZmEzYmY4YjBKHAoMY3Jld19wcm9jZXNzEgwKCnNlcXVl
      bnRpYWxKEQoLY3Jld19tZW1vcnkSAhAAShoKFGNyZXdfbnVtYmVyX29mX3Rhc2tzEgIYAUobChVj
      cmV3X251bWJlcl9vZl9hZ2VudHMSAhgBSuoCCgtjcmV3X2FnZW50cxLaAgrXAlt7ImtleSI6ICI5
      OGYzYjFkNDdjZTk2OWNmMDU3NzI3Yjc4NDE0MjVjZCIsICJpZCI6ICI5OTJkZjYyZi1kY2FiLTQy
      OTUtOTIwNi05MDBkNDExNGIxZTkiLCAicm9sZSI6ICJGcmllbmRseSBOZWlnaGJvciIsICJ2ZXJi
      b3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAyMCwgIm1heF9ycG0iOiBudWxsLCAiZnVuY3Rpb25f
      Y2FsbGluZ19sbG0iOiAiIiwgImxsbSI6ICJncHQtNG8tbWluaSIsICJkZWxlZ2F0aW9uX2VuYWJs
      ZWQ/IjogZmFsc2UsICJhbGxvd19jb2RlX2V4ZWN1dGlvbj8iOiBmYWxzZSwgIm1heF9yZXRyeV9s
      aW1pdCI6IDIsICJ0b29sc19uYW1lcyI6IFsiZGVjaWRlIGdyZWV0aW5ncyJdfV1KmAIKCmNyZXdf
      dGFza3MSiQIKhgJbeyJrZXkiOiAiODBkN2JjZDQ5MDk5MjkwMDgzODMyZjBlOTgzMzgwZGYiLCAi
      aWQiOiAiMmZmNjE5N2UtYmEyNy00YjczLWI0YTctNGZhMDQ4ZTYyYjQ3IiwgImFzeW5jX2V4ZWN1
      dGlvbj8iOiBmYWxzZSwgImh1bWFuX2lucHV0PyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6ICJGcmll
      bmRseSBOZWlnaGJvciIsICJhZ2VudF9rZXkiOiAiOThmM2IxZDQ3Y2U5NjljZjA1NzcyN2I3ODQx
      NDI1Y2QiLCAidG9vbHNfbmFtZXMiOiBbImRlY2lkZSBncmVldGluZ3MiXX1degIYAYUBAAEAABKO
      AgoQnjTp5boK7/+DQxztYIpqihIIgGnMUkBtzHEqDFRhc2sgQ3JlYXRlZDABOcpYcuRvKBUYQalE
      c+RvKBUYSi4KCGNyZXdfa2V5EiIKIGMzMDc2MDA5MzI2NzYxNDQ0ZDU3YzcxZDFkYTNmMjdjSjEK
      B2NyZXdfaWQSJgokNzk0MDE5MjUtYjhlOS00NzA4LTg1MzAtNDQ4YWZhM2JmOGIwSi4KCHRhc2tf
      a2V5EiIKIDgwZDdiY2Q0OTA5OTI5MDA4MzgzMmYwZTk4MzM4MGRmSjEKB3Rhc2tfaWQSJgokMmZm
      NjE5N2UtYmEyNy00YjczLWI0YTctNGZhMDQ4ZTYyYjQ3egIYAYUBAAEAABKTAQoQ26H9pLUgswDN
      p9XhJwwL6BIIx3bw7mAvPYwqClRvb2wgVXNhZ2UwATmy7NPlbygVGEEvb+HlbygVGEoaCg5jcmV3
      YWlfdmVyc2lvbhIICgYwLjg2LjBKHwoJdG9vbF9uYW1lEhIKEERlY2lkZSBHcmVldGluZ3NKDgoI
      YXR0ZW1wdHMSAhgBegIYAYUBAAEAAA==
    headers:
      Accept:
      - '*/*'
      Accept-Encoding:
      - gzip, deflate
      Connection:
      - keep-alive
      Content-Length:
      - '2986'
      Content-Type:
      - application/x-protobuf
      User-Agent:
      - OTel-OTLP-Exporter-Python/1.27.0
    method: POST
    uri: https://telemetry.crewai.com:4319/v1/traces
  response:
    body:
      string: "\n\0"
    headers:
      Content-Length:
      - '2'
      Content-Type:
      - application/x-protobuf
      Date:
      - Fri, 27 Dec 2024 22:14:53 GMT
    status:
      code: 200
      message: OK
- request:
    body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
      personal goal is: test goal\nTo give my best complete final answer to the task
      use the exact following format:\n\nThought: I now can give a great answer\nFinal
      Answer: Your final answer must be the great and the most complete as possible,
      it must be outcome described.\n\nI MUST use these formats, my job depends on
      it!"}, {"role": "user", "content": "\nCurrent Task: Say the word: Hi\n\nThis
      is the expect criteria for your final answer: The word: Hi\nyou MUST return
      the actual complete content as the final answer, not a summary.\n\nBegin! This
      is VERY important to you, use the tools available and give your best Final Answer,
      your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"],
      "stream": false}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '824'
      content-type:
      - application/json
      cookie:
      - _cfuvid=ePJSDFdHag2D8lj21_ijAMWjoA6xfnPNxN4uekvC728-1727226247743-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.52.1
      x-stainless-arch:
      - x64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - Linux
      x-stainless-package-version:
      - 1.52.1
      x-stainless-raw-response:
      - 'true'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-AjCtZLLrWi8ZASpP9bz6HaCV7xBIn\",\n \"object\":
      \"chat.completion\",\n \"created\": 1735337693,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
      \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
      Answer: Hi\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
      \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
      158,\n \"completion_tokens\": 12,\n \"total_tokens\": 170,\n \"prompt_tokens_details\":
      {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
      {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
      0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
      \"fp_0aa8d3e20b\"\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8f8caa83deca756b-SEA
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Fri, 27 Dec 2024 22:14:53 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=wJkq_yLkzE3OdxE0aMJz.G0kce969.9JxRmZ0ratl4c-1735337693-1.0.1.1-OKpUoRrSPFGvWv5Hp5ET1PNZ7iZNHPKEAuakpcQUxxPSeisUIIR3qIOZ31MGmYugqB5.wkvidgbxOAagqJvmnw;
        path=/; expires=Fri, 27-Dec-24 22:44:53 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=A_ASCLNAVfQoyucWOAIhecWtEpNotYoZr0bAFihgNxs-1735337693273-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '404'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '30000'
      x-ratelimit-limit-tokens:
      - '150000000'
      x-ratelimit-remaining-requests:
      - '29999'
      x-ratelimit-remaining-tokens:
      - '149999816'
      x-ratelimit-reset-requests:
      - 2ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_6ac84634bff9193743c4b0911c09b4a6
    http_version: HTTP/1.1
    status_code: 200
- request:
    body: '{"messages": [{"role": "system", "content": "Determine if the following
      feedback indicates that the user is satisfied or if further changes are needed.
      Respond with ''True'' if further changes are needed, or ''False'' if the user
      is satisfied. **Important** Do not include any additional commentary outside
      of your ''True'' or ''False'' response.\n\nFeedback: \"Don''t say hi, say Hello
      instead!\""}], "model": "gpt-4o-mini", "stop": ["\nObservation:"], "stream":
      false}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '461'
      content-type:
      - application/json
      cookie:
      - _cfuvid=A_ASCLNAVfQoyucWOAIhecWtEpNotYoZr0bAFihgNxs-1735337693273-0.0.1.1-604800000;
        __cf_bm=wJkq_yLkzE3OdxE0aMJz.G0kce969.9JxRmZ0ratl4c-1735337693-1.0.1.1-OKpUoRrSPFGvWv5Hp5ET1PNZ7iZNHPKEAuakpcQUxxPSeisUIIR3qIOZ31MGmYugqB5.wkvidgbxOAagqJvmnw
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.52.1
      x-stainless-arch:
      - x64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - Linux
      x-stainless-package-version:
      - 1.52.1
      x-stainless-raw-response:
      - 'true'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-AjCtZNlWdrrPZhq0MJDqd16sMuQEJ\",\n \"object\":
      \"chat.completion\",\n \"created\": 1735337693,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
      \"assistant\",\n \"content\": \"True\",\n \"refusal\": null\n
      \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n
      \ ],\n \"usage\": {\n \"prompt_tokens\": 78,\n \"completion_tokens\":
      1,\n \"total_tokens\": 79,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
      0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
      \ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
      0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
      \"fp_0aa8d3e20b\"\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8f8caa87094f756b-SEA
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Fri, 27 Dec 2024 22:14:53 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '156'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '30000'
      x-ratelimit-limit-tokens:
      - '150000000'
      x-ratelimit-remaining-requests:
      - '29999'
      x-ratelimit-remaining-tokens:
      - '149999898'
      x-ratelimit-reset-requests:
      - 2ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_ec74bef2a9ef7b2144c03fd7f7bbeab0
    http_version: HTTP/1.1
    status_code: 200
- request:
    body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
      personal goal is: test goal\nTo give my best complete final answer to the task
      use the exact following format:\n\nThought: I now can give a great answer\nFinal
      Answer: Your final answer must be the great and the most complete as possible,
      it must be outcome described.\n\nI MUST use these formats, my job depends on
      it!"}, {"role": "user", "content": "\nCurrent Task: Say the word: Hi\n\nThis
      is the expect criteria for your final answer: The word: Hi\nyou MUST return
      the actual complete content as the final answer, not a summary.\n\nBegin! This
      is VERY important to you, use the tools available and give your best Final Answer,
      your job depends on it!\n\nThought:"}, {"role": "assistant", "content": "I now
      can give a great answer \nFinal Answer: Hi"}, {"role": "user", "content": "Feedback:
      Don''t say hi, say Hello instead!"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"],
      "stream": false}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '986'
      content-type:
      - application/json
      cookie:
      - _cfuvid=A_ASCLNAVfQoyucWOAIhecWtEpNotYoZr0bAFihgNxs-1735337693273-0.0.1.1-604800000;
        __cf_bm=wJkq_yLkzE3OdxE0aMJz.G0kce969.9JxRmZ0ratl4c-1735337693-1.0.1.1-OKpUoRrSPFGvWv5Hp5ET1PNZ7iZNHPKEAuakpcQUxxPSeisUIIR3qIOZ31MGmYugqB5.wkvidgbxOAagqJvmnw
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.52.1
      x-stainless-arch:
      - x64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - Linux
      x-stainless-package-version:
      - 1.52.1
      x-stainless-raw-response:
      - 'true'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-AjCtZGv4f3h7GDdhyOy9G0sB1lRgC\",\n \"object\":
      \"chat.completion\",\n \"created\": 1735337693,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
      \"assistant\",\n \"content\": \"Thought: I understand the feedback and
      will adjust my response accordingly. \\nFinal Answer: Hello\",\n \"refusal\":
      null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
      \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 188,\n \"completion_tokens\":
      18,\n \"total_tokens\": 206,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
      0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
      \ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
      0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
      \"fp_0aa8d3e20b\"\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8f8caa88cac4756b-SEA
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Fri, 27 Dec 2024 22:14:54 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '358'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '30000'
      x-ratelimit-limit-tokens:
      - '150000000'
      x-ratelimit-remaining-requests:
      - '29999'
      x-ratelimit-remaining-tokens:
      - '149999793'
      x-ratelimit-reset-requests:
      - 2ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_ae1ab6b206d28ded6fee3c83ed0c2ab7
    http_version: HTTP/1.1
    status_code: 200
- request:
    body: '{"messages": [{"role": "system", "content": "Determine if the following
      feedback indicates that the user is satisfied or if further changes are needed.
      Respond with ''True'' if further changes are needed, or ''False'' if the user
      is satisfied. **Important** Do not include any additional commentary outside
      of your ''True'' or ''False'' response.\n\nFeedback: \"looks good\""}], "model":
      "gpt-4o-mini", "stop": ["\nObservation:"], "stream": false}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '439'
      content-type:
      - application/json
      cookie:
      - _cfuvid=A_ASCLNAVfQoyucWOAIhecWtEpNotYoZr0bAFihgNxs-1735337693273-0.0.1.1-604800000;
        __cf_bm=wJkq_yLkzE3OdxE0aMJz.G0kce969.9JxRmZ0ratl4c-1735337693-1.0.1.1-OKpUoRrSPFGvWv5Hp5ET1PNZ7iZNHPKEAuakpcQUxxPSeisUIIR3qIOZ31MGmYugqB5.wkvidgbxOAagqJvmnw
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.52.1
      x-stainless-arch:
      - x64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - Linux
      x-stainless-package-version:
      - 1.52.1
      x-stainless-raw-response:
      - 'true'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-AjCtaiHL4TY8Dssk0j2miqmjrzquy\",\n \"object\":
      \"chat.completion\",\n \"created\": 1735337694,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
      \"assistant\",\n \"content\": \"False\",\n \"refusal\": null\n
      \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n
      \ ],\n \"usage\": {\n \"prompt_tokens\": 73,\n \"completion_tokens\":
      1,\n \"total_tokens\": 74,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
      0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
      \ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
      0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
      \"fp_0aa8d3e20b\"\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8f8caa8bdd26756b-SEA
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Fri, 27 Dec 2024 22:14:54 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '184'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '30000'
      x-ratelimit-limit-tokens:
      - '150000000'
      x-ratelimit-remaining-requests:
      - '29999'
      x-ratelimit-remaining-tokens:
      - '149999902'
      x-ratelimit-reset-requests:
      - 2ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_652891f79c1104a7a8436275d78a69f1
    http_version: HTTP/1.1
    status_code: 200
version: 1
tests/crew_test.py (1659 lines changed)
File diff suppressed because it is too large.
@@ -43,7 +43,7 @@ def test_task_tool_reflect_agent_tools():
     assert task.tools == [fake_tool]


-def test_task_tool_takes_precedence_over_agent_tools():
+def test_task_tools_combined_with_agent_tools():
     from crewai.tools import tool

     @tool

@@ -52,7 +52,7 @@ def test_task_tool_takes_precedence_over_agent_tools():

     @tool
     def fake_task_tool() -> None:
-        "Fake tool"
+        "Fake task tool"

     researcher = Agent(
         role="Researcher",

@@ -69,7 +69,9 @@ def test_task_tool_takes_precedence_over_agent_tools():
         tools=[fake_task_tool],
     )

-    assert task.tools == [fake_task_tool]
+    assert len(task.tools) == 2
+    assert any(tool.name == fake_task_tool.name for tool in task.tools)
+    assert any(tool.name == fake_tool.name for tool in task.tools)


 def test_task_prompt_includes_expected_output():

@@ -1455,4 +1457,4 @@ def test_task_with_max_execution_time_exceeded():
     )

     with pytest.raises(TimeoutError):
-        task.execute_sync(agent=researcher)
+        task.execute_sync(agent=researcher)