black formatting
https://github.com/crewAIInc/crewAI.git
@@ -24,9 +24,11 @@ from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, Tool
 from crewai.memory.contextual.contextual_memory import ContextualMemory
 from crewai.utilities import I18N, Logger, Prompts, RPMController
 from crewai.utilities.token_counter_callback import TokenCalcHandler, TokenProcess
 
 try:
     from agentops import track_agent
 except ImportError:
 
     def track_agent():
         def noop(f):
             return f
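
For readers unfamiliar with the pattern in this hunk: the fallback `track_agent` is a decorator factory that hands back a do-nothing decorator when agentops is not installed, so agent classes can be decorated unconditionally. The hunk boundary cuts off the trailing `return noop` the factory needs in order to return a usable decorator. A minimal sketch of the fallback path only (the decorated class is hypothetical, not from the repo):

def track_agent():
    def noop(f):
        return f

    return noop


@track_agent()
class ExampleAgent:  # hypothetical class, only here to exercise the no-op decorator
    pass


print(ExampleAgent.__name__)  # "ExampleAgent": the decorator returns the class unchanged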
@@ -315,9 +317,9 @@ class Agent(BaseModel):
         }
 
         if self._rpm_controller:
-            executor_args[
-                "request_within_rpm_limit"
-            ] = self._rpm_controller.check_or_wait
+            executor_args["request_within_rpm_limit"] = (
+                self._rpm_controller.check_or_wait
+            )
 
         prompt = Prompts(
             i18n=self.i18n,
@@ -3,7 +3,9 @@ from crewai_tools import BaseTool
 
 class MyCustomTool(BaseTool):
     name: str = "Name of my tool"
-    description: str = "Clear description for what this tool is useful for, you agent will need this information to use it."
+    description: str = (
+        "Clear description for what this tool is useful for, you agent will need this information to use it."
+    )
 
     def _run(self, argument: str) -> str:
         # Implementation goes here
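
For context, this is the CLI tool template; filled in, it is used roughly as in the sketch below. The `GreetingTool` implementation and the direct `run()` call are illustrative assumptions, not part of the commit (`BaseTool.run` is assumed to delegate to `_run`).

from crewai_tools import BaseTool


class GreetingTool(BaseTool):  # hypothetical tool built from the template above
    name: str = "Greeting tool"
    description: str = (
        "Returns a short greeting for the given name; agents rely on this description to decide when to call the tool."
    )

    def _run(self, argument: str) -> str:
        # Implementation goes here: keep it a plain function of the input string.
        return f"Hello, {argument}!"


tool = GreetingTool()
# Tools are normally invoked by an agent; calling run() directly is shown only
# to illustrate the interface.
print(tool.run(argument="crew"))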
@@ -26,10 +26,11 @@ from crewai.task import Task
 from crewai.telemetry import Telemetry
 from crewai.tools.agent_tools import AgentTools
 from crewai.utilities import I18N, FileHandler, Logger, RPMController
 
 try:
     import agentops
 except ImportError:
     agentops = None
 
 
 class Crew(BaseModel):
@@ -241,7 +242,7 @@ class Crew(BaseModel):
 
         i18n = I18N(prompt_file=self.prompt_file)
         agentops.set_parent_key("daebe730-f54d-4af5-98df-e6946fb76d13")
-        agentops.add_tags(['crewai'])
+        agentops.add_tags(["crewai"])
 
         for agent in self.agents:
             agent.i18n = i18n
@@ -379,7 +380,9 @@ class Crew(BaseModel):
         if self.max_rpm:
             self._rpm_controller.stop_rpm_counter()
         if agentops:
-            agentops.end_session(end_state="Success", end_state_reason="Finished Execution")
+            agentops.end_session(
+                end_state="Success", end_state_reason="Finished Execution"
+            )
         self._telemetry.end_crew(self, output)
 
     def __repr__(self):
@@ -247,16 +247,16 @@ class Task(BaseModel):
                 return exported_result.model_dump()
             return exported_result
         except Exception:
             # sometimes the response contains valid JSON in the middle of text
             match = re.search(r"({.*})", result, re.DOTALL)
             if match:
                 try:
                     exported_result = model.model_validate_json(match.group(0))
                     if self.output_json:
                         return exported_result.model_dump()
                     return exported_result
                 except Exception:
                     pass
 
         llm = self.agent.function_calling_llm or self.agent.llm
 
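
The surrounding method is a two-stage parse of LLM output: strict JSON validation first, then a regex search for a JSON object embedded in free text. A standalone sketch of the same pattern, assuming pydantic v2; the `Report` model and the sample string are made up for illustration:

from __future__ import annotations

import re

from pydantic import BaseModel


class Report(BaseModel):  # hypothetical output model
    title: str
    score: int


def parse_output(result: str) -> Report | str:
    try:
        return Report.model_validate_json(result)
    except Exception:
        # sometimes the response contains valid JSON in the middle of text
        match = re.search(r"({.*})", result, re.DOTALL)
        if match:
            try:
                return Report.model_validate_json(match.group(0))
            except Exception:
                pass
    return result  # give back the raw string when no JSON can be recovered


print(parse_output('Sure, here it is: {"title": "Q3 review", "score": 8} Hope that helps!'))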
@@ -256,9 +256,11 @@ class Telemetry:
                         "async_execution?": task.async_execution,
                         "output": task.expected_output,
                         "agent_role": task.agent.role if task.agent else "None",
-                        "context": [task.description for task in task.context]
-                        if task.context
-                        else "None",
+                        "context": (
+                            [task.description for task in task.context]
+                            if task.context
+                            else "None"
+                        ),
                         "tools_names": [
                             tool.name.casefold() for tool in task.tools
                         ],
@@ -10,11 +10,12 @@ from crewai.agents.tools_handler import ToolsHandler
 from crewai.telemetry import Telemetry
 from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling
 from crewai.utilities import I18N, Converter, ConverterError, Printer
 
 agentops = None
 try:
     import agentops
 except ImportError:
     pass
 
 OPENAI_BIGGER_MODELS = ["gpt-4"]
 
@@ -167,7 +168,9 @@ class ToolUsage:
                     return error
                 self.task.increment_tools_errors()
                 if agentops:
-                    agentops.record(agentops.ErrorEvent(exception=e, trigger_event=tool_event))
+                    agentops.record(
+                        agentops.ErrorEvent(exception=e, trigger_event=tool_event)
+                    )
                 return self.use(calling=calling, tool_string=tool_string)
 
         if self.tools_handler:
@@ -189,7 +192,7 @@ class ToolUsage:
 
             self._printer.print(content=f"\n\n{result}\n", color="purple")
             if agentops:
                 agentops.record(tool_event)
             self._telemetry.tool_usage(
                 llm=self.function_calling_llm,
                 tool_name=tool.name,
@@ -228,7 +231,10 @@ class ToolUsage:
         for tool in self.tools:
             if (
                 tool.name.lower().strip() == tool_name.lower().strip()
-                or SequenceMatcher(None, tool.name.lower().strip(), tool_name.lower().strip()).ratio() > 0.9
+                or SequenceMatcher(
+                    None, tool.name.lower().strip(), tool_name.lower().strip()
+                ).ratio()
+                > 0.9
             ):
                 return tool
         self.task.increment_tools_errors()
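
The condition above resolves a requested tool name by exact match after normalization, or by a difflib similarity ratio above 0.9. A small standalone check of that threshold (the tool names are made up for illustration):

from difflib import SequenceMatcher


def matches(registered: str, requested: str) -> bool:
    a, b = registered.lower().strip(), requested.lower().strip()
    # Same test as in the hunk: exact match or similarity ratio above 0.9.
    return a == b or SequenceMatcher(None, a, b).ratio() > 0.9


print(matches("Search the internet", "search the internet "))  # True: exact after normalization
print(matches("Search the internet", "search the internets"))  # True: a one-character typo clears 0.9
print(matches("Search the internet", "calculator"))            # False: unrelated name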