mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-01-12 17:48:30 +00:00
Introduced a new set of environment tools to enhance file system interactions within the CrewAI framework. This includes tools for reading files, searching for files by name patterns, and listing directory contents, all with built-in path security to prevent unauthorized access. The new tools are designed to facilitate context engineering for agents, improving their ability to interact with the file system effectively. Additionally, updated the experimental module's `__init__` to include these new tools in the public API.
57 lines
1.4 KiB
Python
57 lines
1.4 KiB
Python
"""Public API for the ``crewai.experimental`` package.

Re-exports the flow-based agent executor, the environment (file-system)
tools, and the agent-evaluation framework so that callers can import
everything directly from ``crewai.experimental``. ``__all__`` is kept
alphabetically sorted and lists exactly the names re-exported below.
"""

from crewai.experimental.crew_agent_executor_flow import CrewAgentExecutorFlow
from crewai.experimental.environment_tools import (
    BaseEnvironmentTool,
    EnvironmentTools,
    FileReadTool,
    FileSearchTool,
    GrepTool,
    ListDirTool,
)
from crewai.experimental.evaluation import (
    AgentEvaluationResult,
    AgentEvaluator,
    BaseEvaluator,
    EvaluationScore,
    EvaluationTraceCallback,
    ExperimentResult,
    ExperimentResults,
    ExperimentRunner,
    GoalAlignmentEvaluator,
    MetricCategory,
    ParameterExtractionEvaluator,
    ReasoningEfficiencyEvaluator,
    SemanticQualityEvaluator,
    ToolInvocationEvaluator,
    ToolSelectionEvaluator,
    create_default_evaluator,
    create_evaluation_callbacks,
)


__all__ = [
    "AgentEvaluationResult",
    "AgentEvaluator",
    "BaseEnvironmentTool",
    "BaseEvaluator",
    "CrewAgentExecutorFlow",
    "EnvironmentTools",
    "EvaluationScore",
    "EvaluationTraceCallback",
    "ExperimentResult",
    "ExperimentResults",
    "ExperimentRunner",
    "FileReadTool",
    "FileSearchTool",
    "GoalAlignmentEvaluator",
    "GrepTool",
    "ListDirTool",
    "MetricCategory",
    "ParameterExtractionEvaluator",
    "ReasoningEfficiencyEvaluator",
    "SemanticQualityEvaluator",
    "ToolInvocationEvaluator",
    "ToolSelectionEvaluator",
    "create_default_evaluator",
    "create_evaluation_callbacks",
]