Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-07 15:18:29 +00:00)
adding agent step callback
@@ -25,7 +25,7 @@ description: What are crewAI Agents and how to use them.
| **Max RPM** | The maximum number of requests per minute the agent can perform to avoid rate limits |
| **Verbose** | This allows you to see what is actually going on during the Crew execution. |
| **Allow Delegation** | Agents can delegate tasks or questions to one another, ensuring that each task is handled by the most suitable agent. |
| **Step Callback** | A function that is called after each step of the agent. This can be used to log the agent's actions or to perform other operations. |

## Creating an Agent
@@ -51,6 +51,7 @@ agent = Agent(
  max_rpm=10,
  verbose=True,
  allow_delegation=True,
  step_callback=my_intermediate_step_callback
)
```
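The example above passes a `my_intermediate_step_callback` that the snippet never defines. A minimal sketch of what such a callback could look like (the name and the print-based logging are illustrative assumptions, not part of this commit; the executor simply calls the callback with the output of each step):

```python
# Illustrative only: a trivial step callback that logs each intermediate step.
# The executor invokes it as step_callback(step_output), where step_output is
# whatever the underlying agent step produced.
def my_intermediate_step_callback(step_output):
    print(f"Agent completed a step: {step_output}")
```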
@@ -46,6 +46,7 @@ class Agent(BaseModel):
        verbose: Whether the agent execution should be in verbose mode.
        allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
        tools: Tools at the agent's disposal.
        step_callback: Callback to be executed after each step of the agent execution.
    """

    __hash__ = object.__hash__  # type: ignore
@@ -90,6 +91,10 @@ class Agent(BaseModel):
    cache_handler: InstanceOf[CacheHandler] = Field(
        default=CacheHandler(), description="An instance of the CacheHandler class."
    )
    step_callback: Optional[Any] = Field(
        default=None,
        description="Callback to be executed after each step of the agent execution.",
    )
    i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
    llm: Any = Field(
        default_factory=lambda: ChatOpenAI(
@@ -200,12 +205,13 @@ class Agent(BaseModel):
            "verbose": self.verbose,
            "handle_parsing_errors": True,
            "max_iterations": self.max_iter,
            "step_callback": self.step_callback,
        }

        if self._rpm_controller:
            executor_args["request_within_rpm_limit"] = (
                self._rpm_controller.check_or_wait
            )
            executor_args[
                "request_within_rpm_limit"
            ] = self._rpm_controller.check_or_wait

        if self.memory:
            summary_memory = ConversationSummaryMemory(
@@ -22,6 +22,7 @@ class CrewAgentExecutor(AgentExecutor):
    request_within_rpm_limit: Any = None
    max_iterations: Optional[int] = 15
    force_answer_max_iterations: Optional[int] = None
    step_callback: Optional[Any] = None

    @root_validator()
    def set_force_answer_max_iterations(cls, values: Dict) -> Dict:
@@ -67,7 +68,7 @@ class CrewAgentExecutor(AgentExecutor):
                    return self._return(
                        next_step_output, intermediate_steps, run_manager=run_manager
                    )

                if self.step_callback:
                    self.step_callback(next_step_output)
                intermediate_steps.extend(next_step_output)
                if len(next_step_output) == 1:
                    next_step_action = next_step_output[0]
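As the executor change above shows, the callback receives `next_step_output` exactly as returned by the underlying LangChain `_take_next_step` call, i.e. a list of `(AgentAction, observation)` tuples for intermediate steps (the `AgentFinish` case returns before the callback fires). A hedged sketch of a callback that unpacks that structure (the function name and logging are illustrative, not part of the commit):

```python
# Illustrative only: unpack the executor's intermediate step output.
# Assumes LangChain-style output: a list of (AgentAction, observation) tuples;
# anything else is logged as-is for safety.
def log_agent_step(step_output):
    if isinstance(step_output, list):
        for action, observation in step_output:
            print(f"tool={getattr(action, 'tool', None)!r} observation={observation!r}")
    else:
        print(f"step output: {step_output!r}")
```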