Cleaned up logs now that I've isolated the issue to the LLM

Brandon Hancock
2024-06-25 16:22:56 -07:00
parent cc1c97e87d
commit be0a4c2fe5
3 changed files with 13 additions and 39 deletions

View File

@@ -171,7 +171,6 @@ class Agent(BaseModel):
"""set agent executor is set."""
if hasattr(self.llm, "model_name"):
token_handler = TokenCalcHandler(self.llm.model_name, self._token_process)
print("TOKENHANDLER UUID", token_handler.id)
# Ensure self.llm.callbacks is a list
if not isinstance(self.llm.callbacks, list):
@@ -181,10 +180,6 @@ class Agent(BaseModel):
if not any(
isinstance(handler, TokenCalcHandler) for handler in self.llm.callbacks
):
print(
"IMPORTANT: TokenCalcHandler not found in callbacks. Adding",
token_handler.id,
)
self.llm.callbacks.append(token_handler)
if not self.agent_executor:
@@ -384,12 +379,15 @@ class Agent(BaseModel):
"tools",
"tools_handler",
"cache_handler",
"llm", # TODO: THIS GET'S THINGS WORKING AGAIN.``
}
print("LLM IN COPY", self.llm.model_name)
copied_data = self.model_dump(exclude=exclude)
copied_data = {k: v for k, v in copied_data.items() if v is not None}
copied_agent = Agent(**copied_data)
print("COPIED AGENT LLM", copied_agent.llm.model_name)
copied_agent.tools = deepcopy(self.tools)
return copied_agent
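
Note on the hunk above: Agent.copy() excludes the llm field from model_dump (the TODO marks this as what got things working again), and _set_agent_executor only appends a TokenCalcHandler to the LLM's callback list when none is registered yet. Below is a minimal, self-contained sketch of that register-once pattern; TokenCounter, FakeLLM and ensure_token_handler are illustrative stand-ins, not crewAI or LangChain APIs.

from dataclasses import dataclass, field
from typing import Any


@dataclass
class TokenCounter:
    """Illustrative stand-in for TokenCalcHandler: accumulates token counts."""
    model_name: str
    total_tokens: int = 0

    def on_llm_end(self, tokens_used: int) -> None:
        self.total_tokens += tokens_used


@dataclass
class FakeLLM:
    """Illustrative stand-in for the LLM client; only the callbacks list matters here."""
    model_name: str
    callbacks: Any = field(default_factory=list)


def ensure_token_handler(llm: FakeLLM) -> TokenCounter:
    # Normalise callbacks to a list, then register a handler only if none is
    # present yet, mirroring the append-once check in the hunk above.
    if not isinstance(llm.callbacks, list):
        llm.callbacks = [llm.callbacks] if llm.callbacks else []
    for handler in llm.callbacks:
        if isinstance(handler, TokenCounter):
            return handler
    handler = TokenCounter(llm.model_name)
    llm.callbacks.append(handler)
    return handler


llm = FakeLLM("example-model")
first = ensure_token_handler(llm)
second = ensure_token_handler(llm)  # re-running does not add a duplicate
assert first is second and len(llm.callbacks) == 1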

View File

@@ -279,19 +279,10 @@ class Crew(BaseModel):
f"The process '{self.process}' is not implemented yet."
)
print("FINISHED EXECUTION OF CREW", self.id)
print("GOING TO INVESTIGATE TOKEN USAGE")
for agent in self.agents:
print("ANALYZING AGENT", agent.id)
print("AGENT _token_process id: ", agent._token_process.id)
print("AGENT USAGE METRICS", agent._token_process.get_summary())
# TODO: THIS IS A BUG. ONLY THE LAST AGENT'S TOKEN USAGE IS BEING RETURNED
metrics = metrics + [
agent._token_process.get_summary() for agent in self.agents
]
print()
self.usage_metrics = {
key: sum([m[key] for m in metrics if m is not None]) for key in metrics[0]
}
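
For reference, the comprehension above sums the per-agent token summaries key by key; the removed TODO notes that only the last agent's usage was actually showing up. A minimal sketch of that per-key aggregation follows (aggregate_usage is a hypothetical helper, not part of the project); filtering out None summaries before touching the first entry is an extra guard the original comprehension applies only inside the sum.

from typing import Dict, List, Optional


def aggregate_usage(metrics: List[Optional[Dict[str, int]]]) -> Dict[str, int]:
    # Drop missing summaries first so the keys and the inner sum always come
    # from real dicts, then sum every key across all agents.
    present = [m for m in metrics if m is not None]
    if not present:
        return {}
    return {key: sum(m.get(key, 0) for m in present) for key in present[0]}


agent_summaries = [
    {"total_tokens": 1200, "prompt_tokens": 900, "completion_tokens": 300},
    {"total_tokens": 800, "prompt_tokens": 600, "completion_tokens": 200},
]
assert aggregate_usage(agent_summaries) == {
    "total_tokens": 2000,
    "prompt_tokens": 1500,
    "completion_tokens": 500,
}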
@@ -312,6 +303,16 @@ class Crew(BaseModel):
# TODO: I would expect we would want to merge the usage metrics from each crew execution
results.append(output)
print("CREW USAGE METRICS:", crew.usage_metrics)
print(
"ORIGINAL AGENT USAGE METRICS",
[agent._token_process.get_summary() for agent in self.agents],
)
print(
"COPIED AGENT USAGE METRICS",
[agent._token_process.get_summary() for agent in crew.agents],
)
return results
async def kickoff_async(
@@ -332,7 +333,6 @@ class Crew(BaseModel):
# TODO: FIGURE OUT HOW TO MERGE THE USAGE METRICS
# TODO: I would expect we would want to merge the usage metrics from each crew execution
return results
def train(self, n_iterations: int) -> None:
@@ -384,8 +384,6 @@ class Crew(BaseModel):
for key in total_token_usage:
total_token_usage[key] += current_token_usage.get(key, 0)
print("Updated total_token_usage:", total_token_usage)
self._finish_execution(task_output)
# type: ignore # Item "None" of "Agent | None" has no attribute "_token_process")
@@ -461,11 +459,6 @@ class Crew(BaseModel):
"tasks",
}
print("CREW ID", self.id)
print("CURRENT IDS FOR AGENTS", [agent.id for agent in self.agents])
print("CURRENT IDS FOR TASKS", [task.id for task in self.tasks])
# TODO: I think there is a disconnect. We need to pass new agents and tasks to the new crew
cloned_agents = [agent.copy() for agent in self.agents]
cloned_tasks = [task.copy(cloned_agents) for task in self.tasks]
@@ -477,12 +470,6 @@ class Crew(BaseModel):
copied_crew = Crew(**copied_data, agents=cloned_agents, tasks=cloned_tasks)
print("COPIED CREW ID", copied_crew.id)
print("NEW IDS FOR AGENTS", [agent.id for agent in copied_crew.agents])
print("NEW IDS FOR TASKS", [task.id for task in copied_crew.tasks])
# TODO: EXPERIMENT, PRINT IDS AND MAKE SURE I'M CALLING THE RIGHT AGENTS AND TASKS
return copied_crew
def _set_tasks_callbacks(self) -> None:
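
The removed TODO above ("pass new agents and tasks to the new crew") describes the cloning order Crew.copy() uses: clone the agents first, then build each task copy against the cloned agents so it points at a clone rather than an original. Below is a minimal sketch of that rewiring; MiniAgent, MiniTask and the role-based matching are illustrative assumptions, not the project's implementation.

from dataclasses import dataclass, replace
from typing import List, Optional


@dataclass
class MiniAgent:
    role: str


@dataclass
class MiniTask:
    description: str
    agent: Optional[MiniAgent] = None

    def copy(self, cloned_agents: List[MiniAgent]) -> "MiniTask":
        # Re-point the task at the clone of its original agent; matching by
        # role is an assumption made for this sketch only.
        new_agent = None
        if self.agent is not None:
            new_agent = next(a for a in cloned_agents if a.role == self.agent.role)
        return replace(self, agent=new_agent)


agents = [MiniAgent("researcher"), MiniAgent("writer")]
tasks = [MiniTask("research the topic", agents[0]), MiniTask("write the report", agents[1])]

cloned_agents = [replace(a) for a in agents]           # clone agents first
cloned_tasks = [t.copy(cloned_agents) for t in tasks]  # then rewire tasks to the clones

assert cloned_tasks[0].agent is cloned_agents[0]
assert cloned_tasks[0].agent is not agents[0]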
@@ -511,7 +498,6 @@ class Crew(BaseModel):
If full_output is True, the returned data type will be a dictionary; otherwise, the returned output is a string
"""
if self.full_output:
print("SPITTING OUT FULL OUTPUT FOR CREW", self.id)
return { # type: ignore # Incompatible return value type (got "dict[str, Sequence[str | TaskOutput | None]]", expected "str")
"final_output": output,
"tasks_outputs": [task.output for task in self.tasks if task],

View File

@@ -199,12 +199,6 @@ class Task(BaseModel):
tools=tools,
)
print("CALLING EXECUTE ON TASK WITH ID", task.id)
print("THIS TASK IS CALLING AGENT", agent.id)
print(
"CALLING TOKEN PROCESS in task on AGENT", agent._token_process.get_summary()
)
exported_output = self._export_output(result)
self.output = TaskOutput(
@@ -260,7 +254,6 @@ class Task(BaseModel):
"context",
"tools",
}
print("ORIGINAL TOOLS:", self.tools)
copied_data = self.model_dump(exclude=exclude)
copied_data = {k: v for k, v in copied_data.items() if v is not None}
@@ -277,8 +270,6 @@ class Task(BaseModel):
# cloned_agent = self.agent.copy() if self.agent else None
cloned_tools = deepcopy(self.tools) if self.tools else []
print("CLONED_TOOLS", cloned_tools)
copied_task = Task(
**copied_data,
context=cloned_context,
@@ -286,7 +277,6 @@ class Task(BaseModel):
tools=cloned_tools,
)
print("TASK TOOLS:", copied_task.tools)
return copied_task
def _export_output(self, result: str) -> Any:
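
All three copy() methods touched by this commit share the same shape: model_dump with an exclude set for fields that need special handling, drop None values, rebuild the model, then reattach deep copies of the excluded objects (tools here). A minimal pydantic v2 sketch of that pattern follows; MiniTask and copy_task are illustrative, not the crewAI classes.

from copy import deepcopy
from typing import List, Optional

from pydantic import BaseModel


class MiniTask(BaseModel):
    description: str
    tools: List[str] = []
    output: Optional[str] = None

    def copy_task(self) -> "MiniTask":
        exclude = {"tools"}                       # handled manually below
        data = self.model_dump(exclude=exclude)   # dump the remaining fields
        data = {k: v for k, v in data.items() if v is not None}
        copied = MiniTask(**data)
        copied.tools = deepcopy(self.tools)       # reattach a deep copy of the excluded field
        return copied


task = MiniTask(description="summarise the findings", tools=["search"])
clone = task.copy_task()
assert clone.tools == task.tools and clone.tools is not task.tools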