Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-09 16:18:30 +00:00
Added support for logging in JSON format as well. (#1985)
* Added functionality to have JSON format as well for the logs
* Added additional comments, refactored logging functionality
* Fixed documentation to include the new parameter
* Fixed typo
* Added a Pydantic error check between the output_log_file and save_as_json parameters
* Removed the save_to_json parameter, incorporated the functionality directly into output_log_file
* Fixed typo
* Sorted the imports using isort

---------

Co-authored-by: Vidit Ostwal <vidit.ostwal@piramal.com>
Co-authored-by: Brandon Hancock (bhancock_ai) <109994880+bhancockio@users.noreply.github.com>
@@ -23,14 +23,14 @@ A crew in crewAI represents a collaborative group of agents working together to
| **Language** _(optional)_ | `language` | Language used for the crew, defaults to English. |
| **Language File** _(optional)_ | `language_file` | Path to the language file to be used for the crew. |
| **Memory** _(optional)_ | `memory` | Utilized for storing execution memories (short-term, long-term, entity memory). |
| **Memory Config** _(optional)_ | `memory_config` | Configuration for the memory provider to be used by the crew. |
| **Cache** _(optional)_ | `cache` | Specifies whether to use a cache for storing the results of tools' execution. Defaults to `True`. |
| **Embedder** _(optional)_ | `embedder` | Configuration for the embedder to be used by the crew. Mostly used by memory for now. Default is `{"provider": "openai"}`. |
| **Full Output** _(optional)_ | `full_output` | Whether the crew should return the full output with all tasks outputs or just the final output. Defaults to `False`. |
| **Step Callback** _(optional)_ | `step_callback` | A function that is called after each step of every agent. This can be used to log the agent's actions or to perform other operations; it won't override the agent-specific `step_callback`. |
| **Task Callback** _(optional)_ | `task_callback` | A function that is called after the completion of each task. Useful for monitoring or additional operations post-task execution. |
| **Share Crew** _(optional)_ | `share_crew` | Whether you want to share the complete crew information and execution with the crewAI team to make the library better, and allow us to train models. |
| **Output Log File** _(optional)_ | `output_log_file` | Set to `True` to save logs as logs.txt in the current directory, or pass a file path. Logs are saved in JSON format if the filename ends in .json, otherwise as .txt. Defaults to `None`. |
| **Manager Agent** _(optional)_ | `manager_agent` | `manager` sets a custom agent that will be used as a manager. |
| **Prompt File** _(optional)_ | `prompt_file` | Path to the prompt JSON file to be used for the crew. |
| **Planning** *(optional)* | `planning` | Adds planning ability to the Crew. When activated before each Crew iteration, all Crew data is sent to an AgentPlanner that will plan the tasks and this plan will be added to each task description. |
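For context (not part of this diff), a crew wired up with several of the optional attributes above might look like the following sketch; the `researcher` agent, `report_task` task, and their constructor arguments are illustrative placeholders.

```python Code
from crewai import Agent, Crew, Task

# Hypothetical agent and task, used only to make the example self-contained.
researcher = Agent(
    role="Researcher",
    goal="Summarize recent findings on a topic",
    backstory="An analyst who writes concise summaries.",
)
report_task = Task(
    description="Write a short summary of the topic.",
    expected_output="A one-paragraph summary.",
    agent=researcher,
)

# Several of the optional attributes from the table above.
crew = Crew(
    agents=[researcher],
    tasks=[report_task],
    cache=True,                        # cache tool execution results
    full_output=True,                  # return all task outputs, not just the final one
    output_log_file="crew_log.json",   # JSON logging introduced by this change
)
```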
@@ -240,6 +240,23 @@ print(f"Tasks Output: {crew_output.tasks_output}")
print(f"Token Usage: {crew_output.token_usage}")
```

## Accessing Crew Logs

You can see a real-time log of the crew execution by setting `output_log_file` to `True` (boolean) or to a `file_name` (string). Events can be logged to either `file_name.txt` or `file_name.json`.

If set to `True`, logs are saved as `logs.txt`.

If `output_log_file` is set to `False` or `None`, no logs are written.

```python Code
# Save crew logs
crew = Crew(output_log_file=True)              # Logs will be saved as logs.txt
crew = Crew(output_log_file="file_name")       # Logs will be saved as file_name.txt
crew = Crew(output_log_file="file_name.txt")   # Logs will be saved as file_name.txt
crew = Crew(output_log_file="file_name.json")  # Logs will be saved as file_name.json
```

## Memory Utilization

Crews can utilize memory (short-term, long-term, and entity memory) to enhance their execution and learning over time. This feature allows crews to store and recall execution memories, aiding in decision-making and task execution strategies.
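Because the JSON variant stores the log as a list of entries, each carrying a `timestamp` plus the logged fields, the file can be read back with the standard `json` module. A minimal sketch (not part of this diff), assuming the crew was created with `output_log_file="crew_log.json"` and has already run:

```python Code
import json

# Assumes a crew was run with output_log_file="crew_log.json".
with open("crew_log.json", "r", encoding="utf-8") as f:
    entries = json.load(f)  # the handler stores a JSON list of log entries

for entry in entries:
    # Each entry has a "timestamp" plus whatever fields were logged.
    details = {k: v for k, v in entry.items() if k != "timestamp"}
    print(entry["timestamp"], details)
```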
@@ -184,9 +184,9 @@ class Crew(BaseModel):
        default=None,
        description="Path to the prompt json file to be used for the crew.",
    )
    output_log_file: Optional[Union[bool, str]] = Field(
        default=None,
        description="Path to the log file to be saved",
    )
    planning: Optional[bool] = Field(
        default=False,
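The annotation change above widens the field from `Optional[str]` to `Optional[Union[bool, str]]`, so the same attribute can carry `True`, a path string, or `None`. A standalone sketch of how Pydantic treats such a declaration; the `LogConfig` model is illustrative and not part of crewAI:

```python
from typing import Optional, Union

from pydantic import BaseModel, Field


class LogConfig(BaseModel):
    # Mirrors the widened crew field: boolean flag, explicit path, or disabled.
    output_log_file: Optional[Union[bool, str]] = Field(
        default=None,
        description="Path to the log file to be saved",
    )


print(LogConfig(output_log_file=True).output_log_file)            # True -> handler defaults to logs.txt
print(LogConfig(output_log_file="run_log.json").output_log_file)  # explicit JSON path
print(LogConfig().output_log_file)                                # None -> logging disabled
```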
@@ -440,6 +440,7 @@ class Crew(BaseModel):
        )
        return self

    @property
    def key(self) -> str:
        source = [agent.key for agent in self.agents] + [
@@ -1,30 +1,64 @@
import json
import os
import pickle
from datetime import datetime
from typing import Union


class FileHandler:
    """Handler for file operations supporting both JSON and text-based logging.

    Args:
        file_path (Union[bool, str]): Path to the log file or a boolean flag.
    """

    def __init__(self, file_path: Union[bool, str]):
        self._initialize_path(file_path)

    def _initialize_path(self, file_path: Union[bool, str]):
        if file_path is True:  # File path is the boolean True
            self._path = os.path.join(os.curdir, "logs.txt")
        elif isinstance(file_path, str):  # File path is a string
            if file_path.endswith((".json", ".txt")):
                self._path = file_path  # No modification if the file ends with .json or .txt
            else:
                self._path = file_path + ".txt"  # Append .txt if the file doesn't end with .json or .txt
        else:
            raise ValueError("file_path must be a string or a boolean.")  # Handle the case where file_path isn't valid

    def log(self, **kwargs):
        try:
            now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            log_entry = {"timestamp": now, **kwargs}

            if self._path.endswith(".json"):
                # Append the log entry to the JSON list, creating the list if needed
                try:
                    # Read existing content to avoid overwriting it
                    with open(self._path, "r", encoding="utf-8") as read_file:
                        existing_data = json.load(read_file)
                    existing_data.append(log_entry)
                except (json.JSONDecodeError, FileNotFoundError):
                    # If there is no valid JSON or the file doesn't exist, start a new list
                    existing_data = [log_entry]

                with open(self._path, "w", encoding="utf-8") as write_file:
                    json.dump(existing_data, write_file, indent=4)
                    write_file.write("\n")
            else:
                # Append the log entry in plain text format
                message = f"{now}: " + ", ".join([f'{key}="{value}"' for key, value in kwargs.items()]) + "\n"
                with open(self._path, "a", encoding="utf-8") as file:
                    file.write(message)

        except Exception as e:
            raise ValueError(f"Failed to log message: {str(e)}")
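A brief usage sketch of the handler defined above (not part of this diff); the logged field names are arbitrary:

```python
# Illustrative use of the FileHandler defined above.
text_logger = FileHandler(True)                # resolves to ./logs.txt
json_logger = FileHandler("crew_events.json")  # entries appended to a JSON list

text_logger.log(task="research", status="started")
json_logger.log(task="research", status="completed", agent="Researcher")
```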
class PickleHandler:
    def __init__(self, file_name: str) -> None:
        """