Feature: Log files (#423)

* log_file

feature: added a new parameter for `Crew` that creates a txt file to log agent execution

* unit tests and documentation

the unit test verifies that the file is created, but not its contents
GabeKoga
2024-04-05 19:44:50 -03:00
committed by GitHub
parent aebc443b62
commit 9db99befb6
7 changed files with 231 additions and 5 deletions


@@ -27,6 +27,8 @@ A crew in crewAI represents a collaborative group of agents working together to
 | **Step Callback** *(optional)* | A function that is called after each step of every agent. This can be used to log the agent's actions or to perform other operations; it won't override the agent-specific `step_callback`. |
 | **Task Callback** *(optional)* | A function that is called after the completion of each task. Useful for monitoring or additional operations post-task execution. |
 | **Share Crew** *(optional)* | Whether you want to share the complete crew information and execution with the crewAI team to make the library better, and allow us to train models. |
+| **Output Log File** *(optional)* | Whether you want a file with the complete crew output and execution. Set it to `True` to write `logs.txt` in the current directory, or pass a string with the full path and name of the file. |

 !!! note "Crew Max RPM"
     The `max_rpm` attribute sets the maximum number of requests per minute the crew can perform to avoid rate limits and will override individual agents' `max_rpm` settings if you set it.
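As a quick usage sketch of the new parameter (illustrative only; the agent and task definitions below are placeholders, not part of this commit):

```python
from crewai import Agent, Crew, Task

# Placeholder agent and task, just to make the sketch self-contained.
researcher = Agent(
    role="Researcher",
    goal="Research AI topics",
    backstory="An expert researcher.",
)
say_hi = Task(description="Say Hi", expected_output="The word: Hi", agent=researcher)

# True writes logs.txt in the current directory...
crew = Crew(agents=[researcher], tasks=[say_hi], output_log_file=True)
# ...while a string is treated as the full path and name of the file:
# crew = Crew(agents=[researcher], tasks=[say_hi], output_log_file="out/crew_run.txt")
crew.kickoff()
```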

poetry.lock generated

@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.

 [[package]]
 name = "aiohttp"
@@ -4962,13 +4962,13 @@ test = ["black (>=22.3.0,<23.0.0)", "coverage (>=6.2,<7.0)", "isort (>=5.0.6,<6.
 [[package]]
 name = "typing-extensions"
-version = "4.10.0"
+version = "4.11.0"
 description = "Backported and Experimental Type Hints for Python 3.8+"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "typing_extensions-4.10.0-py3-none-any.whl", hash = "sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475"},
-    {file = "typing_extensions-4.10.0.tar.gz", hash = "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb"},
+    {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"},
+    {file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"},
 ]

 [[package]]


@@ -25,7 +25,7 @@ from crewai.process import Process
 from crewai.task import Task
 from crewai.telemetry import Telemetry
 from crewai.tools.agent_tools import AgentTools
-from crewai.utilities import I18N, Logger, RPMController
+from crewai.utilities import I18N, Logger, RPMController, FileHandler


 class Crew(BaseModel):
@@ -55,6 +55,7 @@ class Crew(BaseModel):
     _execution_span: Any = PrivateAttr()
     _rpm_controller: RPMController = PrivateAttr()
     _logger: Logger = PrivateAttr()
+    _file_handler: FileHandler = PrivateAttr()
     _cache_handler: InstanceOf[CacheHandler] = PrivateAttr(default=CacheHandler())
     _short_term_memory: Optional[InstanceOf[ShortTermMemory]] = PrivateAttr()
     _long_term_memory: Optional[InstanceOf[LongTermMemory]] = PrivateAttr()
@@ -115,6 +116,10 @@ class Crew(BaseModel):
         default=None,
         description="Path to the language file to be used for the crew.",
     )
+    output_log_file: Optional[Union[bool, str]] = Field(
+        default=False,
+        description="output_log_file",
+    )

     @field_validator("id", mode="before")
     @classmethod
@@ -145,6 +150,8 @@ class Crew(BaseModel):
         """Set private attributes."""
         self._cache_handler = CacheHandler()
         self._logger = Logger(self.verbose)
+        if self.output_log_file:
+            self._file_handler = FileHandler(self.output_log_file)
         self._rpm_controller = RPMController(max_rpm=self.max_rpm, logger=self._logger)
         self._telemetry = Telemetry()
         self._telemetry.set_tracer()
@@ -278,6 +285,11 @@ class Crew(BaseModel):
                 "info", f"== Starting Task: {task.description}", color="bold_purple"
             )
+            if self.output_log_file:
+                self._file_handler.log(
+                    agent=role, task=task.description, status="started"
+                )

             output = task.execute(context=task_output)
             if not task.async_execution:
                 task_output = output
@@ -285,6 +297,9 @@ class Crew(BaseModel):
             role = task.agent.role if task.agent is not None else "None"
             self._logger.log("debug", f"== [{role}] Task output: {task_output}\n\n")
+            if self.output_log_file:
+                self._file_handler.log(agent=role, task=task_output, status="completed")

         self._finish_execution(task_output)
         return self._format_output(task_output)
@@ -306,12 +321,22 @@ class Crew(BaseModel):
         self._logger.log("debug", f"Working Agent: {manager.role}")
         self._logger.log("info", f"Starting Task: {task.description}")
+        if self.output_log_file:
+            self._file_handler.log(
+                agent=manager.role, task=task.description, status="started"
+            )

         task_output = task.execute(
             agent=manager, context=task_output, tools=manager.tools
         )

         self._logger.log("debug", f"[{manager.role}] Task output: {task_output}")
+        if self.output_log_file:
+            self._file_handler.log(
+                agent=manager.role, task=task_output, status="completed"
+            )

         self._finish_execution(task_output)
         return self._format_output(task_output), manager._token_process.get_summary()


@@ -5,3 +5,4 @@ from .logger import Logger
 from .printer import Printer
 from .prompts import Prompts
 from .rpm_controller import RPMController
+from .fileHandler import FileHandler


@@ -0,0 +1,20 @@
import os
from datetime import datetime


class FileHandler:
    """Handles file operations; currently it only appends log messages to a file."""

    def __init__(self, file_path):
        if isinstance(file_path, bool):
            self._path = os.path.join(os.curdir, "logs.txt")
        elif isinstance(file_path, str):
            self._path = file_path
        else:
            raise ValueError("file_path must be either a boolean or a string.")

    def log(self, **kwargs):
        now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        # Prefix the timestamp once, then join the key=value pairs
        # (the original joined on the timestamp string, scattering it between pairs).
        message = f"{now}: " + ", ".join([f"{key}={value}" for key, value in kwargs.items()])
        with open(self._path, "a") as file:
            file.write(message + "\n")
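To see what the handler writes, here is a minimal sketch using only the class above (the example output line assumes the corrected timestamp-prefix join):

```python
from crewai.utilities import FileHandler

handler = FileHandler(True)  # a boolean resolves to ./logs.txt
handler.log(agent="Researcher", task="Say Hi", status="started")
# Appends a line such as:
# 2024-04-05 19:36:55: agent=Researcher, task=Say Hi, status=started
```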


@@ -0,0 +1,160 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are Researcher. You''re
an expert researcher, specialized in technology, software engineering, AI and
startups. You work as a freelancer and is now working on doing research and
analysis for a new customer.\nYour personal goal is: Make the best research
and analysis on content about AI and AI agentsTo give my best complete final
answer to the task use the exact following format:\n\nThought: I now can give
a great answer\nFinal Answer: my best complete final answer to the task.\nYour
final answer must be the great and the most complete as possible, it must be
outcome described.\n\nI MUST use these formats, my job depends on it!\nCurrent
Task: Say Hi\n\nThis is the expect criteria for your final answer: The word:
Hi \n you MUST return the actual complete content as the final answer, not a
summary.\n\nBegin! This is VERY important to you, use the tools available and
give your best Final Answer, your job depends on it!\n\nThought: \n"}], "model":
"gpt-4", "n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, br
connection:
- keep-alive
content-length:
- '1082'
content-type:
- application/json
cookie:
- _cfuvid=zVKnitRNLhrt8b2P3MHGfXS_82YiqkGpi46seIwshAM-1709396719694-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.16.2
x-stainless-arch:
- other:amd64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- Windows
x-stainless-package-version:
- 1.16.2
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.10.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: 'data: {"id":"chatcmpl-9AjeorqO0QfP8DJ8NwGIUKSlhQqav","object":"chat.completion.chunk","created":1712345814,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9AjeorqO0QfP8DJ8NwGIUKSlhQqav","object":"chat.completion.chunk","created":1712345814,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"I"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9AjeorqO0QfP8DJ8NwGIUKSlhQqav","object":"chat.completion.chunk","created":1712345814,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
now"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9AjeorqO0QfP8DJ8NwGIUKSlhQqav","object":"chat.completion.chunk","created":1712345814,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
can"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9AjeorqO0QfP8DJ8NwGIUKSlhQqav","object":"chat.completion.chunk","created":1712345814,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
give"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9AjeorqO0QfP8DJ8NwGIUKSlhQqav","object":"chat.completion.chunk","created":1712345814,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
a"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9AjeorqO0QfP8DJ8NwGIUKSlhQqav","object":"chat.completion.chunk","created":1712345814,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
great"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9AjeorqO0QfP8DJ8NwGIUKSlhQqav","object":"chat.completion.chunk","created":1712345814,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
answer"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9AjeorqO0QfP8DJ8NwGIUKSlhQqav","object":"chat.completion.chunk","created":1712345814,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\n\n"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9AjeorqO0QfP8DJ8NwGIUKSlhQqav","object":"chat.completion.chunk","created":1712345814,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9AjeorqO0QfP8DJ8NwGIUKSlhQqav","object":"chat.completion.chunk","created":1712345814,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Answer"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9AjeorqO0QfP8DJ8NwGIUKSlhQqav","object":"chat.completion.chunk","created":1712345814,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9AjeorqO0QfP8DJ8NwGIUKSlhQqav","object":"chat.completion.chunk","created":1712345814,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Hi"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9AjeorqO0QfP8DJ8NwGIUKSlhQqav","object":"chat.completion.chunk","created":1712345814,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
data: [DONE]
'
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 86fbfd5e4e45012b-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Type:
- text/event-stream
Date:
- Fri, 05 Apr 2024 19:36:55 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=Dup92ckJhbI_FxZty6XIjohW.sTaChSMX.8lwju_iA8-1712345815-1.0.1.1-DYCBIKozKcyEYWv.mE5gRee5frdxJU8EBOeZrex7BOH_U4HLjPJ4IMUP0m_YMiO3fKf5IClhW3KIzE8cl2C.ww;
path=/; expires=Fri, 05-Apr-24 20:06:55 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=.Ctsps4oQpopvSdn2mneN2jnLB0vatjzGjPz1HgR734-1712345815450-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '363'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299750'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 50ms
x-request-id:
- req_83d3bc5e55b3d012f700b51707cc46e0
status:
code: 200
message: OK
version: 1


@@ -894,3 +894,21 @@ def test_disabled_memory_using_contextual_memory():
     with patch.object(ContextualMemory, "build_context_for_task") as contextual_mem:
         crew.kickoff()
         contextual_mem.assert_not_called()
+
+
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_crew_log_file_output(tmp_path):
+    test_file = tmp_path / "logs.txt"
+    tasks = [
+        Task(
+            description="Say Hi",
+            expected_output="The word: Hi",
+            agent=researcher,
+        )
+    ]
+
+    test_message = {"agent": "Researcher", "task": "Say Hi"}
+    crew = Crew(agents=[researcher], tasks=tasks, output_log_file=str(test_file))
+    crew.kickoff()
+
+    assert test_file.exists()
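Since the commit message notes the test checks only that the file is created, a content check could be added along these lines (a hypothetical extension, not part of this commit; it assumes the `key=value` format written by `FileHandler.log`):

```python
# Hypothetical follow-up assertions, not part of this commit.
content = test_file.read_text()
assert "agent=Researcher" in content
assert "status=started" in content
assert "status=completed" in content
```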