yangdefeng committed Sep 26, 2021
1 parent e87276e commit 23b8645
Showing 6 changed files with 36 additions and 12 deletions.
3 changes: 2 additions & 1 deletion .gitignore
@@ -22,4 +22,5 @@ test_frame/my/
redis_queue_web/
not_up_git/
dist/
*.egg-info/
*.egg-info/
distributed_frame_config.py
4 changes: 2 additions & 2 deletions nb_log/handlers.py
@@ -607,7 +607,7 @@ class ConcurrentRotatingFileHandlerWithBufferInitiativeWindwos(ConcurrentRotatin
"""
ConcurrentRotatingFileHandler solves file rotation under multiple processes, but its frequent file-lock operations cause a huge drop in program performance.
Repeated tests of peak log-write frequency show writes 100x slower than non-rotating writes on Windows, and 10x slower on Linux. The multi-process rotation file lock uses pywin32 on Windows and still has to be implemented with fcntl on Linux.
This class therefore buffers one second of logs into a single long string before inserting it, greatly reducing the number of file lock/unlock operations; the speed is almost the same as writing the file without multi-process-safe rotation
This class therefore buffers one second of logs into a single long string before inserting it, greatly reducing the number of file lock/unlock operations; the speed is faster than writing the file without multi-process-safe rotation
The write to the file is triggered proactively.
"""
file_handler_list = []
@@ -682,7 +682,7 @@ class ConcurrentRotatingFileHandlerWithBufferInitiativeLinux(ConcurrentRotatingF
"""
ConcurrentRotatingFileHandler solves file rotation under multiple processes, but its frequent file-lock operations cause a huge drop in program performance.
Repeated tests of peak log-write frequency show writes 100x slower than non-rotating writes on Windows, and 10x slower on Linux. The multi-process rotation file lock uses pywin32 on Windows and still has to be implemented with fcntl on Linux.
This class therefore buffers one second of logs into a single long string before inserting it, greatly reducing the number of file lock/unlock operations; the speed is almost the same as writing the file without multi-process-safe rotation
This class therefore buffers one second of logs into a single long string before inserting it, greatly reducing the number of file lock/unlock operations; the speed is faster than writing the file without multi-process-safe rotation
The write to the file is triggered proactively.
"""
file_handler_list = []
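The buffering technique described in these docstrings can be illustrated with a short sketch. This is a minimal illustration under stated assumptions, not the library's actual implementation: the class name BufferedHandlerSketch is hypothetical, and the real handlers additionally take the cross-process rotation lock (pywin32 on Windows, fcntl on Linux) around the flush.

import logging
import threading
import time

class BufferedHandlerSketch(logging.Handler):
    """Hypothetical sketch: buffer ~1 second of records in memory and write
    them as one string, so the file is touched once per flush instead of
    once per record. The real handlers also rotate the file under a
    cross-process file lock, which is omitted here."""

    def __init__(self, filename, flush_interval=1.0):
        super().__init__()
        self._filename = filename
        self._flush_interval = flush_interval
        self._buffer = []
        self._buffer_lock = threading.Lock()
        threading.Thread(target=self._flush_forever, daemon=True).start()

    def emit(self, record):
        # Cheap per-record cost: format and append to an in-memory list.
        with self._buffer_lock:
            self._buffer.append(self.format(record))

    def _flush_forever(self):
        while True:
            time.sleep(self._flush_interval)
            with self._buffer_lock:
                lines, self._buffer = self._buffer, []
            if lines:
                # One open/write per interval for the whole batch.
                with open(self._filename, 'a', encoding='utf-8') as f:
                    f.write('\n'.join(lines) + '\n')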
15 changes: 11 additions & 4 deletions nb_log/log_manager.py
@@ -23,7 +23,7 @@
import typing
from functools import lru_cache
from logging import FileHandler
from nb_log import nb_log_config_default
from nb_log import nb_log_config_default # noqa
from nb_log.handlers import *


@@ -258,7 +258,7 @@ def get_logger_and_add_handlers(self, log_level_int: int = None, *, is_add_strea
self._log_path = log_path
self._log_filename = log_filename
self._log_file_size = log_file_size
if log_file_handler_type not in (None, 1, 2, 3, 4):
if log_file_handler_type not in (None, 1, 2, 3, 4, 5):
raise ValueError("log_file_handler_type must be one of 1, 2, 3, 4, 5")
self._log_file_handler_type = log_file_handler_type or nb_log_config_default.LOG_FILE_HANDLER_TYPE
self._mongo_url = mongo_url
@@ -333,14 +333,15 @@ def __add_handlers(self):
self._judge_logger_has_handler_type(ConcurrentRotatingFileHandlerWithBufferInitiativeWindwos) or
self._judge_logger_has_handler_type(ConcurrentRotatingFileHandlerWithBufferInitiativeLinux) or
self._judge_logger_has_handler_type(ConcurrentDayRotatingFileHandler) or
self._judge_logger_has_handler_type(FileHandler)
self._judge_logger_has_handler_type(FileHandler) or
self._judge_logger_has_handler_type(ConcurrentRotatingFileHandler)
) and all(
[self._log_path, self._log_filename]):
if not os.path.exists(self._log_path):
os.makedirs(self._log_path)
log_file = os.path.join(self._log_path, self._log_filename)
file_handler = None
if self._log_file_handler_type in (1, None):
if self._log_file_handler_type == 1:
if os_name == 'nt':
# On Windows this ConcurrentRotatingFileHandler provides multi-process-safe rotation, but at a severe performance cost.
# Ten processes each writing 100,000 records to the same file takes 15 minutes, 100x slower than writing without rotation.
@@ -355,12 +356,18 @@ def __add_handlers(self):
maxBytes=self._log_file_size * 1024 * 1024,
backupCount=nb_log_config_default.LOG_FILE_BACKUP_COUNT,
encoding="utf-8")

elif self._log_file_handler_type == 4:
file_handler = WatchedFileHandler(log_file)
elif self._log_file_handler_type == 2:
file_handler = ConcurrentDayRotatingFileHandler(self._log_filename, self._log_path, back_count=nb_log_config_default.LOG_FILE_BACKUP_COUNT)
elif self._log_file_handler_type == 3:
file_handler = FileHandler(log_file, mode='a', encoding='utf-8')
elif self._log_file_handler_type == 5:
file_handler = ConcurrentRotatingFileHandler(log_file,
maxBytes=self._log_file_size * 1024 * 1024,
backupCount=nb_log_config_default.LOG_FILE_BACKUP_COUNT,
encoding="utf-8")
self.__add_a_hanlder(file_handler)

# REMIND Add MongoDB logging.
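For reference, a minimal way to exercise the new type-5 branch might look like the sketch below. It assumes nb_log.get_logger forwards the log_file_handler_type keyword to get_logger_and_add_handlers, the same way the test file at the end of this commit passes log_filename; the logger name and file name are hypothetical.

import nb_log

# Hypothetical usage sketch: type 5 selects the third-party
# concurrent_log_handler.ConcurrentRotatingFileHandler wired up above.
logger = nb_log.get_logger('type5_demo', log_filename='type5_demo.log', log_file_handler_type=5)
logger.info('rotated by the third-party file-lock handler')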
8 changes: 5 additions & 3 deletions nb_log/nb_log_config_default.py
@@ -81,13 +81,15 @@ def add_fields(self, log_record, record, message_dict):
home_path = os.environ.get("HOME", '/')  # Home directory of the current user on Linux; there is no need to set this yourself.
LOG_PATH = Path(home_path) / Path('pythonlogs')  # Linux/macOS permissions are strict; a non-root user cannot write to /pythonlogs, so the default is changed.

LOG_FILE_HANDLER_TYPE = 1 # 1 2 3 4
LOG_FILE_HANDLER_TYPE = 1 # 1 2 3 4 5
"""
LOG_FILE_HANDLER_TYPE can be set to one of the four values 1, 2, 3, 4:
1: multi-process-safe file log, rotated by log-file size
LOG_FILE_HANDLER_TYPE can be set to one of the five values 1, 2, 3, 4, 5:
1: multi-process-safe file log, rotated by log-file size. This is my own batched log writing, which reduces the number of file-lock operations; in a test with 10 processes writing at full speed, it is 100x faster than type 5 on Windows and 5x faster on Linux.
2: multi-process-safe file log rotated daily; one log file is generated per day
3: a single log file that is never rotated (without rotation, the so-called process-safety problem cannot arise)
4: WatchedFileHandler, usable only on Linux; relies on the external logrotate tool to rotate the log files; multi-process-safe.
5: the third-party concurrent_log_handler.ConcurrentRotatingFileHandler, a file log rotated by size.
It uses a file lock for multi-process-safe rotation; fcntl on Linux performs acceptably, but win32con on Windows performs terribly. For size-based rotation, prefer file handler 1 over 5.
"""

LOG_LEVEL_FILTER = logging.DEBUG  # Default log level; records below this level are not logged. For example, with INFO set, logger.debug output is dropped and only logger.info and above is recorded.
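As a quick cross-reference of the five options, a user override might look like the sketch below; the module name nb_log_config.py is an assumption about where nb_log picks up user overrides, which this diff does not show.

# Assumed user override module (e.g. nb_log_config.py on the project path):
# 1 = nb_log's own buffered multi-process-safe size rotation (fastest)
# 2 = multi-process-safe daily rotation, one file per day
# 3 = plain single file, no rotation
# 4 = WatchedFileHandler + external logrotate (Linux only)
# 5 = concurrent_log_handler.ConcurrentRotatingFileHandler (file lock)
LOG_FILE_HANDLER_TYPE = 1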
4 changes: 2 additions & 2 deletions setup.py
@@ -28,7 +28,7 @@

setup(
name='nb_log', #
version="5.4",
version="5.8",
description=(
'very sharp color display, monkey-patch builtin print and a high-performance multiprocess-safe rotating file handler; other handlers including dingtalk, email, kafka, elastic and so on '
),
@@ -63,7 +63,7 @@
python setup.py sdist upload -r pypi
python setup.py sdist & twine upload dist/nb_log-5.4.tar.gz
python setup.py sdist & twine upload dist/nb_log-5.8.tar.gz
twine upload dist/*
14 changes: 14 additions & 0 deletions tests/test_benchmark.py
@@ -0,0 +1,14 @@
import nb_log
import time

logger = nb_log.get_logger('dsdsd', log_filename='dsdsd.log', is_add_stream_handler=False)


t1 = time.perf_counter()
for i in range(100 * 10000):
    logger.debug('heloo' * 10)
print(time.perf_counter() - t1)


# nb_log's ConcurrentRotatingFileHandlerWithBufferInitiativeWindwos: a single process writing 1,000,000 records takes 115 seconds on Windows,
# and 58 seconds on Linux.
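
The handler docstrings above quote 10-process figures; a multi-process variant of this benchmark might look like the sketch below (hypothetical logger and file names, not part of this commit).

import multiprocessing
import time

import nb_log

def worker():
    # Each process writes 100,000 records to the same log file.
    logger = nb_log.get_logger('mp_bench', log_filename='mp_bench.log', is_add_stream_handler=False)
    for _ in range(10 * 10000):
        logger.debug('heloo' * 10)

if __name__ == '__main__':
    t1 = time.perf_counter()
    procs = [multiprocessing.Process(target=worker) for _ in range(10)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(time.perf_counter() - t1)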
