forked from rwth-i6/returnn
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Log.py
198 lines (174 loc) · 6.24 KB
/
Log.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
from __future__ import print_function
import logging
import os
import sys
try:
import StringIO
except ImportError:
import io as StringIO
import threading
from threading import RLock
import contextlib
import string
import time
class Stream:
  """
  File-like object which buffers written text and forwards each completed
  line to a :class:`logging.Logger` at a fixed log level.
  """

  def __init__(self, log, lvl):
    """
    :type log: logging.Logger
    :type lvl: int
    """
    self.buf = StringIO.StringIO()
    self.log = log
    self.lvl = lvl
    self.lock = RLock()

  def write(self, msg):
    """
    Buffer ``msg``, or emit the buffered text when a bare newline is
    written (which is what ``print`` does after the message itself).

    :type msg: str
    """
    with self.lock:
      if msg != '\n':
        self.buf.write(msg)
      else:
        self.flush()

  def flush(self):
    """
    Emit the buffered text via the logger and reset the buffer.
    """
    with self.lock:
      self.buf.flush()
      self.log.log(self.lvl, self.buf.getvalue())
      self.buf.truncate(0)
      # truncate does not change the current position.
      # In Python 2.7, it incorrectly does. See: http://bugs.python.org/issue30250
      self.buf.seek(0)
class Log:
  """
  Manages the loggers ``v0``..``v5`` (increasing verbosity), each wrapped
  in a :class:`Stream` so they can be used like file objects.
  """

  def __init__(self):
    self.initialized = False  # set to True by :func:`initialize`
    self.filename = None  # log file name, set if any log target is a file

  def initialize(self, logs=None, verbosity=None, formatter=None):
    """
    Set up the ``v0``..``v5`` loggers and their handlers.
    Can be called multiple times; earlier handlers are reset.

    :param list[str]|None logs: log targets: "stdout", "|<cmd>" (pipe into a
      shell command), or a file path (may contain "$date")
    :param list[int]|None verbosity: verbosity (0..5) per log target
    :param list[str]|None formatter: formatter name per log target, one of
      "default", "timed", "raw", "verbose"
    """
    # Copy the arguments. The previous version used mutable default
    # arguments ([]) and appended to them, which leaked state across calls
    # and mutated the caller's lists.
    logs = list(logs or [])
    verbosity = list(verbosity or [])
    formatter = list(formatter or [])
    self.initialized = True
    fmt = {
      'default': logging.Formatter('%(message)s'),
      'timed': logging.Formatter('%(asctime)s %(message)s', datefmt='%Y-%m-%d,%H:%M:%S.%MS'),
      'raw': logging.Formatter('%(message)s'),
      'verbose': logging.Formatter('%(levelname)s - %(asctime)s %(message)s', datefmt='%Y-%m-%d,%H:%M:%S.%MS')
    }
    self.v = [logging.getLogger('v' + str(v)) for v in range(6)]
    for l in self.v:
      # Reset handler list, in case we have initialized some earlier (e.g. multiple log.initialize() calls).
      l.handlers = []
    if 'stdout' not in logs:
      logs.append('stdout')
    if len(formatter) == 1:
      # If only one format is provided, use it for all logs.
      formatter = [formatter[0]] * len(logs)
    for i in range(len(logs)):
      t = logs[i]
      v = 3
      if i < len(verbosity):
        v = verbosity[i]
      elif len(verbosity) == 1:
        v = verbosity[0]
      assert v <= 5, "invalid verbosity: " + str(v)
      # Fall back to the default formatter for missing or unknown names.
      # (Was `fmt.has_key(...)`, which does not exist in Python 3.)
      f = fmt[formatter[i]] if i < len(formatter) and formatter[i] in fmt else fmt['default']
      if t == 'stdout':
        handler = logging.StreamHandler(sys.stdout)
        handler.setLevel(logging.DEBUG)
      elif t.startswith("|"):  # pipe-format
        proc_cmd = t[1:].strip()
        from subprocess import Popen, PIPE
        proc = Popen(proc_cmd, shell=True, stdin=PIPE)
        handler = logging.StreamHandler(proc.stdin)
        handler.setLevel(logging.DEBUG)
      elif os.path.isdir(os.path.dirname(t)):
        if "$" in t:
          from Util import get_utc_start_time_filename_part
          t = string.Template(t).substitute(date=get_utc_start_time_filename_part())
        self.filename = t
        handler = logging.FileHandler(t)
        handler.setLevel(logging.DEBUG)
      else:
        assert False, "invalid log target %r" % t
      handler.setFormatter(f)
      # Register this handler on every logger up to the requested verbosity.
      for j in range(v + 1):
        if handler not in self.v[j].handlers:
          self.v[j].addHandler(handler)
    self.verbose = [True] * 6
    null = logging.FileHandler(os.devnull)
    for i in range(len(self.v)):
      self.v[i].setLevel(logging.DEBUG)
      if not self.v[i].handlers:
        # No handler at this verbosity: mark it inactive, and attach a
        # null handler so the logging module does not complain.
        self.verbose[i] = False
        self.v[i].addHandler(null)
    self.error = Stream(self.v[0], logging.CRITICAL)
    self.v0 = Stream(self.v[0], logging.ERROR)
    self.v1 = Stream(self.v[1], logging.INFO)
    self.v2 = Stream(self.v[2], logging.INFO)
    self.v3 = Stream(self.v[3], logging.DEBUG)
    self.v4 = Stream(self.v[4], logging.DEBUG)
    self.v5 = Stream(self.v[5], logging.DEBUG)

  def init_by_config(self, config):
    """
    Initialize via the given config.

    :param Config.Config config:
    """
    logs = config.list('log', [])
    log_verbosity = config.int_list('log_verbosity', [])
    log_format = config.list('log_format', [])
    self.initialize(logs=logs, verbosity=log_verbosity, formatter=log_format)
# The singleton Log instance, to be set up via log.initialize(...)
# or log.init_by_config(...).
log = Log()

# Some external code (e.g. pyzmq) will initialize the logging system
# via logging.basicConfig(). That will set a handler to the root logger,
# if there is none. This default handler usually prints to stderr.
# Because all our custom loggers are children of the root logger,
# this will have the effect that everything gets logged twice.
# By adding a dummy handler to the root logger, we will avoid that
# it adds any other default handlers.
logging.getLogger().addHandler(logging.NullHandler())
class StreamThreadLocal(threading.local):
  """
  This will just buffer everything, thread-locally, and not forward it to any stream.
  The idea is that multiple tasks will run in multiple threads and you want to catch all the logging/stdout
  of each to not clutter the output, and also you want to keep it separate for each.
  """

  def __init__(self):
    # One buffer per thread, courtesy of the threading.local base class.
    self.buf = StringIO.StringIO()

  def write(self, msg):
    """
    :type msg: str
    """
    self.buf.write(msg)

  def flush(self):
    # Nothing to forward anywhere; the data just stays in the buffer.
    pass
class StreamDummy:
  """
  A file-like sink which silently discards any data written to it.
  """

  def write(self, msg):
    """
    :type msg: str
    """
    pass  # intentionally discard

  def flush(self):
    pass  # nothing buffered, nothing to do
@contextlib.contextmanager
def wrap_log_streams(alternative_stream, also_sys_stdout=False, tf_log_verbosity=None):
  """
  Temporarily redirect all of our log streams (``log.v``, ``log.v0``..``log.v5``,
  ``log.error``; optionally also ``sys.stdout`` and the TensorFlow logger)
  to ``alternative_stream``, restoring everything on exit.

  :param StreamThreadLocal|StreamDummy alternative_stream:
  :param bool also_sys_stdout: wrap sys.stdout as well
  :param int|str|None tf_log_verbosity: e.g. "WARNING"
  :return: context manager which yields (original info stream v1, alternative_stream)
  """
  v_attrib_keys = ["v%i" % i for i in range(6)] + ["error"]
  # Store original values.
  orig_v_list = log.v
  orig_v_attribs = {key: getattr(log, key) for key in v_attrib_keys}
  orig_stdout = sys.stdout
  # Replace both the logger list and the stream attributes on the global log.
  log.v = [alternative_stream] * len(orig_v_list)
  for key in v_attrib_keys:
    setattr(log, key, alternative_stream)
  if also_sys_stdout:
    sys.stdout = alternative_stream
  orig_tf_log_verbosity = None
  if tf_log_verbosity is not None:
    # Imported lazily so TensorFlow is only required when this option is used.
    import tensorflow as tf
    orig_tf_log_verbosity = tf.logging.get_verbosity()
    tf.logging.set_verbosity(tf_log_verbosity)
  try:
    yield (orig_v_attribs["v1"], alternative_stream)
  finally:
    # Restore original values.
    log.v = orig_v_list
    for key, value in orig_v_attribs.items():
      setattr(log, key, value)
    if also_sys_stdout:
      sys.stdout = orig_stdout
    if tf_log_verbosity is not None:
      import tensorflow as tf
      tf.logging.set_verbosity(orig_tf_log_verbosity)