Commit 82572e78 authored by Daniel Scheffler

logs are now properly split into stdout and stderr messages; log level is now part of config parameters
misc.logging:
- GMS_logger:
    - added keyword 'log_level'
    - added separate StreamHandler for stderr
    - log level is now written to log files
- added class LessThanFilter
config.Job:
- added keyword 'log_level'
updated __version__
Former-commit-id: 65a3dbc3
Former-commit-id: 8eb90f49
parent 9ebb5f84
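
In essence, the stdout/stderr routing introduced here boils down to the following pattern — a minimal, self-contained sketch (logger name and messages are illustrative, not taken from the GMS code):

    import logging
    import sys

    class LessThanFilter(logging.Filter):
        """Pass only records strictly below the given level (exclusive maximum)."""
        def __init__(self, exclusive_maximum, name=""):
            super(LessThanFilter, self).__init__(name)
            self.max_level = exclusive_maximum

        def filter(self, record):
            # a non-zero return value means the record is logged
            return record.levelno < self.max_level

    logger = logging.getLogger('demo')  # illustrative name
    logger.setLevel(logging.DEBUG)

    # DEBUG and INFO -> sys.stdout
    handler_out = logging.StreamHandler(stream=sys.stdout)
    handler_out.setLevel(logging.DEBUG)
    handler_out.addFilter(LessThanFilter(logging.WARNING))

    # WARNING, ERROR, CRITICAL -> sys.stderr
    handler_err = logging.StreamHandler(stream=sys.stderr)
    handler_err.setLevel(logging.WARNING)

    logger.addHandler(handler_out)
    logger.addHandler(handler_err)

    logger.info('arrives on stdout')
    logger.warning('arrives on stderr')
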
@@ -15,7 +15,7 @@ from . import config
 from .processing.process_controller import process_controller
-__version__ = '20170411.02'
+__version__ = '20170411.03'
 __author__ = 'Daniel Scheffler'
 __all__ = ['algorithms',
            'io',
...
@@ -678,8 +678,8 @@ class L1B_object(L1A_object):
                               %(self.scene_ID,self.entity_ID,err)) for err in COREG_obj.tracked_errors]
         else:
             if self.coreg_needed:
-                self.logger.info('Coregistration skipped because no suitable reference scene is available or '
-                                 'spatial query failed.')
+                self.logger.warning('Coregistration skipped because no suitable reference scene is available or '
+                                    'spatial query failed.')
             else:
                 self.logger.info('Coregistration of scene %s (entity ID %s) skipped because target dataset ID equals '
                                  'reference dataset ID.' %(self.scene_ID, self.entity_ID))
...
@@ -199,7 +199,8 @@ class GMS_object(object):
         if self._logger and self._logger.handlers[:]:
             return self._logger
         else:
-            self._logger = GMS_logger('log__' + self.baseN, self.scene_ID, self.path_logfile, append=True)
+            self._logger = GMS_logger('log__' + self.baseN, fmt_suffix=self.scene_ID, path_logfile=self.path_logfile,
+                                      log_level=CFG.job.log_level, append=True)
             return self._logger
...
@@ -61,7 +61,8 @@ GMS_config = GMS_configuration()
 class Job:
     def __init__(self, call_type, ID, exec_mode='Python', db_host='localhost', exec_L1AP=None, exec_L1BP=None,
                  exec_L1CP=None, exec_L2AP=None, exec_L2BP=None, exec_L2CP=None, CPUs=None, sub_multiProc=True,
-                 exc_handler=True, blocksize=(2048,2048), profiling=False, bench_all=False, bench_cloudMask=False):
+                 exc_handler=True, log_level='INFO', blocksize=(2048,2048), profiling=False, bench_all=False,
+                 bench_cloudMask=False):
         """Create a job configuration
@@ -79,6 +80,8 @@ class Job:
         :param CPUs:           number of CPU cores to be used for processing (default: None -> use all available)
         :param sub_multiProc:  allow multiprocessing within workers
         :param exc_handler:    enable/disable automatic handling of unexpected exceptions (default: True -> enabled)
+        :param log_level:      the logging level to be used (choices: 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL';
+                               default: 'INFO')
         :param blocksize:      X/Y block size to be used for any tiling process (default: (2048,2048))
         :param profiling:      enable/disable code profiling (default: False)
         :param bench_all:      enable/disable benchmark of the whole processing pipeline
@@ -106,7 +109,7 @@ class Job:
         self.CPUs = CPUs if CPUs else multiprocessing.cpu_count()
         self.allow_subMultiprocessing = sub_multiProc
         self.disable_exception_handler = exc_handler is False
-        self.log_level = 'INFO'  # TODO implement log level
+        self.log_level = log_level
         self.tiling_block_size_XY = blocksize
         self.profiling = profiling
         self.benchmark_global = bench_all
...
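
With the new keyword in place, the level can be chosen per job, for instance (call_type and ID values are placeholders, not taken from this commit):

    job = Job('console', 123456, log_level='DEBUG')  # 'console' and 123456 are hypothetical arguments
    assert job.log_level == 'DEBUG'
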
@@ -292,7 +292,8 @@ class SRF(object):
 def pickle_SRF_DB(L1A_Instances):
     list_GMS_identifiers = [i.GMS_identifier for i in L1A_Instances]
     out_dict = collections.OrderedDict()
-    logger = GMS_logger('log__SRF2PKL', path_logfile=os.path.join(CFG.job.path_testing,'out/log__SRF2PKL.log'),append=False)
+    logger = GMS_logger('log__SRF2PKL', path_logfile=os.path.join(CFG.job.path_testing,'out/log__SRF2PKL.log'),
+                        log_level=CFG.job.log_level, append=False)
     for Id,Inst in zip(list_GMS_identifiers,L1A_Instances):
         Id['logger'] = logger
         out_dict[Inst.satellite+'_'+Inst.sensor+(('_'+Inst.subsystem) if Inst.subsystem not in ['',None] else '')] \
...
@@ -13,17 +13,18 @@ except ImportError:
 class GMS_logger(logging.Logger):
-    def __init__(self, name_logfile, fmt_suffix=None, path_logfile=None, append=True):
+    def __init__(self, name_logfile, fmt_suffix=None, path_logfile=None, log_level='INFO', append=True):
         # type: (str, int, str, bool) -> None
         """Returns a logging.logger instance pointing to the given logfile path.
         :param name_logfile:
         :param fmt_suffix:    if given, it will be included into log formatter
         :param path_logfile:  if no path is given, only a StreamHandler is created
+        :param log_level:     the logging level to be used (choices: 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL';
+                              default: 'INFO')
         :param append:        <bool> whether to append the log message to an existing logfile (1)
                               or to create a new logfile (0); default=1
         """
-        # TODO redirect different log levels to stdout and stderr
-        # TODO add log level keyword
         # private attributes
         self._captured_stream = ''
@@ -31,6 +32,8 @@ class GMS_logger(logging.Logger):
         self.path_logfile = path_logfile
         self.formatter_fileH = logging.Formatter('%(asctime)s' + (' [%s]' % fmt_suffix if fmt_suffix else '') +
                                                  ' %(levelname)s: %(message)s', datefmt='%Y/%m/%d %H:%M:%S')
+        self.formatter_ConsoleH = logging.Formatter('%(asctime)s' + (' [%s]' % fmt_suffix if fmt_suffix else '') +
+                                                    ': %(message)s', datefmt='%Y/%m/%d %H:%M:%S')
         if path_logfile:
@@ -47,28 +50,38 @@ class GMS_logger(logging.Logger):
             # create FileHandler
             fileHandler = logging.FileHandler(path_logfile, mode='a' if append else 'w')
             fileHandler.setFormatter(self.formatter_fileH)
-            fileHandler.setLevel(logging.DEBUG)
+            fileHandler.setLevel(log_level)
         else:
             fileHandler = None

-        ## create StreamHandler
+        ## create StreamHandler  # TODO add a StringIO handler
         #self.streamObj = StringIO()
         #self.streamHandler = logging.StreamHandler(stream=self.streamObj)
         #self.streamHandler.setFormatter(formatter)
         #self.streamHandler.set_name('StringIO handler')

-        # create ConsoleHandler
-        consoleHandler = logging.StreamHandler(stream=sys.stdout)  # by default it would go to sys.stderr
-        consoleHandler.setFormatter(self.formatter_fileH)  # could also receive a different formatter
-        consoleHandler.set_name('console handler')
+        # create ConsoleHandler for logging levels DEBUG and INFO -> logging to sys.stdout
+        consoleHandler_out = logging.StreamHandler(stream=sys.stdout)  # by default it would go to sys.stderr
+        consoleHandler_out.setFormatter(self.formatter_ConsoleH)
+        consoleHandler_out.set_name('console handler stdout')
+        consoleHandler_out.setLevel(log_level)
+        consoleHandler_out.addFilter(LessThanFilter(logging.WARNING))

+        # create ConsoleHandler for logging levels WARNING, ERROR, CRITICAL -> logging to sys.stderr
+        consoleHandler_err = logging.StreamHandler(stream=sys.stderr)
+        consoleHandler_err.setFormatter(self.formatter_ConsoleH)
+        consoleHandler_err.setLevel(logging.WARNING)
+        consoleHandler_err.set_name('console handler stderr')

-        self.setLevel(logging.DEBUG)
+        self.setLevel(log_level)

         if not self.handlers:
             if fileHandler:
                 self.addHandler(fileHandler)
             #self.addHandler(self.streamHandler)
-            self.addHandler(consoleHandler)
+            self.addHandler(consoleHandler_out)
+            self.addHandler(consoleHandler_err)

         # if append:
         #     logfileHandler = logging.FileHandler(path_logfile, mode='a')
@@ -76,17 +89,17 @@ class GMS_logger(logging.Logger):
         # else:
         #     logfileHandler = logging.FileHandler(path_logfile, mode='w')
         #     logfileHandler.setFormatter(formatter)
         #     logfileHandler.setLevel(logging.CRITICAL)
-        #     consoleHandler = logging.StreamHandler()
-        #     consoleHandler.setFormatter(formatter)
-        #     consoleHandler.setLevel(logging.CRITICAL)
+        #     consoleHandler_out = logging.StreamHandler()
+        #     consoleHandler_out.setFormatter(formatter)
+        #     consoleHandler_out.setLevel(logging.CRITICAL)
         ## logger.setLevel(logging.DEBUG)
         # if CPUs == 1:
         #     if not logger.handlers:
         #         logger.addHandler(logfileHandler)
-        #         logger.addHandler(consoleHandler)
+        #         logger.addHandler(consoleHandler_out)
         #     else:
         #         logger.addHandler(logfileHandler)
-        #         logger.addHandler(consoleHandler)
+        #         logger.addHandler(consoleHandler_out)

     @property
     def captured_stream(self):
@@ -147,4 +160,16 @@ def close_logger(logger):
 def shutdown_loggers():
-    logging.shutdown()
\ No newline at end of file
+    logging.shutdown()
+
+
+class LessThanFilter(logging.Filter):
+    # http://stackoverflow.com/questions/2302315/how-can-info-and-debug-logging-message-be-sent-to-stdout-and-higher-level-messag
+    def __init__(self, exclusive_maximum, name=""):
+        super(LessThanFilter, self).__init__(name)
+        self.max_level = exclusive_maximum
+
+    def filter(self, record):
+        # non-zero return means we log this message
+        return True if record.levelno < self.max_level else False
\ No newline at end of file
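
Call sites then simply pass the configured level through. Roughly, assuming CFG is the imported GMS configuration as in the hunks above (logger name and logfile path here are hypothetical):

    logger = GMS_logger('log__demo', path_logfile='/tmp/demo.log',  # hypothetical name and path
                        log_level=CFG.job.log_level, append=False)
    logger.debug('goes to the logfile and to stdout (if log_level permits)')
    logger.error('goes to the logfile and to stderr')
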
@@ -87,8 +87,8 @@ class process_controller(object):
         self.logger.info('Process Controller initialized for job ID %s (comment: %s).'
                          % (self.job.ID, self.DB_job_record.comment))
-        self.logger.info('Deleting previously processed data...')
-        self.DB_job_record.delete_procdata_of_entire_job(force=True)  # FIXME
+        #self.logger.info('Deleting previously processed data...')
+        #self.DB_job_record.delete_procdata_of_entire_job(force=True)  # TODO make this optional

     @property
@@ -96,8 +96,9 @@ class process_controller(object):
         if self._logger and self._logger.handlers[:]:
             return self._logger
         else:
-            self._logger = GMS_logger('log__%s' %self.job.ID, path_logfile=os.path.join(self.job.path_job_logs,'%s.log'
-                                      % self.job.ID), append=False)
+            self._logger = GMS_logger('log__%s' %self.job.ID,
+                                      path_logfile=os.path.join(self.job.path_job_logs,'%s.log' % self.job.ID),
+                                      log_level=self.job.log_level, append=False)
             return self._logger
@@ -417,7 +418,8 @@ class process_controller(object):
         self.logger.warning('Process controller stopped by user.')
         del self.logger
         shutdown_loggers()
-        raise KeyboardInterrupt
+        raise KeyboardInterrupt  # terminate execution and show traceback

     def benchmark(self):
...