Commit b6a2f1b6 authored by Daniel Scheffler

PEP-8 editing. Added style checkers.

parent a5f41a39
Pipeline #1207 passed in 8 minutes and 1 second
 before_script:
   - git lfs pull
+
+stages:
+  - test
+  - deploy
+
 test_gms_preprocessing:
+  stage: test
   script:
     - source /root/anaconda3/bin/activate
     - export GDAL_DATA=/root/anaconda3/share/gdal
@@ -17,7 +24,24 @@ test_gms_preprocessing:
     - nosetests.xml
     when: always

-pages:
+test_styles:
+  stage: test
+  script:
+    - source /root/anaconda3/bin/activate
+    - export GDAL_DATA=/root/anaconda3/share/gdal
+    - export PYTHONPATH=$PYTHONPATH:/root  # /root <- directory needed later
+    - pip install flake8 pycodestyle pylint pydocstyle  # TODO remove as soon as docker container is rebuilt
+    - make lint
+  artifacts:
+    paths:
+    - tests/linting/flake8.log
+    - tests/linting/pycodestyle.log
+    - tests/linting/pydocstyle.log
+    when: always
+
+deploy_pages:
   stage: deploy
   dependencies:
     - test_gms_preprocessing

@@ -28,7 +52,6 @@ pages:
     - cp nosetests.* public/nosetests_reports/
     - mkdir -p public/doc
     - cp -r docs/_build/html/* public/doc/
   artifacts:
     paths:
     - public
...
@@ -50,7 +50,9 @@ clean-test: ## remove test and coverage artifacts
     rm -fr nosetests.xml

 lint: ## check style with flake8
-    flake8 gms_preprocessing tests
+    flake8 --max-line-length=120 gms_preprocessing tests > ./tests/linting/flake8.log
+    pycodestyle gms_preprocessing --exclude="*.ipynb,*.ipynb*,envifilehandling.py" --max-line-length=120 > ./tests/linting/pycodestyle.log
+    -pydocstyle gms_preprocessing > ./tests/linting/pydocstyle.log

 test: ## run tests quickly with the default Python
     python setup.py test
...
@@ -4,12 +4,12 @@ import os
 if 'MPLBACKEND' not in os.environ:
     os.environ['MPLBACKEND'] = 'Agg'

-from . import algorithms
-from . import io
-from . import misc
-from . import processing
-from . import config
-from .processing.process_controller import process_controller
+from . import algorithms  # noqa: E402
+from . import io  # noqa: E402
+from . import misc  # noqa: E402
+from . import processing  # noqa: E402
+from . import config  # noqa: E402
+from .processing.process_controller import process_controller  # noqa: E402

 __author__ = """Daniel Scheffler"""
 __email__ = 'daniel.scheffler@gfz-potsdam.de'
...
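For context: E402 is pycodestyle's "module level import not at top of file" warning. It cannot be avoided in this module because the matplotlib backend has to be fixed via the environment before any of the submodules imports matplotlib, so the imports stay below the environment setup and are marked with "# noqa: E402". A minimal, hypothetical sketch of the same pattern (assumes matplotlib is installed; the function name is illustrative, not part of the package):

    import os

    # choose a non-interactive backend before matplotlib is imported anywhere below
    if 'MPLBACKEND' not in os.environ:
        os.environ['MPLBACKEND'] = 'Agg'

    import matplotlib.pyplot as plt  # noqa: E402  # import intentionally placed after the env setup

    def save_demo_plot(path='demo.png'):
        """Render a trivial figure without a display, which works thanks to the Agg backend."""
        fig, ax = plt.subplots()
        ax.plot([0, 1, 2], [0, 1, 4])
        fig.savefig(path)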
@@ -416,12 +416,13 @@ class L1A_object(GMS_object):
         if ds:
             sds_md = ds.GetMetadata('SUBDATASETS')
+            data_arr = None
             for bidx, b in enumerate(self.LayerBandsAssignment):
                 sds_name = [i for i in sds_md.values() if '%s_Band%s:ImageData' % (subsystem_identifier, b) in str(i) or
                             '%s_Swath:ImageData%s' % (subsystem_identifier, b) in str(i)][0]
                 data = gdalnumeric.LoadFile(sds_name)
-                data_arr = np.empty(data.shape + (len(self.LayerBandsAssignment),),
-                                    data.dtype) if bidx == 0 else data_arr
+                if bidx == 0:
+                    data_arr = np.empty(data.shape + (len(self.LayerBandsAssignment),), data.dtype)
                 data_arr[:, :, bidx] = data

         if CFG.job.exec_mode == 'Flink' and path_output is None:  # numpy array output
@@ -442,14 +443,16 @@ class L1A_object(GMS_object):
                     if subsystem_identifier in str(ds.dimensions()) and 'ImagePixel' in str(ds.dimensions()):
                         list_matching_dsIdx.append(i)
                     i += 1
-                except:
+                except Exception:
                     break
             list_matching_dsIdx = list_matching_dsIdx[:3] if self.subsystem == 'VNIR1' else \
                 [list_matching_dsIdx[-1]] if self.subsystem == 'VNIR2' else list_matching_dsIdx
+            data_arr = None
             for i, dsIdx in enumerate(list_matching_dsIdx):
                 data = hdfFile.select(dsIdx)[:]
-                data_arr = np.empty(data.shape + (len(self.LayerBandsAssignment),), data.dtype) if i == 0 else data_arr
+                if i == 0:
+                    data_arr = np.empty(data.shape + (len(self.LayerBandsAssignment),), data.dtype)
                 data_arr[:, :, i] = data

             if CFG.job.exec_mode == 'Flink' and path_output is None:  # numpy array output
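The two hunks above replace one-line conditional expressions with an explicit "if", pre-binding data_arr to None and allocating the full band stack once the first band's shape and dtype are known. A standalone sketch of that allocation pattern, using a hypothetical band reader rather than the project's GDAL/HDF code:

    import numpy as np

    def stack_bands(read_band, n_bands):
        """Stack 2D bands into a (rows, cols, n_bands) cube, allocating on the first band."""
        data_arr = None
        for bidx in range(n_bands):
            data = read_band(bidx)  # expected to return a 2D numpy array
            if bidx == 0:  # shape and dtype are only known after the first read
                data_arr = np.empty(data.shape + (n_bands,), data.dtype)
            data_arr[:, :, bidx] = data
        return data_arr

    # usage with a dummy reader that fills each band with its index
    cube = stack_bands(lambda b: np.full((2, 3), b), n_bands=4)
    assert cube.shape == (2, 3, 4)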
@@ -462,7 +465,7 @@ class L1A_object(GMS_object):
                 self.logger.error('Missing HDF4 support. Reading HDF file failed.')
                 raise ImportError('No suitable library for reading HDF4 data available.')
-        ds = None
+        del ds

     def import_metadata(self, v=False):
         """Reads metainformation of the given file from the given ASCII metafile.
@@ -570,7 +573,7 @@ class L1A_object(GMS_object):
         if conv == 'Rad':
             """http://s2tbx.telespazio-vega.de/sen2three/html/r2rusage.html?highlight=quantification182
             rToa = (float)(DN_L1C_band / QUANTIFICATION_VALUE);
             L = (rToa * e0__SOLAR_IRRADIANCE_For_band * cos(Z__Sun_Angles_Grid_Zenith_Values)) /
                 (PI * U__earth_sun_distance_correction_factor);
             L = (U__earth_sun_distance_correction_factor * rToa * e0__SOLAR_IRRADIANCE_For_band * cos(
                 Z__Sun_Angles_Grid_Zenith_Values)) / PI;"""
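The quoted Sen2Three docstring amounts to rToa = DN / QUANTIFICATION_VALUE followed by a reflectance-to-radiance conversion; the sketch below implements the second of the two quoted forms, L = (U * rToa * e0 * cos(SZA)) / pi, with made-up numbers. It is illustrative arithmetic only, not the module's actual conversion code:

    import math

    def dn_to_radiance(dn, quantification_value, e0, sun_zenith_deg, u_corr):
        """DN -> TOA reflectance -> radiance, following the formula quoted above."""
        r_toa = dn / quantification_value
        return (u_corr * r_toa * e0 * math.cos(math.radians(sun_zenith_deg))) / math.pi

    # illustrative numbers only (e0 in W/m2/um, zenith angle in degrees)
    print(dn_to_radiance(dn=1200, quantification_value=10000.0, e0=1913.57,
                         sun_zenith_deg=35.0, u_corr=1.0))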
@@ -650,7 +653,7 @@ class L1A_object(GMS_object):
         if False in [self.GeoAlign_ok, self.GeoTransProj_ok]:
             previous_dataname = self.MetaObj.Dataname
             if hasattr(self, 'arr') and isinstance(self.arr, (GeoArray, np.ndarray)) and \
                     self.MetaObj.Dataname.startswith('/vsi'):
                 outP = os.path.join(self.ExtractedFolder, self.baseN + '__' + self.arr_desc)
                 # FIXME ineffective but needed as long as georeference_by_TieP_or_inherent_GCPs does not support
                 # FIXME direct array inputs
@@ -721,21 +724,26 @@ class L1A_object(GMS_object):
             mask_clouds = None  # FIXME
         else:
-            # FIXME Landsat cloud mask pixel values are currently not compatible to definition_dicts.get_mask_classdefinition
-            # append /<GeoMultiSensRepo>/algorithms to PATH in order to properly import py_tools_ah when unpickling cloud classifiers
+            # FIXME Landsat cloud mask pixel values are currently not compatible to
+            # FIXME definition_dicts.get_mask_classdefinition
+            # append /<GeoMultiSensRepo>/algorithms to PATH in order to properly import py_tools_ah when unpickling
+            # cloud classifiers
             sys.path.append(
                 os.path.join(os.path.dirname(__file__)))  # FIXME handle py_tools_ah as normal external dependency
             # in_mem = hasattr(self, 'arr') and isinstance(self.arr, np.ndarray)
             # if in_mem:
-            #     (rS, rE), (cS, cE) = self.arr_pos if self.arr_pos else ((0,self.shape_fullArr[0]),(0,self.shape_fullArr[1]))
+            #     (rS, rE), (cS, cE) = \
+            #         self.arr_pos if self.arr_pos else ((0,self.shape_fullArr[0]),(0,self.shape_fullArr[1]))
             #     bands = self.arr.shape[2] if len(self.arr.shape) == 3 else 1
             # else:
             #     subset = subset if subset else ['block', self.arr_pos] if self.arr_pos else ['cube', None]
-            #     bands, rS, rE, cS, cE = list(GEOP.get_subsetProps_from_subsetArg(self.shape_fullArr, subset).values())[2:7]
+            #     bands, rS, rE, cS, cE = \
+            #         list(GEOP.get_subsetProps_from_subsetArg(self.shape_fullArr, subset).values())[2:7]
             # arr_isPath = isinstance(self.arr, str) and os.path.isfile(self.arr)  # FIXME
-            # inPath = self.arr if arr_isPath else self.MetaObj.Dataname if \
-            #     (hasattr(self,'MetaObj') and self.MetaObj) else self.meta_odict['Dataname']  # FIXME ersetzen durch path generator?
+            # # FIXME ersetzen durch path generator?:
+            # inPath = self.arr if arr_isPath else self.MetaObj.Dataname if \
+            #     (hasattr(self,'MetaObj') and self.MetaObj) else self.meta_odict['Dataname']
             if not self.path_cloud_class_obj or self.satellite == 'Sentinel-2A':  # FIXME dont exclude S2 here
                 self.log_for_fullArr_or_firstTile('Cloud masking is not yet implemented for %s %s...'
@@ -754,9 +762,10 @@ class L1A_object(GMS_object):
             # logger.info("Cloud mask missing -> derive own cloud mask.")
             # CldMsk = CloudMask(logger=logger, persistence_file=options["cld_mask"]["persistence_file"],
             #                    processing_tiles=options["cld_mask"]["processing_tiles"])
-            # s2img.mask_clouds = CldMsk(S2_img=s2img, target_resolution=options["cld_mask"]["target_resolution"],
-            #                            majority_filter_options=options["cld_mask"]["majority_mask_filter"],
-            #                            nodata_value=options["cld_mask"]['nodata_value_mask'])
+            # s2img.mask_clouds = \
+            #     CldMsk(S2_img=s2img, target_resolution=options["cld_mask"]["target_resolution"],
+            #            majority_filter_options=options["cld_mask"]["majority_mask_filter"],
+            #            nodata_value=options["cld_mask"]['nodata_value_mask'])
             # del CldMsk

         self.GMS_identifier['logger'] = self.logger
@@ -788,21 +797,21 @@ class L1A_object(GMS_object):
         for i, class_path in zip(range(0, 2 * len(pathlist_cloud_class_obj), 2), pathlist_cloud_class_obj):
             categories_timinggroup_timing[i:i + 1, 0] = os.path.splitext(os.path.basename(class_path))[0]
             t1 = time.time()
-            CLD_obj = CLD_P.GmsCloudClassifier(classifier=class_path)
+            # CLD_obj = CLD_P.GmsCloudClassifier(classifier=class_path)
             categories_timinggroup_timing[i, 1] = "import time"
             categories_timinggroup_timing[i, 2] = time.time() - t1
             t2 = time.time()
-            mask_clouds = CLD_obj(self)
+            # mask_clouds = CLD_obj(self)
             categories_timinggroup_timing[i + 1, 1] = "processing time"
             categories_timinggroup_timing[i + 1, 2] = time.time() - t2
-        classifiers = np.unique(categories_timinggroup_timing[:, 0])
-        categories = np.unique(categories_timinggroup_timing[:, 1])
+        # classifiers = np.unique(categories_timinggroup_timing[:, 0])
+        # categories = np.unique(categories_timinggroup_timing[:, 1])
         plt.ioff()
         fig = plt.figure()
-        ax = fig.add_subplot(111)
-        space = 0.3
-        n = len(classifiers)
-        width = (1 - space) / (len(classifiers))
+        # ax = fig.add_subplot(111)
+        # space = 0.3
+        # n = len(classifiers)
+        # width = (1 - space) / (len(classifiers))
         # for i,classif in enumerate(classifiers):  # FIXME
         #     vals = dpoints[dpoints[:,0] == cond][:,2].astype(np.float)
         #     pos = [j - (1 - space) / 2. + i * width for j in range(1,len(categories)+1)]
@@ -951,7 +960,8 @@ class L1A_object(GMS_object):
             self.arr.gt = mapinfo2geotransform(self.MetaObj.map_info)
             self.arr.prj = self.MetaObj.projection
-            self.mask_nodata.gt = self.arr.gt  # must be set here because nodata mask has been computed from self.arr without geoinfos
+            # must be set here because nodata mask has been computed from self.arr without geoinfos:
+            self.mask_nodata.gt = self.arr.gt
             self.mask_nodata.prj = self.arr.prj

     def update_spec_vals_according_to_dtype(self, dtype=None):
...
@@ -208,7 +208,7 @@ class Scene_finder(object):
         self.plusminus_years = plusminus_years

         # get temporal constraints
-        add_years = lambda dt, years: dt.replace(dt.year + years) \
+        def add_years(dt, years): return dt.replace(dt.year + years) \
             if not (dt.month == 2 and dt.day == 29) else dt.replace(dt.year + years, 3, 1)
         self.timeStart = add_years(self.src_AcqDate, -plusminus_years)
         timeEnd = add_years(self.src_AcqDate, +plusminus_years)
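Most changes in this file replace lambdas that were assigned to names with small def functions, which is the fix for flake8's E731 ("do not assign a lambda expression, use a def"). A minimal, hypothetical illustration of that rewrite (the names are made up, not from the project):

    # before: flagged by flake8 as E731
    # add_percent = lambda value, pct: value * (1 + pct / 100.)

    # after: a named function; same behaviour, but with a real __name__ and nicer tracebacks
    def add_percent(value, pct):
        return value * (1 + pct / 100.)

    assert add_percent(200, 10) == 220.0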
@@ -242,7 +242,9 @@ class Scene_finder(object):
             GDF['acquisitiondate'] = list(GDF['object'].map(lambda scene: scene.acquisitiondate))
             GDF['cloudcover'] = list(GDF['object'].map(lambda scene: scene.cloudcover))
             GDF['polyLonLat'] = list(GDF['object'].map(lambda scene: scene.polyLonLat))
-            LonLat2UTM = lambda polyLL: reproject_shapelyGeometry(polyLL, 4326, self.src_prj)
+
+            def LonLat2UTM(polyLL): return reproject_shapelyGeometry(polyLL, 4326, self.src_prj)
+
             GDF['polyUTM'] = list(GDF['polyLonLat'].map(LonLat2UTM))
             self.GDF_ref_scenes = GDF
@@ -271,7 +273,7 @@ class Scene_finder(object):
         GDF = self.GDF_ref_scenes
         if not GDF.empty:
             # get overlap parameter
-            get_OL_prms = lambda poly: get_overlap_polygon(poly, self.src_footprint_poly)
+            def get_OL_prms(poly): return get_overlap_polygon(poly, self.src_footprint_poly)
             GDF['overlapParams'] = list(GDF['polyLonLat'].map(get_OL_prms))
             GDF['overlap area'] = list(GDF['overlapParams'].map(lambda OL_prms: OL_prms['overlap area']))
             GDF['overlap percentage'] = list(GDF['overlapParams'].map(lambda OL_prms: OL_prms['overlap percentage']))
@@ -285,9 +287,9 @@ class Scene_finder(object):
         GDF = self.GDF_ref_scenes
         if not GDF.empty:
             # get processing level of refernce scenes
-            query_procL = lambda sceneID: \
-                DB_T.get_info_from_postgreSQLdb(CFG.job.conn_database, 'scenes_proc', ['proc_level'],
-                                                {'sceneid': sceneID})
+            def query_procL(sceneID):
+                return DB_T.get_info_from_postgreSQLdb(CFG.job.conn_database, 'scenes_proc', ['proc_level'],
+                                                       {'sceneid': sceneID})
             GDF['temp_queryRes'] = list(GDF['sceneid'].map(query_procL))
             GDF['proc_level'] = list(GDF['temp_queryRes'].map(lambda queryRes: queryRes[0][0] if queryRes else None))
             GDF.drop('temp_queryRes', axis=1, inplace=True)
@@ -300,40 +302,46 @@ class Scene_finder(object):
         if not GDF.empty:
             # get path of binary file and check if the corresponding dataset exists
             GDF = self.GDF_ref_scenes
-            get_path_binary = lambda GDF_row: \
-                PG.path_generator(scene_ID=GDF_row['sceneid'], proc_level=GDF_row['proc_level']).get_path_imagedata()
-            check_exists = lambda path: os.path.exists(path)
+            def get_path_binary(GDF_row):
+                return PG.path_generator(scene_ID=GDF_row['sceneid'], proc_level=GDF_row['proc_level'])\
+                    .get_path_imagedata()
+
+            def check_exists(path): return os.path.exists(path)
             GDF['path_ref'] = GDF.apply(lambda GDF_row: get_path_binary(GDF_row), axis=1)
             GDF['refDs_exists'] = list(GDF['path_ref'].map(check_exists))
             # filter scenes out where the corresponding dataset does not exist on fileserver
-            self.GDF_ref_scenes = GDF[GDF['refDs_exists'] == True]
+            self.GDF_ref_scenes = GDF[GDF['refDs_exists']]

     def _filter_by_entity_ID_availability(self):
         GDF = self.GDF_ref_scenes
         if not GDF.empty:
             # check if a proper entity ID can be gathered from database
-            query_eID = lambda sceneID: DB_T.get_info_from_postgreSQLdb(CFG.job.conn_database, 'scenes', ['entityid'],
-                                                                        {'id': sceneID}, records2fetch=1)
+            def query_eID(sceneID):
+                return DB_T.get_info_from_postgreSQLdb(CFG.job.conn_database, 'scenes', ['entityid'],
+                                                       {'id': sceneID}, records2fetch=1)
+
             GDF['temp_queryRes'] = list(GDF['sceneid'].map(query_eID))
             GDF['entityid'] = list(GDF['temp_queryRes'].map(lambda queryRes: queryRes[0][0] if queryRes else None))
             GDF.drop('temp_queryRes', axis=1, inplace=True)
             # filter scenes out that have no entity ID (database errors)
-            self.GDF_ref_scenes = GDF[GDF['refDs_exists'] == True]
+            self.GDF_ref_scenes = GDF[GDF['refDs_exists']]

     def _filter_by_projection(self):
         GDF = self.GDF_ref_scenes
         if not GDF.empty:
             # compare projections of target and reference image
             from ..io.Input_reader import read_ENVIhdr_to_dict
-            get_prj = lambda path_binary: \
-                read_ENVIhdr_to_dict(os.path.splitext(path_binary)[0] + '.hdr')['coordinate system string']
-            is_prj_equal = lambda path_binary: prj_equal(self.src_prj, get_prj(path_binary))
+            def get_prj(path_binary):
+                return read_ENVIhdr_to_dict(os.path.splitext(path_binary)[0] + '.hdr')['coordinate system string']
+
+            def is_prj_equal(path_binary): return prj_equal(self.src_prj, get_prj(path_binary))
+
             GDF['prj_equal'] = list(GDF['path_ref'].map(is_prj_equal))
             # filter scenes out that have a different projection
-            self.GDF_ref_scenes = GDF[GDF['prj_equal'] == True]
+            self.GDF_ref_scenes = GDF[GDF['prj_equal']]

 class ref_Scene:
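The dropped `== True` comparisons are what pycodestyle flags as E712; for a boolean column the Series itself already works as a mask. A self-contained pandas sketch with made-up data (the project filters a GeoDataFrame, but the idiom is the same):

    import pandas as pd

    df = pd.DataFrame({'sceneid': [1, 2, 3], 'refDs_exists': [True, False, True]})

    # both select the same rows, but the first form triggers E712
    filtered_old = df[df['refDs_exists'] == True]  # noqa: E712
    filtered_new = df[df['refDs_exists']]
    assert filtered_old.equals(filtered_new)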
@@ -431,13 +439,14 @@ class L1B_object(L1A_object):
             % (date_minmax[0].month, date_minmax[0].day, date_minmax[1].month, date_minmax[1].day)

         # TODO weitere Kriterien einbauen!
-        query_scenes = lambda condlist: DB_T.get_overlapping_scenes_from_postgreSQLdb(
-            CFG.job.conn_database,
-            table='scenes',
-            tgt_corners_lonlat=self.trueDataCornerLonLat,
-            conditions=condlist,
-            add_cmds='ORDER BY scenes.cloudcover ASC',
-            timeout=30000)
+        def query_scenes(condlist):
+            return DB_T.get_overlapping_scenes_from_postgreSQLdb(
+                CFG.job.conn_database,
+                table='scenes',
+                tgt_corners_lonlat=self.trueDataCornerLonLat,
+                conditions=condlist,
+                add_cmds='ORDER BY scenes.cloudcover ASC',
+                timeout=30000)
         conds_descImportance = [dataset_cond, cloudcov_cond, dayrange_cond]

         self.logger.info('Querying database in order to find a suitable reference scene for co-registration.')
@@ -491,7 +500,7 @@ class L1B_object(L1A_object):
                     break

         # start download of scene data not available and start L1A processing
-        dl_cmd = lambda scene_ID: print('%s %s %s' % (
+        def dl_cmd(scene_ID): print('%s %s %s' % (
             CFG.job.java_commands['keyword'].strip(),  # FIXME CFG.job.java_commands is deprecated
             CFG.job.java_commands["value_download"].strip(), scene_ID))
@@ -605,7 +614,9 @@ class L1B_object(L1A_object):
         for idx, cwl, fwhm in zip(range(len(shift_cwl)), shift_cwl, shift_fwhm):
             if shift_bbl[idx]:
                 continue  # skip cwl if it is declared as bad band above
-            is_inside = lambda r_cwl, s_cwl, s_fwhm: s_cwl - s_fwhm / 2 < r_cwl < s_cwl + s_fwhm / 2
+
+            def is_inside(r_cwl, s_cwl, s_fwhm): return s_cwl - s_fwhm / 2 < r_cwl < s_cwl + s_fwhm / 2
+
             matching_r_cwls = [r_cwl for i, r_cwl in enumerate(ref_cwl) if
                                is_inside(r_cwl, cwl, fwhm) and not ref_bbl[i]]
             if matching_r_cwls:
...
@@ -6,15 +6,10 @@ import re
 import logging
 import dill
 import traceback
-from typing import List
+from typing import List, TypeVar

 import numpy as np
-try:
-    from osgeo import osr
-except ImportError:
-    import osr

 from geoarray import GeoArray
 from py_tools_ds.geo.map_info import mapinfo2geotransform
@@ -165,7 +160,8 @@ class L1C_object(L1B_object):
         :return:
         """
         if self._SAA_arr is None:
-            _ = self.SZA_arr  # getter also sets self._SAA_arr
+            # noinspection PyStatementEffect
+            self.SZA_arr  # getter also sets self._SAA_arr
         return self._SAA_arr

     @SAA_arr.setter
@@ -201,6 +197,9 @@ class L1C_object(L1B_object):
         del self.dem

+_T_list_L1Cobjs = TypeVar(List[L1C_object])
+
+
 class AtmCorr(object):
     def __init__(self, *L1C_objs, reporting=False):
         """Wrapper around atmospheric correction by Andre Hollstein, GFZ Potsdam
@@ -227,7 +226,7 @@ class AtmCorr(object):
         assert len(list(set(scene_IDs))) == 1, \
             "Input GMS objects for 'AtmCorr' must all belong to the same scene ID!. Received %s." % scene_IDs

-        self.inObjs = L1C_objs  # type: List[L1C_object]
+        self.inObjs = L1C_objs  # type: _T_list_L1Cobjs
         self.reporting = reporting
         self.ac_input = {}  # set by self.run_atmospheric_correction()
         self.results = None  # direct output of external atmCorr module (set by run_atmospheric_correction)
@@ -850,7 +849,7 @@ class AtmCorr(object):
                 # FIXME really set AC nodata values to GMS outZero?
                 surf_refl[nodata] = oZ_refl  # overwrite AC nodata values with GMS outZero
                 # apply the original nodata mask (indicating background values)
-                surf_refl[np.array(inObj.mask_nodata) == False] = oF_refl
+                surf_refl[np.array(inObj.mask_nodata).astype(np.int8) == 0] = oF_refl

                 if self.results.bad_data_value is np.nan:
                     surf_refl[