Commit 9e7bd1f6 authored by Daniel Scheffler's avatar Daniel Scheffler
Browse files

Fixed issue #69 (Spatial homogenization leaves resampling artifacts at the image edges).

Converted GMS_object.from_sensor_subsystems, GMS_object.from_tiles and GMS_object.from_disk to class methods.
parent 3cb42d55
......@@ -477,7 +477,7 @@ class L1B_object(L1A_object):
"""
# get GMS_object for reference scene
path_gmsFile = PG.path_generator(scene_ID=self.spatRef_scene.scene_ID).get_path_gmsfile()
ref_obj = GMS_object().from_disk((path_gmsFile, ['cube', None]))
ref_obj = GMS_object.from_disk((path_gmsFile, ['cube', None]))
# get spectral characteristics
ref_cwl, shift_cwl = [[float(i) for i in GMS_obj.meta_odict['wavelength']] for GMS_obj in [ref_obj, self]]
......
......@@ -955,7 +955,6 @@ class AtmCorr(object):
# join SURFACE REFLECTANCE as 3D int16 array, scaled to scale factor from config #
##################################################################################
# FIXME AC output nodata values = 0 -> new nodata areas but mask not updated
oF_refl, oZ_refl, oS_refl = get_outFillZeroSaturated(inObj.arr.dtype)
surf_refl = np.dstack((self.results.data_ac[bandN] for bandN in ac_bandNs))
surf_refl *= CFG.scale_factor_BOARef # scale using scale factor (output is float16)
......@@ -1040,7 +1039,7 @@ class AtmCorr(object):
for inObj in self.inObjs:
# delete all previous cloud masks
del inObj.mask_clouds
del inObj.mask_clouds # FIXME validate if FMask product is within AC results
# append mask_clouds only to the input GMS object with the same dimensions
if inObj.arr.shape[:2] == mask_clouds_ac.shape:
......
......@@ -368,16 +368,6 @@ class Dataset(object):
"""Convert LayerbandsAssignment from format ['1','2',...] to bandnames like this: [B01, .., B8A,]."""
return ['B%s' % i if len(i) == 2 else 'B0%s' % i for i in LayerBandsAssignment]
def from_disk(self, tuple_GMS_subset):
    """Fill an already instantiated GMS object with data from disk.

    Array attributes are excluded in Python mode.

    :param tuple_GMS_subset: <tuple> e.g. ('/path/gms_file.gms', ['cube', None])
    :return: a shallow copy of this object (loading from disk not yet implemented)
    """
    # TODO actually restore the object state from the given GMS file;
    # for now only a shallow copy of the current instance is returned
    shallow_clone = copy.copy(self)
    return shallow_clone
def get_tilepos(self, target_tileshape, target_tilesize):
    """Compute the tile positions for the full array and store them in self.tile_pos.

    Each entry pairs the requested tile shape with one set of tile bounds
    produced by get_array_tilebounds() for the full array extent.

    :param target_tileshape: tile shape descriptor, stored unchanged with every bound entry
    :param target_tilesize:  tile size passed to get_array_tilebounds as 'tile_shape'
    """
    # NOTE(review): target_tilesize is forwarded as the 'tile_shape' argument of
    # get_array_tilebounds while target_tileshape is only stored alongside each
    # bound — confirm the two parameters are not swapped here.
    self.tile_pos = [[target_tileshape, tb]
                     for tb in get_array_tilebounds(array_shape=self.shape_fullArr, tile_shape=target_tilesize)]
......
This diff is collapsed.
......@@ -169,7 +169,7 @@ def L2A_map(L1C_objs, block_size=None, return_tiles=True):
L2A_obj.correct_spatial_shifts(cliptoextent=CFG.clip_to_extent, clipextent=common_extent, clipextent_prj=4326)
# merge multiple subsystems belonging to the same scene ID to a single GMS object
L2A_obj = L2A_P.L2A_object().from_sensor_subsystems(L2A_objs) if len(L2A_objs) > 1 else L2A_objs[0]
L2A_obj = L2A_P.L2A_object.from_sensor_subsystems(L2A_objs) if len(L2A_objs) > 1 else L2A_objs[0]
# write output
if CFG.exec_L2AP[1]:
......@@ -255,7 +255,7 @@ def run_complete_preprocessing(list_dataset_dicts_per_scene): # map (scene-wise
if input_proc_level == 'L1A':
for ds in list_dataset_dicts_per_scene:
GMSfile = path_generator(ds, proc_level='L1A').get_path_gmsfile()
L1A_objects.append(L1A_P.L1A_object().from_disk([GMSfile, ['cube', None]]))
L1A_objects.append(L1A_P.L1A_object.from_disk([GMSfile, ['cube', None]]))
L1B_objects = [L1B_map(L1A_obj) for L1A_obj in L1A_objects]
del L1A_objects
......@@ -273,7 +273,7 @@ def run_complete_preprocessing(list_dataset_dicts_per_scene): # map (scene-wise
if input_proc_level == 'L1B':
for ds in list_dataset_dicts_per_scene:
GMSfile = path_generator(ds, proc_level='L1B').get_path_gmsfile()
L1B_objects.append(L1B_P.L1B_object().from_disk([GMSfile, ['cube', None]]))
L1B_objects.append(L1B_P.L1B_object.from_disk([GMSfile, ['cube', None]]))
L1C_objects = L1C_map(L1B_objects)
del L1B_objects
......@@ -292,7 +292,7 @@ def run_complete_preprocessing(list_dataset_dicts_per_scene): # map (scene-wise
if input_proc_level == 'L1C':
for ds in list_dataset_dicts_per_scene:
GMSfile = path_generator(ds, proc_level='L1C').get_path_gmsfile()
L1C_objects.append(L1C_P.L1C_object().from_disk([GMSfile, ['cube', None]]))
L1C_objects.append(L1C_P.L1C_object.from_disk([GMSfile, ['cube', None]]))
L2A_obj = L2A_map(L1C_objects, return_tiles=False)
del L1C_objects
......@@ -309,7 +309,7 @@ def run_complete_preprocessing(list_dataset_dicts_per_scene): # map (scene-wise
assert len(list_dataset_dicts_per_scene) == 1, \
'Expected only a single L2A dataset since subsystems are merged.'
GMSfile = path_generator(list_dataset_dicts_per_scene[0], proc_level='L2A').get_path_gmsfile()
L2A_obj = L2A_P.L2A_object().from_disk([GMSfile, ['cube', None]])
L2A_obj = L2A_P.L2A_object.from_disk([GMSfile, ['cube', None]])
L2B_obj = L2B_map(L2A_obj)
del L2A_obj
......@@ -326,7 +326,7 @@ def run_complete_preprocessing(list_dataset_dicts_per_scene): # map (scene-wise
assert len(list_dataset_dicts_per_scene) == 1, \
'Expected only a single L2B dataset since subsystems are merged.'
GMSfile = path_generator(list_dataset_dicts_per_scene[0], proc_level='L2B').get_path_gmsfile()
L2B_obj = L2B_P.L2B_object().from_disk([GMSfile, ['cube', None]])
L2B_obj = L2B_P.L2B_object.from_disk([GMSfile, ['cube', None]])
L2C_obj = L2C_map(L2B_obj) # type: Union[GMS_object, failed_GMS_object, List]
del L2B_obj
......
......@@ -341,8 +341,7 @@ class process_controller(object):
# NOTE: DON'T multiprocess that with MAP(GMS_object(*initargs).from_disk, work)
# in case of multiple subsystems GMS_object(*initargs) would always point to the same object in memory
# -> subsystem attribute will be overwritten each time
def init_GMS_obj(): return HLP_F.parentObjDict[prevLvl](*HLP_F.initArgsDict[prevLvl])
DB_objs = [init_GMS_obj().from_disk(tuple_GMS_subset=w) for w in work] # init
DB_objs = [HLP_F.parentObjDict[prevLvl].from_disk(tuple_GMS_subset=w) for w in work]
if DB_objs:
DB_objs = list(chain.from_iterable(DB_objs)) if list in [type(i) for i in DB_objs] else list(DB_objs)
......@@ -567,7 +566,7 @@ class process_controller(object):
grouped_L1A_Tiles = HLP_F.group_objects_by_attributes(
L1A_obj_tiles, 'scene_ID', 'subsystem') # group results
L1A_objects = MAP(L1A_P.L1A_object().from_tiles, grouped_L1A_Tiles) # reduce
L1A_objects = MAP(L1A_P.L1A_object.from_tiles, grouped_L1A_Tiles) # reduce
L1A_resObjects = MAP(L1A_map_3, L1A_objects) # map_3
......@@ -678,7 +677,7 @@ class process_controller(object):
grouped_L2A_Tiles = HLP_F.group_objects_by_attributes(self.L2A_tiles, 'scene_ID') # group results
# reduce # will be too slow because it has to pickle back really large L2A_newObjects
# L2A_newObjects = MAP(HLP_F.merge_GMS_tiles_to_GMS_obj, grouped_L2A_Tiles)
L2A_newObjects = [L2A_P.L2A_object().from_tiles(tileList) for tileList in grouped_L2A_Tiles]
L2A_newObjects = [L2A_P.L2A_object.from_tiles(tileList) for tileList in grouped_L2A_Tiles]
"""combine newly and earlier processed L2A data"""
L2A_DBObjects = self.get_DB_objects('L2B', self.L2A_tiles)
......@@ -696,12 +695,11 @@ class process_controller(object):
L2B_tiles = MAP(L2B_map, L2A_tiles)
grouped_L2B_Tiles = \
HLP_F.group_objects_by_attributes(L2B_tiles,
'scene_ID') # group results # FIXME nötig an dieser Stelle?
# group results # FIXME nötig an dieser Stelle?
grouped_L2B_Tiles = HLP_F.group_objects_by_attributes(L2B_tiles, 'scene_ID')
[L2B_tiles_group[0].delete_tempFiles() for L2B_tiles_group in grouped_L2B_Tiles]
L2B_resObjects = [L2B_P.L2B_object().from_tiles(tileList) for tileList in grouped_L2B_Tiles]
L2B_resObjects = [L2B_P.L2B_object.from_tiles(tileList) for tileList in grouped_L2B_Tiles]
self.L2B_newObjects = [obj for obj in L2B_resObjects if isinstance(obj, L2B_P.L2B_object)]
self.failed_objects += [obj for obj in L2B_resObjects if isinstance(obj, failed_GMS_object) and
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment