Commit be17e860 authored by Daniel Scheffler

- replaced the duplicated fork of COREG within L1B_P by imports from external packages

- GeoMultiSens now depends on CoReg_Sat and py_tools_ds!
GEOP:
- moved warp_ndarray to external package 'py_tools_ds' (see the call sketch below this message)
- updated warp_ndarray calls
L1A_P:
- added property coreg_needed
- added cubic resampling for MGRS tiles that have to be reprojected to an adjacent UTM zone
- to_MGRS_tiles(): added verbose mode
L1B_P:
- COREG and DESHIFTER are now imported from external package CoReg_Sat
- deleted the deprecated draft of COREG_GMS
- added class Scene_getter(): a class used to find a suitable geospatial reference scene for a given target scene
- added class ref_Scene
- L1B_object:
    - revised the whole class (it now generates its coreg_info and deshift_results on its own)
    - added property spatRef_available
    - added get_spatial_reference_scene()
    - added get_opt_bands4matching (based on an earlier version from COREG)
    - added coregister_spatially()
    - added correct_spatial_shifts(): not yet working
L1C_P:
- L1C_object: updated __init__() args of super class L1B_object
L2A_P:
- updated warp_ndarray calls
L2B_P:
- spectral homogenization is now only executed if the target CWLs differ from the source CWLs
INP_R:
- fixed an unclosed file handle within GMSfile2dict
OUT_W:
- fixed an unclosed file handle within Obj2ENVI
PC:
- added a new version of L1B_map_1()
parent 3c70c06a
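The central API change is the switch from the GEOP-internal fork of warp_ndarray to the version in py_tools_ds, which also renames several keywords: rspAlg takes a resampling name as a string instead of the old integer rsp_alg code, out_bounds replaces out_extent, and out_gsd replaces out_res. A minimal sketch of the updated call with dummy data; the signature of the external function is inferred from the calls in this diff, and the EPSG-to-WKT helper is only added to keep the sketch self-contained:

import numpy as np
from osgeo import osr
from py_tools_ds.ptds.geo.raster.reproject import warp_ndarray  # new import path introduced by this commit

def epsg_to_wkt(epsg):
    # helper for this sketch; the calls in this diff pass WKT projection strings
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(epsg)
    return srs.ExportToWkt()

in_arr = np.random.randint(0, 255, (100, 100)).astype(np.int16)  # dummy single-band image
in_gt = (360000.0, 30.0, 0.0, 5600010.0, 0.0, -30.0)             # GDAL geotransform, 30 m pixels

# rspAlg takes a string ('cubic') instead of the old rsp_alg=2; the function
# returns the warped array plus its new geotransform and projection
warped, out_gt, out_prj = warp_ndarray(in_arr, in_gt, epsg_to_wkt(32632), epsg_to_wkt(4326),
                                       rspAlg='cubic', in_nodata=-9999, out_nodata=-9999)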
......@@ -27,7 +27,6 @@ import math
import subprocess
import builtins
import time
import rasterio
import warnings
import collections
from scipy.interpolate import RegularGridInterpolator
......@@ -45,9 +44,7 @@ except ImportError:
import gdal
import gdalnumeric
from gdalconst import *
from rasterio.warp import reproject as rio_reproject
from rasterio.warp import calculate_default_transform as rio_calc_transform
from rasterio.warp import Resampling
import pyproj
from pyorbital import astronomy
import ephem
......@@ -59,6 +56,8 @@ from numba import jit
from ..io import envifilehandling_BD as ef
from ..misc import helper_functions as HLP_F
from py_tools_ds.ptds.geo.raster.reproject import warp_ndarray
job, usecase = builtins.GMS_config.job, builtins.GMS_config.usecase # read from builtins (set by process_controller)
class GEOPROCESSING(object):
......@@ -989,9 +988,9 @@ class GEOPROCESSING(object):
out_nodataVal = HLP_F.get_outFillZeroSaturated(in_arr.dtype)[0]
t0= time.time()
warped = warp_ndarray(in_arr, self.geotransform, self.projection, out_prj,
out_extent=([xmin, ymin, xmax, ymax]), rsp_alg=2,
in_nodata=inFill,out_nodata=out_nodataVal) [0] # cubic resampling
warped, gt, prj = warp_ndarray(in_arr, self.geotransform, self.projection, out_prj,
out_bounds=([xmin, ymin, xmax, ymax]), rspAlg='cubic',
in_nodata=inFill,out_nodata=out_nodataVal)
# multiprocessing: not implemented further because multiprocessing within Python Mappers is not possible
#args = [(in_arr[:,:,i],self.geotransform,self.projection,out_prj,([xmin, ymin, xmax, ymax]),
......@@ -2763,158 +2762,6 @@ def get_point_bounds(points):
return min_lon, min_lat, max_lon, max_lat
def warp_ndarray(ndarray, in_gt, in_prj, out_prj, out_gt=None, outRowsCols=None, outUL=None, out_res=None,
out_extent=None, out_dtype=None, rsp_alg=0, in_nodata=None, out_nodata=None, outExtent_within=True):
"""Reproject / warp a numpy array with given geo information to target coordinate system.
:param ndarray: numpy.ndarray [rows,cols,bands]
:param in_gt: input gdal GeoTransform
:param in_prj: input projection as WKT string
:param out_prj: output projection as WKT string
:param out_gt: output gdal GeoTransform as float tuple in the source coordinate system (optional)
:param outUL: [X,Y] output upper left coordinates as floats in the source coordinate system
(requires outRowsCols)
:param outRowsCols: [rows, cols] (optional)
:param out_res: output resolution as tuple of floats (x,y) in the TARGET coordinate system
:param out_extent: [left, bottom, right, top] as floats in the source coordinate system
:param out_dtype: output data type as numpy data type
:param rsp_alg: Resampling method to use. One of the following (int, default is 0):
0 = nearest neighbour, 1 = bilinear, 2 = cubic, 3 = cubic spline, 4 = lanczos,
5 = average, 6 = mode
:param in_nodata: no data value of the input image
:param out_nodata: no data value of the output image
:param outExtent_within: Ensures that the output extent is within the input extent.
Otherwise an exception is raised.
:return out_arr: warped numpy array
:return out_gt: warped gdal GeoTransform
:return out_prj: warped projection as WKT string
"""
if not ndarray.flags['OWNDATA']:
temp = np.empty_like(ndarray)
temp[:] = ndarray
ndarray = temp # deep copy: converts view to its own array in order to avoid wrong output
with rasterio.env.Env():
if outUL is not None:
assert outRowsCols is not None, 'outRowsCols must be given if outUL is given.'
outUL = [in_gt[0], in_gt[3]] if outUL is None else outUL
inEPSG, outEPSG = [WKT2EPSG(prj) for prj in [in_prj, out_prj]]
assert inEPSG, 'Could not derive input EPSG code.'
assert outEPSG, 'Could not derive output EPSG code.'
assert in_nodata is None or type(in_nodata) in [int, float]
assert out_nodata is None or type(out_nodata) in [int, float]
src_crs = {'init': 'EPSG:%s' % inEPSG}
dst_crs = {'init': 'EPSG:%s' % outEPSG}
if len(ndarray.shape) == 3:
# convert input array axis order to rasterio axis order
ndarray = np.swapaxes(np.swapaxes(ndarray, 0, 2), 1, 2)
bands, rows, cols = ndarray.shape
rows, cols = outRowsCols if outRowsCols else (rows, cols)
else:
rows, cols = ndarray.shape if outRowsCols is None else outRowsCols
# set dtypes ensuring at least int16 (int8 is not supported by rasterio)
in_dtype = ndarray.dtype
out_dtype = ndarray.dtype if out_dtype is None else out_dtype
out_dtype = np.int16 if str(out_dtype) == 'int8' else out_dtype
ndarray = ndarray.astype(np.int16) if str(in_dtype) == 'int8' else ndarray
gt2bounds = lambda gt, r, c: [gt[0], gt[3] + r * gt[5], gt[0] + c * gt[1], gt[3]] # left, bottom, right, top
# get dst_transform
if out_gt is None and out_extent is None:
if outRowsCols:
outUL = [in_gt[0], in_gt[3]] if outUL is None else outUL
ulRC2bounds = lambda ul, r, c: [ul[0], ul[1] + r * in_gt[5], ul[0] + c * in_gt[1], ul[1]] # left, bottom, right, top
left, bottom, right, top = ulRC2bounds(outUL, rows, cols)
else: # outRowsCols is None and outUL is None: use in_gt
left, bottom, right, top = gt2bounds(in_gt, rows, cols)
# ,im_xmax,im_ymin,im_ymax = corner_coord_to_minmax(get_corner_coordinates(self.ds_im2shift))
elif out_extent:
left, bottom, right, top = out_extent
else: # out_gt is given
left, bottom, right, top = gt2bounds(in_gt, rows, cols)
if outExtent_within:
# input array is only a window of the actual input array
assert left >= in_gt[0] and right <= (in_gt[0] + (cols + 1) * in_gt[1]) and \
bottom >= in_gt[3] + (rows + 1) * in_gt[5] and top <= in_gt[3], \
"out_extent has to be completely within the input image bounds."
if out_res is None:
# get pixel resolution in target coord system
prj_in_out = (isProjectedOrGeographic(in_prj), isProjectedOrGeographic(out_prj))
assert None not in prj_in_out, 'prj_in_out contains None.'
if prj_in_out[0] == prj_in_out[1]:
out_res = (in_gt[1], abs(in_gt[5]))
elif prj_in_out == ('geographic', 'projected'):
raise NotImplementedError('Different projections are currently not supported.')
else: # ('projected','geographic')
px_size_LatLon = np.array(pixelToLatLon([1, 1], geotransform=in_gt, projection=in_prj)) - \
np.array(pixelToLatLon([0, 0], geotransform=in_gt, projection=in_prj))
out_res = tuple(reversed(abs(px_size_LatLon)))
print('RE-CHECK OUT_RES: ', out_res)
dst_transform, out_cols, out_rows = rio_calc_transform(
src_crs, dst_crs, cols, rows, left, bottom, right, top, resolution=out_res) # TODO keyword densify_pts=21 does not exist anymore (moved to transform_bounds()) -> could that be a problem? check warp outputs
rows_expected, cols_expected = abs(top - bottom) / out_res[1], abs(right - left) / out_res[0]
if rows == cols and rows_expected == cols_expected and inEPSG == outEPSG and out_rows != out_cols:
out_rows, out_cols = rows_expected, cols_expected
# fixes an issue where rio_calc_transform() does not return quadratic output image although input parameters
# correspond to a quadratic image and inEPSG equals outEPSG
aff = list(dst_transform)
out_gt = out_gt if out_gt else (aff[2], aff[0], aff[1], aff[5], aff[3], aff[4])
src_transform = rasterio.transform.from_origin(in_gt[0], in_gt[3], in_gt[1], in_gt[5])
dict_rspInt_rspAlg = \
{0: Resampling.nearest, 1: Resampling.bilinear, 2: Resampling.cubic,
3: Resampling.cubic_spline, 4: Resampling.lanczos, 5: Resampling.average, 6: Resampling.mode}
out_arr = np.zeros((bands, out_rows, out_cols), out_dtype) \
if len(ndarray.shape) == 3 else np.zeros((out_rows, out_cols), out_dtype)
# FIXME direct passing of src_transform and dst_transform results in a wrong output image. Maybe a rasterio-bug?
# rio_reproject(ndarray, out_arr, src_transform=src_transform, src_crs=src_crs, dst_transform=dst_transform,
# dst_crs=dst_crs, resampling=dict_rspInt_rspAlg[rsp_alg])
# FIXME indirect passing causes Future warning
with warnings.catch_warnings():
warnings.simplefilter('ignore') # FIXME suppresses: FutureWarning: GDAL-style transforms are deprecated and will not be supported in Rasterio 1.0.
try:
#print('INPUTS')
#print(ndarray.shape, ndarray.dtype, out_arr.shape, out_arr.dtype)
#print(in_gt)
#print(src_crs)
#print(out_gt)
#print(dst_crs)
#print(dict_rspInt_rspAlg[rsp_alg])
#print(in_nodata)
#print(out_nodata)
rio_reproject(ndarray, out_arr,
src_transform=in_gt, src_crs=src_crs, dst_transform=out_gt, dst_crs=dst_crs,
resampling=dict_rspInt_rspAlg[rsp_alg], src_nodata=in_nodata, dst_nodata=out_nodata)
from matplotlib import pyplot as plt
#print(out_arr.shape)
#plt.figure()
#plt.imshow(out_arr[:,:,1])
except KeyError:
print(in_dtype, str(in_dtype))
print(ndarray.dtype)
# convert output array axis order to GMS axis order [rows,cols,bands]
out_arr = out_arr if len(ndarray.shape) == 2 else np.swapaxes(np.swapaxes(out_arr, 0, 1), 1, 2)
if outRowsCols:
out_arr = out_arr[:outRowsCols[0], :outRowsCols[1]]
return out_arr, out_gt, out_prj
def reproject_shapelyPoly(shapelyPoly, tgt_prj):
# type: (shapely.Polygon, str) -> shapely.Polygon
"""Repojects a shapely polygon that has LonLat coordinates into the given target projection.
......
......@@ -50,7 +50,8 @@ from ..io import Output_writer as OUT_W
from ..misc import path_generator as PG
from ..misc import database_tools as DB_T
from ..misc.mgrs_tile import MGRS_tile
from ..io.GeoArray import GeoArray
#from ..io.GeoArray import GeoArray
from py_tools_ds.ptds import GeoArray
job, usecase = builtins.GMS_config.job, builtins.GMS_config.usecase # read from builtins (set by process_controller)
......@@ -211,6 +212,13 @@ class L1A_object(object):
self.MetaObj.logger = self.logger # TODO unpickle meta to MetaObj
@property
def coreg_needed(self):
gt = GEOP.mapinfo2geotransform(self.meta['map info'])
return (GEOP.is_coord_grid_equal(gt, usecase.spatial_ref_gridx, usecase.spatial_ref_gridy) and
self.dataset_ID == usecase.datasetid_spatial_ref) is False
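# The property above skips coregistration only when the scene is already aligned to
# the spatial reference grid AND is itself the spatial reference dataset. Below is a
# standalone sketch (not part of this commit) of such a grid-alignment test; the real
# GEOP.is_coord_grid_equal may differ, and representing the grid as
# [origin, origin + spacing] is an assumption:
import numpy as np

def is_coord_grid_equal_sketch(gt, xgrid, ygrid):
    ulx, xgsd, _, uly, _, ygsd = gt
    xspacing, yspacing = xgrid[1] - xgrid[0], ygrid[1] - ygrid[0]
    return bool(np.isclose((ulx - xgrid[0]) % xspacing, 0) and   # origin on grid in x
                np.isclose((uly - ygrid[0]) % yspacing, 0) and   # origin on grid in y
                np.isclose(abs(xgsd), xspacing) and              # pixel size matches grid
                np.isclose(abs(ygsd), yspacing))

gt_on_grid = (360000.0, 30.0, 0.0, 5600010.0, 0.0, -30.0)        # 30 m pixels, origin on a 30 m grid
print(is_coord_grid_equal_sketch(gt_on_grid, [0, 30], [0, 30]))  # True -> coreg_needed is False
                                                                 # if the scene is its own reference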
def fill_from_disk(self,tuple_GMS_subset):
"""Fills an already instanced GMS object with data from disk. Excludes array attributes in Python mode.
......@@ -1003,7 +1011,7 @@ class L1A_object(object):
assert hasattr(self,'mask_1bit') and self.mask_1bit is not None, "The L1A object needs to have a nodata mask."
self.logger.info('Calculating true data corner positions (image and world coordinates)...')
if re.search('ETM+', self.sensor) and self.acquisition_date > datetime.datetime(year=2003, month=5, day=31):
if re.search('ETM+', self.sensor) and self.acquisition_date > datetime.datetime(year=2003, month=5, day=31): # FIXME add time
self.trueDataCornerPos = GEOP.calc_FullDataset_corner_positions(self.mask_1bit,algorithm='numpy')
else:
self.trueDataCornerPos = GEOP.calc_FullDataset_corner_positions(self.mask_1bit)
......@@ -1257,10 +1265,11 @@ class L1A_object(object):
# get subsetted and (possibly) reprojected array
xmin,xmax,ymin,ymax = mapBounds
rspAlg = 'near' if arrname=='masks' else 'cubic'
sub_arr,sub_gt,sub_prj = geoArr.get_mapPos((xmin,ymin,xmax,ymax), mapBounds_prj,
fillVal=nodata, arr_gt=gt, arr_prj=prj)
fillVal=nodata, arr_gt=gt, arr_prj=prj, rspAlg=rspAlg)
# show result
if v: GeoArray(sub_arr).show()
if v: GeoArray(sub_arr).show(figsize=(10,10))
# update array-related attributes of sub_GMS_obj
setattr(sub_GMS_obj, arrname, sub_arr)
......@@ -1304,11 +1313,13 @@ class L1A_object(object):
sub_GMS_obj.data_corners_imXY = [(YX[1], YX[0]) for YX in corners_imYX]
# calculate data_corners_LonLat
data_corners_LatLon = GEOP.pixelToLatLon(sub_GMS_obj.data_corners_imXY, geotransform=sub_gt, projection=sub_prj)
data_corners_LatLon = GEOP.pixelToLatLon(sub_GMS_obj.data_corners_imXY,
geotransform=sub_gt, projection=sub_prj)
sub_GMS_obj.data_corners_LonLat = [(YX[1], YX[0]) for YX in data_corners_LatLon]
# calculate data_corners_utm
data_corners_utmYX = GEOP.pixelToMapYX([ULxy, URxy, LLxy, LRxy], geotransform=sub_gt, projection=sub_prj) # FIXME asserts gt in UTM coordinates
data_corners_utmYX = GEOP.pixelToMapYX([ULxy, URxy, LLxy, LRxy],
geotransform=sub_gt, projection=sub_prj) # FIXME asserts gt in UTM coordinates
sub_GMS_obj.data_corners_utm = [(YX[1], YX[0]) for YX in data_corners_utmYX]
# rename trueDataCornerLonLat to fullScene_corner_lonlat
......@@ -1318,11 +1329,12 @@ class L1A_object(object):
return sub_GMS_obj
def to_MGRS_tiles(self, pixbuffer=10):
def to_MGRS_tiles(self, pixbuffer=10, v=False):
# type: (int, bool) -> list
"""Returns a list of GMS objects representing the MGRS tiles for the given GMS object.
:param pixbuffer: <int> a buffer in pixel values used to generate an overlap between the returned MGRS tiles
:param v: <bool> verbose mode
:return:
"""
assert self.arr_shape == 'cube', "Only 'cube' objects can be cut into MGRS tiles. Got %s." % self.arr_shape
......@@ -1349,7 +1361,8 @@ class L1A_object(object):
self.get_subset_obj(mapBounds = GDF_row.map_bounds_MGRS,
mapBounds_prj = GEOP.EPSG2WKT(GDF_row['MGRStileObj'].EPSG),
logmsg = 'Producing MGRS tile %s from scene %s (entity ID %s).'
%(GDF_row.granuleid, self.scene_ID, self.entity_ID))
%(GDF_row.granuleid, self.scene_ID, self.entity_ID),
v = v)
GDF_MGRS_tiles['GMS_obj_MGRS_tile'] = GDF_MGRS_tiles.apply(lambda GDF_row: get_GMSobj_MGRS(GDF_row), axis=1)
......
This diff is collapsed.
......@@ -39,7 +39,7 @@ job = builtins.GMS_config.job
########################### core functions ####################################
class L1C_object(L1B_object):
def __init__(self, L1B_obj):
super().__init__(None,None)
super().__init__(None)
if L1B_obj: [setattr(self, key, value) for key,value in L1B_obj.__dict__.items()]
self.proc_level = 'L1C'
self.lonlat_arr = None # set by self.get_lonlat_coord_array()
......
......@@ -176,8 +176,8 @@ class DESHIFTER(object):
self.ref_gt = dict_GMS_obj['coreg_info']['reference geotransform']
self.ref_cols = dict_GMS_obj['coreg_info']['reference extent']['cols']
self.ref_rows = dict_GMS_obj['coreg_info']['reference extent']['rows']
self.is_shifted = dict_GMS_obj['coreg_info']['is shifted']
self.is_resampled = dict_GMS_obj['coreg_info']['is resampled']
self.is_shifted = False
self.is_resampled = False
self.x_shift_px = dict_GMS_obj['coreg_info']['corrected_shifts_px']['x']
self.y_shift_px = dict_GMS_obj['coreg_info']['corrected_shifts_px']['y']
self.updated_projection = self.ref_prj
......@@ -402,11 +402,11 @@ class DESHIFTER(object):
if self.cliptoextent:
out_arr, out_gt, out_prj = GEOP.warp_ndarray(in_arr,self.shift_gt,self.shift_prj,self.ref_prj,
rsp_alg=rsp_alg, in_nodata=in_nodata, out_nodata=in_nodata, out_res=(xgsd,xgsd),
out_extent=[xmin,ymin,xmax,ymax]) # FIXME resamp_alg: "average" causes sinus-shaped patterns in fft images
rspAlg=rsp_alg, in_nodata=in_nodata, out_nodata=in_nodata, out_gsd=(xgsd,xgsd),
out_bounds=[xmin,ymin,xmax,ymax]) # FIXME resamp_alg: "average" causes sinus-shaped patterns in fft images
else:
out_arr, out_gt, out_prj = GEOP.warp_ndarray(in_arr,self.shift_gt,self.shift_prj,self.ref_prj,
rsp_alg=rsp_alg, in_nodata=in_nodata, out_nodata=in_nodata, out_res=(xgsd,xgsd), out_gt=out_gt)
rspAlg=rsp_alg, in_nodata=in_nodata, out_nodata=in_nodata, out_gsd=(xgsd,xgsd), out_gt=out_gt)
# output array is shifted AND has no odd corner coordinates because it is aligned to reference grid!
......
......@@ -25,19 +25,22 @@ class L2B_object(L2A_object):
def spectral_homogenization(self, subset=None, kind='linear'):
src_cwls = self.meta['wavelength']
tgt_cwls = usecase.target_CWL # FIXME exclude or include thermal bands; respect sorted CWLs in context of LayerBandsAssignment
assert kind in ['linear',], "%s is not a supported kind of homogenization." %kind
self.log_for_fullArr_or_firstTile('Performing spectral homogenization (%s) with target wavelength '
'positions at %s nm.' %(kind,', '.join(np.array(tgt_cwls[:-1]).astype(str))+' and %s' %tgt_cwls[-1]))
inArray = self.arr if isinstance(self.arr,np.ndarray) else \
INP_R.read_ENVIfile(self.arr, self.arr_shape, self.arr_pos, self.logger, q=1)
self.arr = self.interpolate_cube_linear(inArray,src_cwls,tgt_cwls) if kind=='linear' else self.arr
self.meta['wavelength'] = list(tgt_cwls)
self.meta['bands'] = len(tgt_cwls)
self.meta['LayerBandsAssignment'] = [] # TODO
if 'band names' in self.meta: # FIXME bug workaround
del self.meta['band names'] # TODO
if src_cwls!=tgt_cwls:
assert kind in ['linear',], "%s is not a supported kind of homogenization." %kind
self.log_for_fullArr_or_firstTile('Performing spectral homogenization (%s) with target wavelength '
'positions at %s nm.' %(kind,', '.join(np.array(tgt_cwls[:-1]).astype(str))+' and %s' %tgt_cwls[-1]))
inArray = self.arr if isinstance(self.arr,np.ndarray) else \
INP_R.read_ENVIfile(self.arr, self.arr_shape, self.arr_pos, self.logger, q=1)
self.arr = self.interpolate_cube_linear(inArray,src_cwls,tgt_cwls) if kind=='linear' else self.arr
self.meta['wavelength'] = list(tgt_cwls)
self.meta['bands'] = len(tgt_cwls)
self.meta['LayerBandsAssignment'] = [] # TODO
if 'band names' in self.meta: # FIXME bug workaround
del self.meta['band names'] # TODO
else:
pass # FIXME log spec homo skipped
@staticmethod
def interpolate_cube_linear(arrcube, source_CWLs, target_CWLs):
......
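The body of interpolate_cube_linear is not part of this diff; as one straightforward way to implement the 'linear' kind guarded above, here is a sketch that interpolates every pixel spectrum from the source to the target center wavelengths with scipy (the actual GMS implementation may differ):

import numpy as np
from scipy.interpolate import interp1d

def interpolate_cube_linear_sketch(arrcube, source_CWLs, target_CWLs):
    # arrcube uses the GMS axis order [rows, cols, bands]; interpolate along
    # the band axis from the source to the target center wavelengths
    f = interp1d(np.asarray(source_CWLs), arrcube, axis=2, kind='linear',
                 fill_value='extrapolate')
    return f(np.asarray(target_CWLs)).astype(arrcube.dtype)

cube = np.random.rand(4, 4, 3).astype(np.float32)                    # dummy 3-band cube
out = interpolate_cube_linear_sketch(cube, [480., 560., 660.], [490., 600.])
print(out.shape)                                                     # (4, 4, 2)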
......@@ -118,7 +118,9 @@ def GMSfile2dict(path_GMSfile):
:param path_GMSfile: absolute path on disk
:return: the corresponding Python dictionary
"""
return json.load(open(path_GMSfile))
with open(path_GMSfile) as inF:
GMSdict = json.load(inF)
return GMSdict
def unify_envi_header_keys(header_dict):
......
......@@ -183,7 +183,8 @@ def ASCII_writer(In,path_out_baseN):
# # Let the base class default method raise the TypeError
# return json.JSONEncoder.default(self, obj)
# json.dump(In, open(path_out_baseN,'w'),skipkeys=True,sort_keys=True,cls=customJSONEncoder,separators=(',', ': '),indent =4)
json.dump(In, open(path_out_baseN,'w'),skipkeys=True,sort_keys=True,separators=(',', ': '),indent =4)
with open(path_out_baseN,'w') as outF:
json.dump(In, outF,skipkeys=True,sort_keys=True,separators=(',', ': '),indent =4)
def Tiles_Writer(tileList_or_Array, out_path, out_shape, out_dtype, out_interleave, out_meta=None,
......
......@@ -220,10 +220,10 @@ class Scene:
:param bounds: scene bounds as a list of lon/lat WGS-84 coordinates (lon1, lat1, lon2, lat2, ...),
e.g. (10.00604, 49.19385, 7.45638, 49.64513, 8.13739, 51.3515, 10.77705, 50.89307)
"""
self.sceneid = sceneid
self.sceneid = sceneid
self.acquisitiondate = acquisitiondate
self.cloudcover = cloudcover
self.bounds = bounds
tempList = list(bounds) + [None] * 2
self.coordsLonLat = [tempList[n:n + 2] for n in range(0, len(bounds), 2)]
self.polyLonLat = Polygon(self.coordsLonLat)
self.cloudcover = cloudcover
self.bounds = bounds
tempList = list(bounds) + [None] * 2
self.coordsLonLat = [tempList[n:n + 2] for n in range(0, len(bounds), 2)]
self.polyLonLat = Polygon(self.coordsLonLat)
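For reference, the constructor above turns the flat (lon1, lat1, lon2, lat2, ...) bounds tuple into coordinate pairs via the tempList[n:n + 2] slicing idiom and builds a shapely polygon from them; a minimal reproduction using the example coordinates from the docstring:

from shapely.geometry import Polygon

bounds = (10.00604, 49.19385, 7.45638, 49.64513, 8.13739, 51.3515, 10.77705, 50.89307)

# pair up the flat tuple: [[lon1, lat1], [lon2, lat2], ...]; the + [None] * 2
# padding in the original guards against odd-length input
tempList = list(bounds) + [None] * 2
coordsLonLat = [tempList[n:n + 2] for n in range(0, len(bounds), 2)]
polyLonLat = Polygon(coordsLonLat)
print(coordsLonLat[0], round(polyLonLat.area, 3))  # [10.00604, 49.19385] and the polygon area in deg^2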
......@@ -72,7 +72,7 @@ dtype_lib_GDAL_Python= {"uint8": 1, "int8": 1, "uint16": 2, "int16": 3, "uint32"
"float64": 7, "complex64": 10, "complex128": 11}
parentObjDict = {'L1A': L1A_object, 'L1B': L1B_object, 'L1C': L1C_object,
'L2A': L2A_object, 'L2B': L2B_object, 'L2C': L2C_object}
initArgsDict = {'L1A': (None,), 'L1B': (None, None), 'L1C': (None,),
initArgsDict = {'L1A': (None,), 'L1B': (None,), 'L1C': (None,),
'L2A': (None,), 'L2B': (None,), 'L2C': (None,)} # FIXME initargs L2 processors
proc_chain = ['L0A','L0B','L1A','L1B','L1C','L2A','L2B','L2C']
......@@ -217,8 +217,8 @@ def log_uncaught_exceptions(GMS_mapper):
# log the exception and raise warning
failed_Obj.logger.error('\n'+traceback_, exc_info=False)
warnings.warn("\nLogged uncaught exception '%s' within %s during processing of scene ID %s (entity "
"ID %s)!" % (value_, GMS_mapper.__name__, failed_Obj.scene_ID, failed_Obj.entity_ID))
warnings.warn("\nLogged an uncaught exception within %s during processing of scene ID %s (entity "
"ID %s):\n '%s'\n" % (GMS_mapper.__name__, failed_Obj.scene_ID, failed_Obj.entity_ID, value_))
# add the scene ID to failed_sceneids column in jobs table of postgreSQL database
res = DB_T.get_info_from_postgreSQLdb(config.job.conn_database, 'jobs', ['failed_sceneids'],
......
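Only the warning text of log_uncaught_exceptions changes here; for context, a minimal sketch of the decorator pattern it implements: catch any exception a GMS mapper raises, warn with scene and entity IDs, and return a failure object instead of crashing the multiprocessing pool. failed_GMS_object below is a reduced stand-in for HLP_F.failed_GMS_object, and the real implementation additionally logs to the scene logger and the jobs table:

import functools
import traceback
import warnings

class failed_GMS_object:  # reduced stand-in for HLP_F.failed_GMS_object
    def __init__(self, GMS_obj, traceback_):
        self.scene_ID = getattr(GMS_obj, 'scene_ID', None)
        self.entity_ID = getattr(GMS_obj, 'entity_ID', None)
        self.traceback = traceback_

def log_uncaught_exceptions(GMS_mapper):
    @functools.wraps(GMS_mapper)
    def wrapped_GMS_mapper(GMS_obj, **kwargs):
        try:
            return GMS_mapper(GMS_obj, **kwargs)
        except Exception:
            traceback_ = traceback.format_exc()
            failed_Obj = failed_GMS_object(GMS_obj, traceback_)
            warnings.warn("\nLogged an uncaught exception within %s during processing of scene ID %s "
                          "(entity ID %s):\n %s\n" % (GMS_mapper.__name__, failed_Obj.scene_ID,
                                                      failed_Obj.entity_ID, traceback_))
            return failed_Obj  # the pool keeps running; callers filter these objects out afterwards
    return wrapped_GMS_mapper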
......@@ -9,15 +9,16 @@
########################### Library import ####################################
from __future__ import (division, print_function, unicode_literals,absolute_import)
import sys
import os
import multiprocessing
__author__='Daniel Scheffler'
import datetime
import multiprocessing
import os
import builtins
import sys
import time
from itertools import chain
__author__='Daniel Scheffler'
print('##################################################################################################')
called_from_iPyNb = 1 if 'ipykernel/__main__.py' in sys.argv[0] else 0
......@@ -63,7 +64,7 @@ if isdebugging: #override the existing settings in order to get write access eve
from . import config
builtins.GMS_config = config
for i in [attr for attr in dir(config) if attr.startswith('exec__')]:
globals()[i] = getattr(config,i)
globals()[i] = getattr(config, i)
job, usecase, GMS_call_type = builtins.GMS_config.job, builtins.GMS_config.usecase, builtins.GMS_config.GMS_call_type
parallelization_level = 'scenes'
assert parallelization_level in ['scenes', 'tiles']
......@@ -175,14 +176,9 @@ def L1B_map_1(L1A_obj):
"""L1A_obj enthält in Python- (im Gegensatz zur Flink-) Implementierung KEINE ARRAY-DATEN!,
nur die für die ganze Szene gültigen Metadaten"""
"""1. calculate shifts"""
COREG_obj = L1B_P.COREG(L1A_obj.__dict__.copy(),v=0,q=1)
if COREG_obj.coreg_needed and COREG_obj.success: # skip coregistration if L1B_P.COREG returns 0 (e.g. if no reference scene exists)
COREG_obj.calculate_spatial_shifts()
if not COREG_obj: print(L1A_obj.arr_pos)
L1B_obj = L1B_P.L1B_object(L1A_obj)
L1B_obj.coregister_spatially()
"""2. get L1B object with attribute coreg_info"""
L1B_obj = L1B_P.L1B_object(L1A_obj,COREG_obj)
if job.exec__L1BP[1]:
OUT_W.Obj2ENVI(L1B_obj)
L1B_obj.delete_tempFiles()
......@@ -230,8 +226,8 @@ def L2A_map_1(L1C_obj):
# type: (L1C_P.L1C_object) -> L2A_P.DESHIFTER
"""get Deshift instances by executing DESHIFTER.__init__()"""
deshift_configs = L2A_P.get_DESHIFTER_configs(L1C_obj.__dict__.copy(),
['arr','masks','mask_clouds'], proc_bandwise=(True if job.exec_mode=='Flink' else False)) # array-wise or band-wise parallelization
DESHIFT_instances = [L2A_P.DESHIFTER(obj,attr,**kwargs) for obj,attr,kwargs in deshift_configs]
['arr','masks','mask_clouds'], proc_bandwise=(True if job.exec_mode=='Flink' else False)) # array-wise or band-wise parallelization
DESHIFT_instances = [L2A_P.DESHIFTER(obj, attr, **kwargs) for obj, attr, kwargs in deshift_configs]
return DESHIFT_instances
@HLP_F.log_uncaught_exceptions
......@@ -245,7 +241,7 @@ def L2A_map_2(L2A_obj): # TODO muss auch subsysteme, wenn vorhanden mergen (S2/A
if job.exec__L2AP[1]:
OUT_W.Obj2ENVI(L2A_obj)
L2A_obj.delete_tempFiles()
L2A_tiles = HLP_F.cut_GMS_obj_into_blocks((L2A_obj,[2048,2048]))
L2A_tiles = HLP_F.cut_GMS_obj_into_blocks((L2A_obj,[2048,2048])) # this is needed due to in-mem size restrictions of resulting objects during back-serialization
return L2A_tiles
@HLP_F.log_uncaught_exceptions
......@@ -338,7 +334,7 @@ def run_processController_in_multiprocessing(usecase_data_list):
with multiprocessing.Pool() as pool: L1B_resObjects = pool.map(L1B_map_1, L1A_Instances)
L1B_newObjects = [obj for obj in L1B_resObjects if isinstance(obj,L1B_P.L1B_object)]
L1B_newObjects = [obj for obj in L1B_resObjects if isinstance(obj, L1B_P.L1B_object)]
failed_objects += [obj for obj in L1B_resObjects if isinstance(obj,HLP_F.failed_GMS_object) and
obj.scene_ID not in sceneids_failed]
sceneids_failed = [obj.scene_ID for obj in failed_objects]
......@@ -355,7 +351,7 @@ def run_processController_in_multiprocessing(usecase_data_list):
if parallelization_level == 'scenes':
work = [[GMS, ['cube', None]] for GMS in GMSfile_list_L1B_inDB]
with multiprocessing.Pool() as pool:
L1B_DBObjects = pool.map(L1B_P.L1B_object(None, None).fill_from_disk, work)
L1B_DBObjects = pool.map(L1B_P.L1B_object(None).fill_from_disk, work)
L1B_DBObjects = list(L1B_DBObjects)
L1B_Instances = L1B_newObjects + L1B_DBObjects # combine newly and earlier processed L1B data
......@@ -466,10 +462,9 @@ def run_processController_in_multiprocessing(usecase_data_list):
L2A_Instances = L2A_newObjects + L2A_DBObjects # combine newly and earlier processed L2A data
# print('L2A_Instances', L2A_Instances)
with multiprocessing.Pool(15) as pool: # FIXME
with multiprocessing.Pool(15) as pool:
L2B_resObjects = pool.map(L2B_map_1, L2A_Instances)
else: # tiles
blocksize = (2048, 2048) # must be equal to the blocksize of L2A_newTiles specified in L2A_map_2
L2A_newTiles = L2A_tiles
......
......@@ -9,16 +9,18 @@
########################### Library import ####################################
from __future__ import (division, print_function, unicode_literals,absolute_import)
from multiprocessing import dummy
import sys
import os
import multiprocessing
import datetime
import dill
import builtins
import inspect
import time
import multiprocessing
import os
import shutil
from multiprocessing import dummy
import builtins
import dill
import sys
import time
print('##################################################################################################')
called_from_iPyNb = 1 if 'ipykernel/__main__.py' in sys.argv[0] else 0
......@@ -36,7 +38,7 @@ if isdebugging: #override the existing settings in order to get write access eve
import config
builtins.GMS_config = config
for i in [attr for attr in dir(config) if attr.startswith('exec__')]:
globals()[i] = getattr(config,i)
globals()[i] = getattr(config, i)
job, usecase, GMS_call_type = builtins.GMS_config.job, builtins.GMS_config.usecase, builtins.GMS_config.GMS_call_type
#shutil.rmtree('/dev/shm/GeoMultiSens/')
......@@ -188,7 +190,7 @@ def run_processController_in_singleprocessing(usecase_data_list):
def run_processController_in_multiprocessing(usecase_data_list):
pool = multiprocessing.Pool(job.CPUs)
usecase_data_list = pool.map(L0A_P.add_local_availability,usecase_data_list)
usecase_data_list = pool.map(L0A_P.add_local_availability, usecase_data_list)
#[print(i) for i in usecase_data_list]
L1A_already_present = lambda dataset: HLP_F.proc_level_already_present(dataset['proc_level'],'L1A')
datalist_newData = [dataset for dataset in usecase_data_list if not L1A_already_present(dataset)]
......@@ -337,13 +339,13 @@ def run_processController_in_multiprocessing(usecase_data_list):
#[print(i) for i in scene_tilelist[0].meta]
"""1. calculate shifts"""
COREG_obj = L1B_P.COREG(L1A_obj.__dict__.copy(),v=0)
COREG_obj = L1B_P.COREG(L1A_obj.__dict__.copy(), v=0)
if not COREG_obj.success: # FIXME
continue # skip coregistration if L1B_P.COREG returns 0 (e.g. if no reference scene exists)
COREG_obj.calculate_spatial_shifts()
"""2. get L1B object with attribute coreg_info"""
L1B_obj = L1B_P.L1B_object(L1A_obj,COREG_obj)
L1B_obj = L1B_P.L1B_object(L1A_obj, COREG_obj)
del L1A_obj
"""3. perform deshifting"""
......
......@@ -9,16 +9,17 @@
########################### Library import ####################################
from __future__ import (division, print_function, unicode_literals,absolute_import)
import sys
import os
import multiprocessing
import datetime