Commit 977fcdba authored by Daniel Scheffler

First commit (06.07.2015)


Signed-off-by: danschef <daniel.scheffler@gfz-potsdam.de>
*.zip filter=lfs diff=lfs merge=lfs -text
# -*- coding: utf-8 -*-
###############################################################################
#
# GeoMultiSens-Lib Main
#
# Written by Daniel Scheffler
# GFZ Potsdam, Section 1.4
#
###############################################################################
def _init():
    '''Basic configuration of the GeoMultiSens package.'''
    gms = __import__(__name__.split('.')[0])  # import the top-level GeoMultiSens package
###############################################################################
#
# GeoMultiSens/__init__.py - This file is part of the GeoMultiSens package.
#
# Written by Daniel Scheffler
# GFZ Potsdam, Section 1.4
#
###############################################################################
from __future__ import division, print_function, unicode_literals
__version__ = '0.1.0'
from .algorithms import *
from .gms_io import *
from .testing import *
from .GeoMultiSens import _init
_init()
del _init
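
A minimal consumption sketch for the package wiring above (assuming the package directory is importable as GeoMultiSens; the printed version is the one defined above):

    import GeoMultiSens               # triggers _init() once; the helper is deleted again afterwards
    print(GeoMultiSens.__version__)  # -> '0.1.0'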
# -*- coding: utf-8 -*-
###############################################################################
#
# Level 0A Processor:
#
# Performed operations:
# Searches for remote sensing data within a given area of interest and returns
# a list of the found datasets, containing image type, satellite, sensor,
# acquisition date and data ID.
#
# Written by Daniel Scheffler
#
###############################################################################
########################### Library import ####################################
from __future__ import (division, print_function, unicode_literals, absolute_import)
import os, re, datetime, glob

if __name__ == '__main__' and __package__ is None:
    os.sys.path.append(os.path.dirname(os.path.abspath('.')))
import gms_io.Input_reader as INP_R
import gms_io.Output_writer as OUT_W
import misc.helper_functions as HLP_F
import algorithms.L0B_P as L0B_P
########################### core functions ####################################
archive_fold = os.path.abspath('../database/sampledata/')  # to be replaced by fileserver URL

def get_data_IDs_within_AOI(job, usecase):
    ">Code for Webserver data collection here<"
    data_list = []
    if re.search('ALOS', ','.join(usecase.filt_datasets)):  # sensorname has to be in HLP_F.get_GMS_sensorcode
        data_list.append({'job_ID':job.ID,'job_CPUs':job.CPUs,'image_type':'RSD','satellite':'ALOS', 'sensor':'AVNIR-2', 'subsystem':'', 'aquisition_date':'2009-07-02', 'data_ID':'A1002553-001-P6100002-AODS-201007300008'}) # TAR-ID
        # data_list.append({'job_ID':job.ID,'job_CPUs':job.CPUs,'image_type':'RSD','satellite':'ALOS', 'sensor':'AVNIR-2', 'subsystem':None, 'aquisition_date':'2009-07-02', 'data_ID':'20090702_L1B2'}) # extracted folder
        data_list.append({'job_ID':job.ID,'job_CPUs':job.CPUs,'image_type':'RSD','satellite':'ALOS', 'sensor':'AVNIR-2', 'subsystem':None, 'aquisition_date':'2009-07-19', 'data_ID':'20090719_L1B2'}) # extracted folder
        data_list.append({'job_ID':job.ID,'job_CPUs':job.CPUs,'image_type':'RSD','satellite':'ALOS', 'sensor':'AVNIR-2', 'subsystem':None, 'aquisition_date':'2010-04-21', 'data_ID':'20100421_L1B2'}) # extracted folder
    if re.search('Terra', ','.join(usecase.filt_datasets)):
        data_list.append({'job_ID':job.ID,'job_CPUs':job.CPUs,'image_type':'RSD','satellite':'Terra', 'sensor':'ASTER', 'subsystem':'VNIR1', 'aquisition_date':'2007-11-08', 'data_ID':'AST_L1B_00308192007061017_20071108171717_32444'}) # HDF-ID
        data_list.append({'job_ID':job.ID,'job_CPUs':job.CPUs,'image_type':'RSD','satellite':'Terra', 'sensor':'ASTER', 'subsystem':'VNIR2', 'aquisition_date':'2007-11-08', 'data_ID':'AST_L1B_00308192007061017_20071108171717_32444'}) # HDF-ID
        data_list.append({'job_ID':job.ID,'job_CPUs':job.CPUs,'image_type':'RSD','satellite':'Terra', 'sensor':'ASTER', 'subsystem':'SWIR', 'aquisition_date':'2007-11-08', 'data_ID':'AST_L1B_00308192007061017_20071108171717_32444'}) # HDF-ID
        data_list.append({'job_ID':job.ID,'job_CPUs':job.CPUs,'image_type':'RSD','satellite':'Terra', 'sensor':'ASTER', 'subsystem':'TIR', 'aquisition_date':'2007-11-08', 'data_ID':'AST_L1B_00308192007061017_20071108171717_32444'}) # HDF-ID
    if re.search('Landsat', ','.join(usecase.filt_datasets)):  # sensorname has to be in HLP_F.get_GMS_sensorcode
        # data_list.append({'job_ID':job.ID,'job_CPUs':job.CPUs,'image_type':'RSD','satellite':'Landsat-5', 'sensor':'TM', 'subsystem':'SAM', 'aquisition_date':'1996-10-24', 'data_ID':'LT51510321996298XXX01'}) # TAR-ID
        # data_list.append({'job_ID':job.ID,'job_CPUs':job.CPUs,'image_type':'RSD','satellite':'Landsat-7', 'sensor':'ETM+', 'subsystem':'SAM', 'aquisition_date':'2000-04-02', 'data_ID':'LE71510321999218SGS00'}) # BSQ-ID
        data_list.append({'job_ID':job.ID,'job_CPUs':job.CPUs,'image_type':'RSD','satellite':'Landsat-7', 'sensor':'ETM+', 'subsystem':'SAM', 'aquisition_date':'2000-04-02', 'data_ID':'LE71510322000093SGS00'}) # TAR-ID
        data_list = data_list + LandsatID2dataset([os.path.basename(i).split('.tar.gz')[0] for i in
                                                   glob.glob(os.path.join(archive_fold, 'landsat/landsat-etm-on_99-03/151-32/*.tar.gz'))], job)  # TAR-IDs
        data_list.append({'job_ID':job.ID,'job_CPUs':job.CPUs,'image_type':'RSD','satellite':'Landsat-8', 'sensor':'OLI_TIRS','subsystem':None, 'aquisition_date':'2013-07-03', 'data_ID':'LC81510322013184LGN00'}) # TAR-ID
        data_list.append({'job_ID':job.ID,'job_CPUs':job.CPUs,'image_type':'RSD','satellite':'Landsat-8', 'sensor':'OLI_TIRS','subsystem':None, 'aquisition_date':'2013-06-01', 'data_ID':'LC81510322013152LGN00'}) # TAR-ID, ~6% cloud cover
    if re.search('SPOT', ','.join(usecase.filt_datasets)):
        data_list.append({'job_ID':job.ID,'job_CPUs':job.CPUs,'image_type':'RSD','satellite':'SPOT-1', 'sensor':'HRV1', 'subsystem':None, 'aquisition_date':'1986-07-17', 'data_ID':'00197112001'})
        data_list.append({'job_ID':job.ID,'job_CPUs':job.CPUs,'image_type':'RSD','satellite':'SPOT-5', 'sensor':'HRG2', 'subsystem':None, 'aquisition_date':'2010-04-21', 'data_ID':'00197112009'})
        data_list.append({'job_ID':job.ID,'job_CPUs':job.CPUs,'image_type':'RSD','satellite':'SPOT-5', 'sensor':'HRG2', 'subsystem':None, 'aquisition_date':'2010-04-21', 'data_ID':'00197112010'})
    if re.search('RapidEye', ','.join(usecase.filt_datasets)):
        data_list.append({'job_ID':job.ID,'job_CPUs':job.CPUs,'image_type':'RSD','satellite':'RapidEye-5','sensor':'MSI', 'subsystem':None, 'aquisition_date':'2014-04-23', 'data_ID':'4357606_2014-04-23_RE5_3A_180259'})
    if re.search('SRTM', ','.join(usecase.filt_datasets)):
        data_list.append({'job_ID':job.ID,'job_CPUs':job.CPUs,'image_type':'DGM','satellite':'SRTM', 'sensor':'SRTM2', 'subsystem':None, 'aquisition_date':'unknown', 'data_ID':'srtm-1arcsec-version2jan2015-39-42n-70-85'})
    if re.search('ATM', ','.join(usecase.filt_datasets)):
        data_list.append({'job_ID':job.ID,'job_CPUs':job.CPUs,'image_type':'ATM','satellite':'ATM-data', 'sensor':'unknown', 'subsystem':None, 'aquisition_date':'unknown', 'data_ID':'dummy_ID'})
    for ds in data_list:
        ds['skip_thermal'] = usecase.skip_thermal
        ds['sort_bands_by_cwl'] = usecase.sort_bands_by_cwl
        ds['subsystem'] = '' if ds['subsystem'] is None else ds['subsystem']
    return data_list
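
A hedged invocation sketch for the function above; the job and usecase objects normally come from the GMS job database, so the stand-ins below are hypothetical and carry only the attributes accessed here:

    class _Stub(object):
        pass

    job = _Stub()
    job.ID, job.CPUs = 1, 1
    usecase = _Stub()
    usecase.filt_datasets = ['Landsat-8']
    usecase.skip_thermal, usecase.sort_bands_by_cwl = True, True

    data_list = get_data_IDs_within_AOI(job, usecase)  # -> list of Landsat dataset dicts
    # (the glob over the sample archive simply yields [] if the folder is absent)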
def LandsatID2dataset(ID_list, job):
    dataset_list = []
    for ID in ID_list:
        dataset = {'job_ID':job.ID, 'job_CPUs':job.CPUs, 'image_type':'RSD', 'satellite':None, 'sensor':None,
                   'subsystem':None, 'aquisition_date':None, 'data_ID':ID}
        dataset['satellite'] = 'Landsat-5' if ID[:3]=='LT5' else 'Landsat-7' if ID[:3]=='LE7' else \
                               'Landsat-8' if ID[:3]=='LC8' else dataset['satellite']
        dataset['sensor'] = 'TM' if ID[:3]=='LT5' else 'ETM+' if ID[:3]=='LE7' else \
                            'OLI_TIRS' if ID[:3]=='LC8' else dataset['sensor']
        dataset['subsystem'] = 'SAM' if ID[:3]=='LE7' else None
        # the scene ID encodes the acquisition date as year (chars 9-13) and day of year (chars 13-16)
        dataset['aquisition_date'] = (datetime.datetime(int(ID[9:13]), 1, 1) +
                                      datetime.timedelta(int(ID[13:16]) - 1)).strftime('%Y-%m-%d')
        dataset_list.append(dataset)
    return dataset_list
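
A quick worked check of the year/day-of-year arithmetic above, using one of the scene IDs from the data list (stand-alone sketch):

    import datetime
    ID = 'LE71510322000093SGS00'                 # Landsat-7 scene listed above
    year, doy = int(ID[9:13]), int(ID[13:16])    # -> 2000, 93
    date = datetime.datetime(year, 1, 1) + datetime.timedelta(doy - 1)
    assert date.strftime('%Y-%m-%d') == '2000-04-02'  # matches the acquisition date listed above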
def add_local_availability(dataset):
    DB_match = INP_R.get_dataset_by_SQLquery(['processed_data', ['proc_level', 'path_procdata', 'baseN'],
                                              {'image_type':dataset['image_type'], 'satellite':dataset['satellite'],
                                               'sensor':dataset['sensor'], 'subsystem':dataset['subsystem'],
                                               'data_ID':dataset['data_ID']}])

    def check_for_successfully_written_data(path_logfile, dataset):
        if os.path.isdir(os.path.dirname(path_logfile)):
            if os.path.isfile(path_logfile):
                with open(path_logfile, 'r') as logfile_fp:
                    logfile = logfile_fp.read()
                AllWrittenProcL_dueLog = re.findall(r":*(\S*\s*) data successfully saved.", logfile, re.I)
                if AllWrittenProcL_dueLog != []:
                    HighestProcL_dueLog = HLP_F.sorted_nicely(AllWrittenProcL_dueLog)[-1]
                    return HighestProcL_dueLog
                else:
                    print('%s: According to the logfile, no completely processed data exist at any processing '
                          'level. The dataset has to be reprocessed.' % dataset['data_ID'])
                    return None
            else:
                print('No logfile found for %s at %s. The dataset has to be reprocessed.'
                      % (dataset['data_ID'], path_logfile))
                return None
        else:
            return None

    if DB_match == [] or DB_match == 'DB_file_not_found':
        L0B_obj = L0B_P.L0B_object(dataset, onlyStringAttributes=True)
        HighestProcL_dueLog = check_for_successfully_written_data(L0B_obj.path_logfile, dataset)
        if HighestProcL_dueLog is not None:
            print('The dataset %s is not included in the job database, but according to its logfile %s has been '
                  'written successfully. Recreating the missing database entry.'
                  % (dataset['data_ID'], HighestProcL_dueLog))
            assumed_path_GMS_file = os.path.join(L0B_obj.path_procdata, L0B_obj.baseN + '_%s.gms' % HighestProcL_dueLog)
            OUT_W.data_DB_updater(INP_R.GMSfile2dict(assumed_path_GMS_file))
            OUT_W.data_DB_to_csv()
            dataset['GMS_proc_level'] = HighestProcL_dueLog
        else:
            dataset['GMS_proc_level'] = None
    elif len(DB_match) == 1 and os.path.isdir(DB_match[0][1]):
        path_logfile = os.path.join(DB_match[0][1], DB_match[0][2] + '.log')
        HighestProcL_dueLog = check_for_successfully_written_data(path_logfile, dataset)
        if DB_match[0][0] == HighestProcL_dueLog:
            dataset['GMS_proc_level'] = DB_match[0][0]
        else:
            dataset['GMS_proc_level'] = HighestProcL_dueLog
    elif len(DB_match) > 1:
        print('According to the database there are multiple matches for the dataset %s. '
              'The dataset has to be reprocessed.' % dataset['data_ID'])
        dataset['GMS_proc_level'] = None
    else:
        dataset['GMS_proc_level'] = None
    return dataset
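
The logfile check above hinges on the exact success message written by the processors. A small stand-alone test of that pattern with hypothetical logfile content:

    import re
    log = "L1A data successfully saved.\nL1B data successfully saved."
    levels = re.findall(r":*(\S*\s*) data successfully saved.", log, re.I)
    # levels -> ['L1A', 'L1B']; HLP_F.sorted_nicely(levels)[-1] then picks 'L1B' as the highest level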
# -*- coding: utf-8 -*-
###############################################################################
#
# BigData 0B Processor:
#
# Performed operations:
# Downloads data from remote servers and creates a Python object containing the
# path of the downloaded data and some information about the dataset.
# Inputs:
# - list of datasets to be downloaded
# Output:
# - Python object with the file path of the downloaded archive, information
#   about the data and the basename
#
# Written by Daniel Scheffler
# Section 1.4 Remote Sensing, GFZ Potsdam
#
###############################################################################
########################### Library import ####################################
from __future__ import (division, print_function, unicode_literals, absolute_import)
import os, re, sys

if __name__ == '__main__' and __package__ is None:
    os.sys.path.append(os.path.dirname(os.path.abspath('.')))
import misc.helper_functions as HLP_F
########################### core functions ####################################
class L0B_object(object):
    def __init__(self, data_list_posX, onlyStringAttributes=False):
        self.proc_level        = 'L0B'
        self.job_ID            = data_list_posX['job_ID']
        self.job_CPUs          = data_list_posX['job_CPUs']
        self.skip_thermal      = data_list_posX['skip_thermal']
        self.sort_bands_by_cwl = data_list_posX['sort_bands_by_cwl']
        self.image_type        = data_list_posX['image_type']
        self.satellite         = data_list_posX['satellite']
        self.sensor            = data_list_posX['sensor']
        self.subsystem         = data_list_posX['subsystem']
        self.aquisition_date   = data_list_posX['aquisition_date']
        self.data_ID           = data_list_posX['data_ID']
        self.baseN = self.sensor + '__' + self.data_ID if self.subsystem in ['', None] \
            else '__'.join([self.sensor, self.subsystem, self.data_ID])
        self.path_procdata = self._get_local_path_baseN(self.satellite, self.sensor, self.aquisition_date, self.data_ID)
        self.path_logfile  = os.path.join(self.path_procdata, self.baseN + '.log')
        if not onlyStringAttributes:
            self.logger = HLP_F.setup_logger('log__' + self.baseN, self.path_logfile, self.job_CPUs, append=0)
        self.path_archive = self._get_local_archive_path_baseN(self.image_type, self.satellite, self.sensor,
                                                               self.aquisition_date, self.data_ID)
        if not os.path.isfile(self.path_archive) and not os.path.isdir(self.path_archive) and not onlyStringAttributes:
            self.logger.info("The %s dataset '%s' has not been processed earlier and no corresponding raw data "
                             "archive has been found at %s." % (self.sensor, self.data_ID, self.path_archive))
            self.logger.info('Trying to download the dataset...')
            self.path_archive_valid = self._data_downloader(self.sensor, self.data_ID)
        else:
            self.path_archive_valid = True
        self.georef = 'Slave'
        if self.image_type == 'RSD' and self.sensor == 'LDCM':
            self.georef = 'Master'
        if self.path_archive_valid and not onlyStringAttributes:
            self.logger.info('Level 0B object for %s %s%s (data-ID %s) successfully created.'
                             % (self.satellite, self.sensor,
                                (' ' + self.subsystem) if self.subsystem not in [None, ''] else '', self.data_ID))
        if hasattr(self, 'logger'):
            del self.logger

    def _get_local_archive_path_baseN(self, image_type, satellite, sensor, aquisition_date, data_ID):
        archive_fold = os.path.abspath('../database/sampledata/')  # to be replaced by fileserver URL
        if image_type == 'RSD':
            if satellite is not None and re.search(satellite, 'ALOS', re.I) is not None:
                if sensor == 'AVNIR-2':
                    if self.data_ID == 'A1002553-001-P6100002-AODS-201007300008':
                        return os.path.join(archive_fold, 'alos/avnir/orig_L1B1/20090702_L1B1/', data_ID + '.tar.gz')
                    if self.data_ID == '20090702_L1B2':
                        return os.path.join(archive_fold, 'alos/avnir/orig_L1B2/20090702_L1B2/data_jaxa/')
                    if self.data_ID == '20090719_L1B2':
                        return os.path.join(archive_fold, 'alos/avnir/orig_L1B2/20090719_L1B2/data_jaxa/')
                    if self.data_ID == '20100421_L1B2':
                        return os.path.join(archive_fold, 'alos/avnir/orig_L1B2/20100421_L1B2/data_jaxa/')
            if satellite is not None and re.search(satellite, 'Terra', re.I) is not None:
                if sensor == 'ASTER':
                    return os.path.join(archive_fold, 'aster/fergana_usgs/level_1b_nov07/kurgart_19aug07/', data_ID + '.hdf')
            if satellite is not None and re.search(satellite, 'Landsat-5', re.I) is not None:
                if sensor == 'TM':
                    return os.path.join(archive_fold, 'landsat/landsat-tm_92-now/151-32/', data_ID + '.tar.gz')
            if satellite is not None and re.search(satellite, 'Landsat-7', re.I) is not None:
                if sensor == 'ETM+':
                    if self.data_ID == 'LE71510321999218SGS00':  # == BSQ
                        # return os.path.join(archive_fold, 'landsat/landsat-etm-on_99-03/151-32/', data_ID, 'L71151032_03219990806_stack_rad.bsq')
                        return os.path.join(archive_fold, 'landsat/landsat-etm-on_99-03/151-32/', data_ID, 'L71151032_03219990806_stack_rad_sub.bsq')
                    else:  # == TAR
                        return os.path.join(archive_fold, 'landsat/landsat-etm-on_99-03/151-32/', data_ID + '.tar.gz')
            if satellite is not None and re.search(satellite, 'Landsat-8', re.I) is not None:
                if sensor == 'OLI_TIRS':
                    return os.path.join(archive_fold, 'landsat/landsat_8/', data_ID + '.tar.gz')
            if satellite is not None and re.search(satellite, 'SPOT-1', re.I) is not None:
                if sensor == 'HRV1':
                    return os.path.join(archive_fold, 'spot1/orig/', data_ID + '.zip')
            if satellite is not None and re.search(satellite, 'SPOT-5', re.I) is not None:
                if sensor == 'HRG2':
                    return os.path.join(archive_fold, 'spot5/orig/', data_ID + '.zip')
            if satellite is not None and re.search(satellite[:8], 'RapidEye', re.I) is not None:  # includes all RapidEye satellites
                if sensor == 'MSI':
                    return os.path.join(archive_fold, 'rapideye/', data_ID + '.zip')
        if image_type == 'DGM':
            if satellite is not None and re.search(satellite, 'SRTM', re.I) is not None:
                return os.path.join(archive_fold, 'srtm2/', data_ID + '_sub.bsq')
        if image_type == 'ATM':
            return os.path.join(archive_fold, 'atm_data/', data_ID + '.bsq')
        msg = ('Given dataset specification is not yet supported. Specified parameters: '
               'image_type: %s; satellite: %s; sensor: %s' % (image_type, satellite, sensor))
        try:
            self.logger.critical(msg)
        except AttributeError:
            print(msg)

    def _get_local_path_baseN(self, satellite, sensor, date, data_ID):
        return os.path.join(os.path.abspath('../database/processed_data/'), satellite, sensor, str(date), data_ID)
        # return os.path.join(os.path.abspath('/misc/fluo6/andre/projekte/GeoMultiSens/out/'), satellite, sensor, str(date), data_ID)

    def _data_downloader(self, sensor, data_ID):
        self.logger.info('Level 0B Processing started')
        success = False
        " > download source code for Landsat here < "
        if not success:
            self.logger.critical("Download for %s dataset '%s' failed. No further processing possible."
                                 % (sensor, data_ID))
        return success
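A minimal usage sketch for the class above; the values mirror one of the Landsat-8 entries from the L0A data list (the job values are placeholders), and onlyStringAttributes=True skips the logger setup and the download attempt:

    import algorithms.L0B_P as L0B_P

    dataset = {'job_ID': 1, 'job_CPUs': 1, 'skip_thermal': True, 'sort_bands_by_cwl': True,
               'image_type': 'RSD', 'satellite': 'Landsat-8', 'sensor': 'OLI_TIRS', 'subsystem': None,
               'aquisition_date': '2013-07-03', 'data_ID': 'LC81510322013184LGN00'}
    L0B_obj = L0B_P.L0B_object(dataset, onlyStringAttributes=True)
    print(L0B_obj.baseN)          # -> 'OLI_TIRS__LC81510322013184LGN00'
    print(L0B_obj.path_procdata)  # -> .../database/processed_data/Landsat-8/OLI_TIRS/2013-07-03/LC81510322013184LGN00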
# -*- coding: utf-8 -*-
###############################################################################
#
# Level 1B Processor:
#
# Performed operations:
# Generation of RPCs for later Orthorectification:
# - for satellite data
# - for DGMs (SRTM)
# - for atmospheric layers (ozone,aerosols,water vapour)
# => results are added to header files
#
# Written by Daniel Scheffler
#
###############################################################################
########################### Library import ####################################
# from __future__ import (division, print_function, unicode_literals, absolute_import)
import numpy as np, os, spectral.io.envi as envi, logging, sys, socket, itertools

if socket.gethostname() == 'geoms':
    sys.path.append('/usr/lib/otb/python/')
    os.environ['ITK_AUTOLOAD_PATH'] = "/usr/lib/otb/applications"
    sys.path.append('/usr/lib/python2.7/dist-packages')  # cv2
    sys.path.append('/usr/local/lib/python2.7/site-packages')
if socket.gethostname() == 'mefe18':
    sys.path.append('/usr/lib64/otb/python/')
    os.environ['ITK_AUTOLOAD_PATH'] = "/usr/lib64/otb/applications"
try:
    import otbApplication
except ImportError:
    print('otbApplication lib is missing.')
try:
    import cv2
except ImportError:
    print('cv2 lib is missing.')
########################### core functions ####################################
def calculate_TiePoints(im_ref, im_rpc, distance=200):
    detector   = cv2.FeatureDetector_create("SIFT")
    descriptor = cv2.DescriptorExtractor_create("SIFT")
    TieP_ref = detector.detect(im_ref)  # returns a list of keypoints -> type(TieP_ref[0]): cv2.KeyPoint
    TieP_ref, Detector_ref = descriptor.compute(im_ref, TieP_ref)  # Detector_ref: numpy array of shape (number of keypoints, 128)
    print('%s temporary master tie points found.' % len(TieP_ref))
    TieP_rpc = detector.detect(im_rpc)
    TieP_rpc, Detector_rpc = descriptor.compute(im_rpc, TieP_rpc)
    print('%s temporary slave tie points found.' % len(TieP_rpc))

    # match slave descriptors against the master descriptors and keep only those master
    # tie points whose scaled nearest-neighbour distance falls below 'distance'
    flann_params = dict(algorithm=1, trees=4)
    flann = cv2.flann_Index(Detector_ref, flann_params)
    idx, dist = flann.knnSearch(Detector_rpc, 1, params={})
    del flann
    dist = dist[:, 0] / 2500.0
    dist = dist.reshape(-1, ).tolist()
    idx  = idx.reshape(-1).tolist()
    indices = range(len(dist))
    indices.sort(key=lambda i: dist[i])
    dist = [dist[i] for i in indices]
    idx  = [idx[i] for i in indices]
    TieP_ref_final = []
    for i, dis in itertools.izip(idx, dist):
        if dis < distance:
            TieP_ref_final.append(TieP_ref[i])

    # repeat the search in the opposite direction to filter the slave tie points
    flann = cv2.flann_Index(Detector_rpc, flann_params)
    idx, dist = flann.knnSearch(Detector_ref, 1, params={})
    del flann
    dist = dist[:, 0] / 2500.0
    dist = dist.reshape(-1, ).tolist()
    idx  = idx.reshape(-1).tolist()
    indices = range(len(dist))
    indices.sort(key=lambda i: dist[i])
    dist = [dist[i] for i in indices]
    idx  = [idx[i] for i in indices]
    TieP_rpc_final = []
    for i, dis in itertools.izip(idx, dist):
        if dis < distance:
            TieP_rpc_final.append(TieP_rpc[i])
    return TieP_ref_final, TieP_rpc_final
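
The two passes above keep only those keypoints whose scaled nearest-neighbour descriptor distance stays below the distance threshold, once per search direction. A toy numpy-only sketch of that filtering idea (random data; not the cv2/FLANN implementation used here):

    import numpy as np
    np.random.seed(0)
    desc_ref = np.random.rand(5, 128).astype('float32')   # master descriptors
    desc_rpc = np.random.rand(7, 128).astype('float32')   # slave descriptors
    d = ((desc_rpc[:, None, :] - desc_ref[None, :, :]) ** 2).sum(axis=2)  # pairwise squared distances
    idx = d.argmin(axis=1)          # index of the nearest master descriptor per slave descriptor
    dist = d.min(axis=1) / 2500.0   # same scaling as above
    keep = [idx[i] for i in range(len(idx)) if dist[i] < 200]  # master keypoint indices retained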
def Orfeo_homologous_points_extraction(im_ref, im_rpc):
    HomologousPointsExtraction = otbApplication.Registry.CreateApplication("HomologousPointsExtraction")
    # The following lines set all the application parameters:
    # HomologousPointsExtraction.SetParameterString("in1", "sensor_stereo_left.tif")
    # HomologousPointsExtraction.SetParameterString("in2", "sensor_stereo_right.tif")
    HomologousPointsExtraction.SetParameterString("in1", im_ref)
    HomologousPointsExtraction.SetParameterString("in2", im_rpc)
    HomologousPointsExtraction.SetParameterString("mode", "full")
    HomologousPointsExtraction.SetParameterString("out", "homologous.txt")
    # The following line executes the application:
    HomologousPointsExtraction.ExecuteAndWriteOutput()
def generate_RPCs(RSD_L1A, DGM_L1A, masks_L1A, path_out_baseN=''):
    '''Generates RPC model and returns RPC points as list.'''
    print('\n##################### Level 1B Processing #####################')
    logging.info('Level 1B Processing started.')
    if isinstance(RSD_L1A, np.ndarray):
        # The following line creates an instance of the GenerateRPCSensorModel application:
        GenerateRPCSensorModel = otbApplication.Registry.CreateApplication("GenerateRPCSensorModel")
        # The following lines set all the application parameters:
        GenerateRPCSensorModel.SetParameterString("outgeom", "output.geom")
        GenerateRPCSensorModel.SetParameterString("inpoints", "points.txt")
        GenerateRPCSensorModel.SetParameterString("map", "epsg")
        GenerateRPCSensorModel.SetParameterInt("map.epsg.code", 32631)
        # The following line executes the application:
        GenerateRPCSensorModel.ExecuteAndWriteOutput()
    else:
        logging.info('L1B-Processor accepts only numpy arrays as first input argument. Execution stopped.')
        raise ValueError('L1B-Processor accepts only numpy arrays as first input argument.')
    print('Generating dummy RPCs...')
    logging.info('Generating dummy RPCs...')
    list_RPCs = [0] * 93
    return list_RPCs
def update_metadata(list_RPCs, L1A_meta2update, path_out_baseN=''):
    '''Adds RPC points to metadata of RS data, masks, atmospheric layers and DGM.'''
    if isinstance(L1A_meta2update, dict):
        # metadata dictionary updater
        L1A_meta2update['rpc coeffs'] = list_RPCs
        L1B_meta = L1A_meta2update
        # L1B_meta = L1A_meta2update.update({'rpc coeffs': list_RPCs})  # result = None
        return L1B_meta
    elif isinstance(L1A_meta2update, basestring) and os.path.splitext(L1A_meta2update)[1] in ['.BSQ', '.bsq'] \
            and os.path.isfile(os.path.splitext(L1A_meta2update)[0] + '.hdr'):
        # header file updater
        hdr_path = os.path.splitext(L1A_meta2update)[0] + '.hdr'
        L1A_meta2update = envi.read_envi_header(hdr_path)
        L1A_meta2update['rpc coeffs'] = list_RPCs
        L1B_meta = L1A_meta2update
        return L1B_meta
    else:
        logging.info('L1B-Processor accepts only L1A metadata dictionaries or path-strings to L1A header files as second input argument. Execution stopped.')
        raise ValueError('L1B-Processor accepts only L1A metadata dictionaries or path-strings to L1A header files as second input argument.')
def convert_to_8bit(Inimage, In_dtype):
    image_min, image_max = (np.min(Inimage), np.max(Inimage))
    # lut = np.arange(np.iinfo(In_dtype).max, dtype=In_dtype)
    lut = np.arange(2 ** 16, dtype='uint16')
    Outimage = np.array(lut, copy=True)
    Outimage.clip(image_min, image_max, out=Outimage)
    Outimage -= image_min
    Outimage //= (image_max - image_min + 1) / 256.
    lut = Outimage.astype(np.uint8)
    return np.take(lut, Inimage)  # map every input value through the 8-bit lookup table
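
A worked toy check of the lookup-table rescaling above; note that this module targets the Python 2 / numpy environment of 2015 (recent numpy versions reject the in-place integer floor division by a float):

    import numpy as np
    toy = np.array([[100, 400], [250, 175]], dtype='uint16')
    out8 = convert_to_8bit(toy, toy.dtype)
    # values are mapped linearly from [100, 400] onto 8 bit:
    # out8 -> [[0, 255], [127, 63]], dtype uint8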
def L1B_P__main(L1A_Instances):
    for i in L1A_Instances:
        if i.image_type == 'RSD':
            if i.georef == 'Master':
                if i.arr_shape == 'cube':
                    im_ref = convert_to_8bit(i.arr[:, :, 0], i.arr.dtype)
                else:
                    im_ref = convert_to_8bit(i.arr, i.arr.dtype)
                print('Calling tie point calculation with master image %s' % i.data_ID)
            else:
                if i.arr_shape == 'cube':
                    im_rpc = convert_to_8bit(i.arr[:, :, 0], i.arr.dtype)
                else:
                    im_rpc = convert_to_8bit(i.arr, i.arr.dtype)
                print('Calling tie point calculation with slave image %s' % i.data_ID)
    print(im_ref.shape, im_rpc.shape)
    # Orfeo_homologous_points_extraction(im_ref, im_rpc)  # only works with images read from disk
    TieP_ref, TieP_rpc = calculate_TiePoints(im_ref, im_rpc)
    print('%s master tie points calculated.' % len(TieP_ref))
# -*- coding: utf-8 -*-
###############################################################################
#
# Level 1C Processor:
#
# Performed operations:
# Atmospheric correction of radiance data:
# Inputs:
# - radiance array
# - DGM array
# - atmospheric layers (ozone,aerosols,water vapour)
# - sensor SRFs
# Outputs:
# - reflectance array
# - metadata dictionary with new information about:
# - quality of atmospheric correction
# - atmospheric water vapour content, ...
#
# Written by Daniel Scheffler
#
###############################################################################
########################### Library import ####################################
from __future__ import (division, print_function, unicode_literals, absolute_import)
import numpy as np, os, glob, logging
########################### core functions ####################################
def atm_corr(RSD_L1B, RSD_md_L1B, masks_L1B, masks_md_L1B, DGM_L1B, DGM_md_L1B, ATM_L1B, ATM_md_L1B, SRF_fold):
    '''Performs an atmospheric correction and returns atmospherically corrected reflectance data.'''
    print('\n##################### Level 1C Processing #####################')
    logging.info('Level 1C Processing started.')
    SRF_dict = _SRF_reader(SRF_fold, RSD_md_L1B)  # _SRF_reader is expected to be defined elsewhere in this module
    ## CODE for atmospheric correction
    RSD_L1C = RSD_L1B  # placeholder: reflectance data are passed through unchanged for now
    RSD_md_L1C = RSD_md_L1B
    return RSD_L1C, RSD_md_L1C
# -*- coding: utf-8 -*-
###############################################################################
#
# Level 1D Processor:
#
# Performed operations:
# Calculation of additional masks from reflectance data:
# - cloud mask
# - snow mask
# - ice mask
# - shadow mask
# Update of L1B masks by adding newly calculated layers.
#
# Inputs:
# - atmospherically corrected L1C array
# - L1B masks
# - appropriate metadata dictionaries
# Outputs:
# - updated masks
# - updated metadata dictionary for masks
#
# Written by Daniel Scheffler
#
###############################################################################
########################### Library import ####################################
from __future__ import (division, print_function, unicode_literals, absolute_import)
import numpy as np, logging
########################### core functions ####################################
def calculate_masks(RSD_L1C, RSD_md_L1C, masks_L1B, masks_md_L1B):
    print('\n##################### Level 1D Processing #####################')
    logging.info('Level 1D Processing started.')
    masks_L1D = masks_L1B  # note: the L1B mask array is updated in place
    masks_L1D[RSD_L1C[:, :, 0] > 1500] = 11  # snow
    masks_L1D[RSD_L1C[:, :, 0] < 400]  = 13  # shadow
    return masks_L1D

def update_metadata(masks_L1D, masks_md_L1B):
    '''Updates L1B mask metadata by adding information about the new pixel values.'''
    masks_md_L1D = masks_md_L1B
    masks_md_L1D['description'] = masks_md_L1B['description'] + '\n10 = cloud\n11 = snow\n12 = ice\n13 = shadow'
    return masks_md_L1D
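
A toy check of the threshold logic in calculate_masks, using a hypothetical 2 x 2 scene with a single band (the reflectance scaling is assumed to match the thresholds above):

    import numpy as np
    refl = np.array([[[2000], [1000]], [[300], [450]]])   # shape (2, 2, 1), band 0
    masks = np.zeros((2, 2), dtype='uint8')
    masks_L1D = calculate_masks(refl, None, masks, None)  # the metadata arguments are unused here
    # masks_L1D -> [[11, 0], [13, 0]]  (snow at [0, 0], shadow at [1, 0])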
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 16 11:57:31 2015
@author: danschef
"""