Commit cd487679 authored by Daniel Scheffler's avatar Daniel Scheffler
Browse files

Renamed project from 'GeoMultiSens' to 'gms_preprocessing'.

parent 53ea4a0c
Pipeline #868 failed with stage
in 2 minutes and 4 seconds
......@@ -426,6 +426,8 @@ class L1B_object(L1A_object):
# TODO implement earlier version of this function as a backup for SpatialIndexMediator
"""postgreSQL query: get IDs of overlapping scenes and select most suitable scene_ID
(with respect to DGM, cloud cover"""
warnings.warn('_get_reference_image_params_pgSQL is deprecated an will not work anymore.', DeprecationWarning)
# preliminary check using the cloud mask to determine which region of im2shift is usable for shift correction at all
# -> pass that region as an argument to the postgreSQL query
#scene_ID = 14536400 # LE71510322000093SGS00 im2shift
......@@ -502,7 +504,7 @@ class L1B_object(L1A_object):
# start download of scene data not available and start L1A processing
dl_cmd = lambda scene_ID: \
print('%s %s %s' %(CFG.job.java_commands['keyword'].strip(),
print('%s %s %s' %(CFG.job.java_commands['keyword'].strip(), # FIXME CFG.job.java_commands is deprecated
CFG.job.java_commands["value_download"].strip(), scene_ID))
path = PG.path_generator(scene_ID = sc['scene_ID']).get_path_imagedata()
......
###############################################################################
#
# algorithms/__init__.py - This file is part of the GeoMultiSens package.
# algorithms/__init__.py - This file is part of the gms_preprocessing package.
#
# Written by Daniel Scheffler
# GFZ Potsdam, Section 1.4
......
......@@ -10,7 +10,7 @@ import dill
class GmsCloudClassifier(object):
def __init__(self, classifier):
"""
classifier for GeoMultiSens
classifier for gms_preprocessing
:param classifier: either file name of dilled 'general_classifier' object or an instance of such an object
this object needs only .n_channels and .predict property
:return: instance
......@@ -39,19 +39,19 @@ if __name__ == "__main__":
import sys
from datetime import datetime
sys.path.append("/home/danscheff/GeoMultiSens/") # FIXME
sys.path.append("/home/danscheff/gms_preprocessing/") # FIXME
fn_l1a = glob("./clfs/ETM+*.pkl")[0] # gms l1a object
with open(fn_l1a, "rb") as fl:
l1a = dill.load(fl)
fns_clf = glob("./clfs/*.dill") # classifier object filenames
now_str = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
print("suffix:",now_str)
for fn_clf in fns_clf:
print(fn_clf)
for fn_clf in fns_clf:
print(fn_clf)
gms_clf = GmsCloudClassifier(classifier=fn_clf)
res = gms_clf(l1a)
......@@ -65,11 +65,11 @@ if __name__ == "__main__":
with open(fn_clf,"rb") as fl:
inf = dill.load(fl)
clf = dill.load(fl)
clf = dill.load(fl)
for key,value in inf.items():
print(key,"->",value)
gms_clf = GmsCloudClassifier(classifier=clf)
res = gms_clf(l1a)
......
......@@ -21,7 +21,7 @@ from inspect import getargvalues, stack, getfullargspec, signature, _empty
def set_config(call_type, job_ID, exec_mode='Python', db_host='localhost', reset=False, job_kwargs=None):
# type: (str, int, str, str, bool, dict) -> None
"""Set up a configuration for a new GeoMultiSens job!
"""Set up a configuration for a new gms_preprocessing job!
:param call_type: 'console' or 'webapp'
:param job_ID: job ID of the job to be executed, e.g. 123456 (must be present in database)
......@@ -29,7 +29,7 @@ def set_config(call_type, job_ID, exec_mode='Python', db_host='localhost', reset
'Flink': keeps intermediate results in memory in order to save IO time
:param db_host: host name of the server that runs the postgreSQL database
:param reset: whether to reset the job status or not (default=False)
:param job_kwargs: keyword arguments to be passed to geomultisens.config.Job() (see documentation there)
:param job_kwargs: keyword arguments to be passed to gms_preprocessing.config.Job() (see documentation there)
"""
if not hasattr(builtins, 'GMS_job') or not hasattr(builtins, 'GMS_usecase') or reset:
......@@ -171,9 +171,6 @@ class Job(object):
self.path_SNR_models = self.DB_config['path_SNR_models']
self.path_dem_proc_srtm_90m = self.DB_config['path_dem_proc_srtm_90m']
self.path_ECMWF_db = self.DB_config['path_ECMWF_db']
self.java_commands = collections.OrderedDict([
("keyword", self.DB_config['command_keyword']),
("value_download", self.DB_config['command_value_download'])])
if not self.is_test:
self.path_fileserver = self.DB_config['path_data_root']
......@@ -187,9 +184,6 @@ class Job(object):
self.path_testing = self.DB_config['path_testing']
self.path_benchmarks = self.DB_config['path_benchmarks']
self.path_job_logs = self.DB_config['path_job_logs']
self.java_commands = collections.OrderedDict([
("keyword", self.DB_config['command_keyword']),
("value_download", self.DB_config['command_value_download'])])
else:
# in test mode, the repository should be self-contained -> use only relative paths
self.path_archive = self.absP('../tests/data/')
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment