#!/usr/bin/env python
# -*- coding: utf-8 -*-

###################################################################################

"""
test_gms_preprocessing
----------------------------------

The test cases contained in this test script are parametrized. They test the
level-processing steps defined in the 'gms_preprocessing' package with the help
of the following test datasets:
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
- Landsat-5, Pre-Collection Data,
- Landsat-5, Collection Data,
- Landsat-7, SLC on, Pre-Collection Data,
- Landsat-7, SLC off, Pre-Collection Data,
- Landsat-7, SLC off, Collection Data,
- Landsat-8, Pre-Collection Data,
- Landsat-8, Collection Data,
- Sentinel-2A, Pre-Collection Data and
- Sentinel-2A, Collection Data.

The test datasets can be found in the directory "tests/data/archive_data/...". The
respective SRTM datasets needed during processing can be found in the directory
"tests/data/archive_data/Endeavor".

The tests, defined in a base test case (not executed directly), are triggered by
creating jobs (based on given job IDs) in individual test cases that inherit the
tests from the base test case. The exception: the job ID used in the last test
class refers to a job containing 3 different datasets of the above-listed test
datasets.

Note that the test results are written to the console as well as to a log file
that can be found in the directory "tests/data/logs".

Program edited in July 2017.
"""

# Import Python standard libraries.
import itertools
import logging
import os
import sys
import time
import unittest

# Import third-party libraries.
import pandas

# Imports regarding the 'gms_preprocessing' module.
from gms_preprocessing import process_controller, __file__
from gms_preprocessing.model.gms_object import GMS_object
from gms_preprocessing.algorithms.L1A_P import L1A_object
from gms_preprocessing.algorithms.L1B_P import L1B_object
from gms_preprocessing.algorithms.L1C_P import L1C_object
from gms_preprocessing.algorithms.L2A_P import L2A_object
from gms_preprocessing.algorithms.L2B_P import L2B_object
# from gms_preprocessing.algorithms.L2C_P import L2C_object
from gms_preprocessing.misc.database_tools import get_info_from_postgreSQLdb
from gms_preprocessing.model.gms_object import GMS_object_2_dataset_dict

from . import db_host, index_host

__author__ = 'Daniel Scheffler'  # edited by Jessica Palka.

# Root path of the gms_preprocessing repository.
gmsRepo_rootpath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))

# Defining the configurations needed to start a job containing the different dataset scenes.
# TODO Change the job-configurations for selected datasets.
job_config_kwargs = dict(parallelization_level='scenes', db_host=db_host, spatial_index_server_host=index_host,
                         delete_old_output=True, is_test=True,
                         inmem_serialization=False,
                         exec_L1AP=[True, True, True], exec_L1BP=[True, True, True], exec_L1CP=[True, True, True],
                         exec_L2AP=[True, True, True], exec_L2BP=[True, True, False], exec_L2CP=[True, True, False])
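
# NOTE: the semantics of the three flags in each exec_* triple are defined by gms_preprocessing's job configuration;
# judging from the availability checks in BaseTestCases.TestAll below, index 1 appears to control whether the output
# of the respective processor is written to disk.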

##########################
# Test case: BaseTestCases
##########################


class BaseTestCases:
    """
    General testclass. The tests defined in this testclass test the processing steps Level-1A, Level-1B, Level-1C,
    Level-2A, Level-2B and Level-2C defined in the "gms_preprocessing"-repository.
    Note that the tests in this testclass are not executed directly. They are re-used in the other classes defined
    in this test-script.
    """
    class TestAll(unittest.TestCase):
        PC = None  # default

        @classmethod
        def tearDownClass(cls):
            cls.PC.config.DB_job_record.delete_procdata_of_entire_job(force=True)

        @classmethod
        def validate_db_entry(cls, filename):
            sceneID_res = get_info_from_postgreSQLdb(cls.PC.config.conn_database, 'scenes', ['id'],
                                                     {'filename': filename})
            assert sceneID_res and isinstance(sceneID_res[0][0], int), 'Invalid database entry.'

        @classmethod
        def create_job(cls, jobID, config):
            cls.PC = process_controller(jobID, **config)

            # update attributes of DB_job_record and related DB entry
            cls.PC.config.DB_job_record.reset_job_progress()

            GMS_object.proc_status_all_GMSobjs.clear()  # reset

            cls.PC.config.data_list = cls.PC.add_local_availability(cls.PC.config.data_list)

            for ds in cls.PC.config.data_list:
                cls.validate_db_entry(ds['filename'])

            cls.PC.config.ac_estimate_accuracy = True  # FIXME
            cls.PC.config.spechomo_estimate_accuracy = True  # FIXME

        def check_availability(self, GMS_objs, tgt_procL):
            dss = self.PC.add_local_availability([GMS_object_2_dataset_dict(obj) for obj in GMS_objs])
            for ds in dss:
                self.assertEqual(ds['proc_level'], tgt_procL,
                                 msg='Written %s dataset cannot be found by PC.add_local_availability().'
                                     % (' '.join([ds['satellite'], ds['sensor'], ds['subsystem'], tgt_procL])))

        def test_L1A_processing(self):
            self.L1A_newObjects = self.PC.L1A_processing()
            self.assertIsInstance(self.L1A_newObjects, list)
            self.assertNotEqual(len(self.L1A_newObjects), 0, msg='L1A_processing did not output an L1A object.')
            self.assertIsInstance(self.L1A_newObjects[0], L1A_object)

            # check if PC.add_local_availability finds the written dataset
            if self.PC.config.exec_L1AP[1]:
                self.check_availability(self.L1A_newObjects, 'L1A')

        def test_L1B_processing(self):
            self.L1B_newObjects = self.PC.L1B_processing()
            self.assertIsInstance(self.L1B_newObjects, list)
            self.assertNotEqual(len(self.L1B_newObjects), 0, msg='L1B_processing did not output an L1B object.')
            self.assertIsInstance(self.L1B_newObjects[0], L1B_object)

            # check if PC.add_local_availability finds the written dataset
            if self.PC.config.exec_L1BP[1]:
                self.check_availability(self.L1B_newObjects, 'L1B')

        def test_L1C_processing(self):
            self.L1C_newObjects = self.PC.L1C_processing()
            self.assertIsInstance(self.L1C_newObjects, list)
            self.assertNotEqual(len(self.L1C_newObjects), 0, msg='L1C_processing did not output an L1C object.')
            self.assertIsInstance(self.L1C_newObjects[0], L1C_object)

            # check if PC.add_local_availability finds the written dataset
            # if self.PC.config.exec_L1CP[1]:
            #     self.check_availability(self.L1C_newObjects, 'L1C')

        def test_L2A_processing(self):
            self.L2A_newObjects = self.PC.L2A_processing()
            self.assertIsInstance(self.L2A_newObjects, list)
            self.assertNotEqual(len(self.L2A_newObjects), 0, msg='L2A_processing did not output an L2A object.')
            self.assertIsInstance(self.L2A_newObjects[0], L2A_object)

            # check if PC.add_local_availability finds the written dataset
            # FIXME this will fail because AC outputs TOA-Ref if ECMWF data are missing
            # if self.PC.config.exec_L2AP[1]:
            #     self.check_availability(self.L2A_newObjects, 'L2A')

        def test_L2B_processing(self):
            self.L2B_newObjects = self.PC.L2B_processing()
            self.assertIsInstance(self.L2B_newObjects, list)
            self.assertNotEqual(len(self.L2B_newObjects), 0, msg='L2B_processing did not output an L2B object.')
            self.assertIsInstance(self.L2B_newObjects[0], L2B_object)

            # check if PC.add_local_availability finds the written dataset
            # FIXME this will fail because AC outputs TOA-Ref if ECMWF data are missing
            # if self.PC.config.exec_L2BP[1]:
            #     self.check_availability(self.L2B_newObjects, 'L2B')

        def test_L2C_processing(self):
            self.L2C_newObjects = self.PC.L2C_processing()
            self.assertIsInstance(self.L2C_newObjects, list)
            self.assertNotEqual(len(self.L2C_newObjects), 0, msg='L2C_processing did not output an L2C object.')
            # self.assertIsInstance(self.L2C_newObjects[0], L2C_object)

            # check if PC.add_local_availability finds the written dataset
            # FIXME this will fail because AC outputs TOA-Ref if ECMWF data are missing
            # if self.PC.config.exec_L2CP[1]:
            #     self.check_availability(self.L2C_newObjects, 'L2C')  # FIXME fails (not yet working)

            # Setting the config.status manually.
            # if self.L2C_newObjects:
            #     self.PC.config.status = "finished"
            # FIXME once the job.status attribute is updated by the level-processes, the commented-out code
            # FIXME above can be deleted.

    class TestCompletePipeline(unittest.TestCase):
        """
        Similar to TestAll, but runs the complete preprocessing pipeline at once via
        process_controller.run_all_processors() instead of testing each level-processing step individually.
        """
        PC = None  # default

        @classmethod
        def tearDownClass(cls):
            cls.PC.config.DB_job_record.delete_procdata_of_entire_job(force=True)

        @classmethod
        def validate_db_entry(cls, filename):
            sceneID_res = get_info_from_postgreSQLdb(cls.PC.config.conn_database, 'scenes', ['id'],
                                                     {'filename': filename})
            assert sceneID_res and isinstance(sceneID_res[0][0], int), 'Invalid database entry.'

        @classmethod
        def create_job(cls, jobID, config):
            cls.PC = process_controller(jobID, **config)

            for ds in cls.PC.config.data_list:
                cls.validate_db_entry(ds['filename'])

            cls.PC.config.CPUs_all_jobs = 3
            cls.PC.config.max_parallel_reads_writes = 3
            # cls.PC.config.spathomo_estimate_accuracy = True
            # cls.PC.config.ac_estimate_accuracy = True  # FIXME
            # cls.PC.config.spechomo_estimate_accuracy = True  # FIXME
            # cls.PC.config.exec_L1CP = [1, 1, 0]
            # cls.PC.config.exec_2ACP = [1, 1, 0]

        def test_run_all_processors(self):
            self.PC.run_all_processors()
            self.assertIsInstance(self.PC.L2C_newObjects, list)
            self.assertIsInstance(self.PC.summary_detailed, pandas.DataFrame)
            self.assertFalse(self.PC.summary_detailed.empty)
            self.assertIsInstance(self.PC.summary_quick, pandas.DataFrame)
            self.assertFalse(self.PC.summary_quick.empty)

###################################################################################
# Test cases 1-9: Test_<Satellite-Dataset>_<PreCollection or Collection>Data
# Test case 10: Test_MultipleDatasetsInOneJob


# TESTDATA-CLASSES.
class Test_Landsat5_PreCollectionData(BaseTestCases.TestAll):
    """
    Parametrized testclass. Tests the level-processes on a Landsat-5 TM scene (pre-collection data).
    More information on the dataset will be output after the test classes are executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186263, job_config_kwargs)

# class Test_Landsat5_CollectionData(BaseTestCases.TestAll):
#     """
#     Parametrized testclass. Tests the level-processes on a Landsat-5 TM scene (collection data).
#     More information on the dataset will be output after the test classes are executed.
#     """
#     @classmethod
#     def setUpClass(cls):
#         cls.create_job(26186263, job_config_kwargs)  # FIXME job_ID!


class Test_Landsat7_SLC_on_PreCollectionData(BaseTestCases.TestAll):
    """
    Parametrized testclass. Tests the level-processes on a Landsat-7 ETM+_SLC_ON scene (pre-collection data).
    More information on the dataset will be output after the test classes are executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186262, job_config_kwargs)


class Test_Landsat7_SLC_off_PreCollectionData(BaseTestCases.TestAll):
    """
    Parametrized testclass. Tests the level-processes on a Landsat-7 ETM+_SLC_OFF scene (pre-collection data).
    More information on the dataset will be output after the test classes are executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186267, job_config_kwargs)


# class Test_Landsat7_SLC_off_CollectionData(BaseTestCases.TestAll):
#     """
#     Parametrized testclass. Tests the level-processes on a Landsat-7 ETM+_SLC_OFF scene (collection data).
#     More information on the dataset will be output after the test classes are executed.
#     """
#     @classmethod
#     def setUpClass(cls):
#         cls.create_job(26186267, job_config_kwargs)  # FIXME job_ID!

class Test_Landsat8_PreCollectionData(BaseTestCases.TestAll):
    """
    Parametrized testclass. Tests the level-processes on a Landsat-8 OLI_TIRS scene (pre-collection data).
    More information on the dataset will be output after the test classes are executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186196, job_config_kwargs)


class Test_Landsat8_CollectionData(BaseTestCases.TestAll):
    """
    Parametrized testclass. Tests the level-processes on a Landsat-8 OLI_TIRS scene (collection data).
    More information on the dataset will be output after the test classes are executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26187391, job_config_kwargs)


class Test_Landsat8_CollectionData_CompletePipeline(BaseTestCases.TestCompletePipeline):
    """
    Parametrized testclass. Tests the level-processes on a Landsat-8 OLI_TIRS scene (collection data).
    More information on the dataset will be output after the test classes are executed.
    """
    @classmethod
    def setUpClass(cls):
        cfg = job_config_kwargs.copy()  # copy to avoid modifying the global config if it is updated below
        # cfg.update(dict(inmem_serialization=True))
        cls.create_job(26187391, cfg)


class Test_Sentinel2A_SingleGranuleFormat(BaseTestCases.TestAll):
    """
    Parametrized testclass. Tests the level-processes on a Sentinel-2A MSI scene (1 granule in archive: > 2017).
    More information on the dataset will be output after the test classes are executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186268, job_config_kwargs)


class Test_Sentinel2A_SingleGranuleFormat_CompletePipeline(BaseTestCases.TestCompletePipeline):
    """
    Parametrized testclass. Tests the level-processes on a Sentinel-2A MSI scene (1 granule in archive: > 2017).
    More information on the dataset will be output after the test classes are executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186268, job_config_kwargs)

    # @classmethod
    # def tearDownClass(cls):
    #     super().tearDownClass()
        # PC = cls.PC


class Test_Sentinel2A_MultiGranuleFormat(BaseTestCases.TestAll):
    """
    Parametrized testclass. Tests the level-processes on a Sentinel-2A MSI scene (multiple granules in archive: < 2017).
    More information on the dataset will be output after the test classes are executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186272, job_config_kwargs)


class Test_Sentinel2B_SingleGranuleFormat(BaseTestCases.TestAll):
    """
    Parametrized testclass. Tests the level-processes on a Sentinel-2B MSI scene (1 granule in archive: > 2017).
    More information on the dataset will be output after the test classes are executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186937, job_config_kwargs)


class Test_MultipleDatasetsInOneJob(BaseTestCases.TestAll):
    """
    Parametrized testclass. Tests the level-processes on a job containing a Landsat-5 (pre-collection data),
    Landsat-7 SLC_off (pre-collection data) and a Sentinel-2A (collection data) scene.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186273, job_config_kwargs)


class Test_MultipleDatasetsInOneJob_CompletePipeline(BaseTestCases.TestCompletePipeline):
    """
    Parametrized testclass. Tests the level-processes on a job containing a Landsat-5 (pre-collection data),
    Landsat-7 SLC_off (pre-collection data) and a Sentinel-2A (collection data) scene.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186273, job_config_kwargs)

    # @classmethod
    # def tearDownClass(cls):
    #     super().tearDownClass()
        # PC = cls.PC


class Test_ProcessContinuing_CompletePipeline(unittest.TestCase):
    """
    Parametrized testclass. Tests the level-processes on a job containing a Landsat-5 (pre-collection data),
    Landsat-7 SLC_off (pre-collection data) and a Sentinel-2A (collection data) scene.
    """
    PC = None  # default

    @classmethod
    def tearDownClass(cls):
        cls.PC.config.DB_job_record.delete_procdata_of_entire_job(force=True)

    @classmethod
    def validate_db_entry(cls, filename):
        sceneID_res = get_info_from_postgreSQLdb(cls.PC.config.conn_database, 'scenes', ['id'],
                                                 {'filename': filename})
        assert sceneID_res and isinstance(sceneID_res[0][0], int), 'Invalid database entry.'

    @classmethod
    def create_job(cls, jobID, config):
        cls.PC = process_controller(jobID, **config)

        cls.PC.logger.info('Execution of entire GeoMultiSens pre-processing chain started for job ID %s...'
                           % cls.PC.config.ID)

        for ds in cls.PC.config.data_list:
            cls.validate_db_entry(ds['filename'])

    def setUp(self):
        self.cfg_kw = job_config_kwargs.copy()  # copy, because job_config_kwargs is modified otherwise
        self.cfg_kw.update(dict(
            reset_status=True,
            exec_L1BP=[False, False, False],
            exec_L1CP=[False, False, False],
            exec_L2AP=[False, False, False],
            exec_L2BP=[False, False, False],
            exec_L2CP=[False, False, False]
        ))

        # produce L1A data and stop processing there
        self.create_job(26186263, self.cfg_kw)  # 1x L5 pre-collection
        self.PC.run_all_processors()

    def test_continue_from_L1A(self):
        # create a new job and try to continue from L1A
        cfg_kw = self.cfg_kw
        cfg_kw.update(dict(
            exec_L1BP=[True, True, False],
            delete_old_output=False
        ))
        self.create_job(26186263, cfg_kw)  # 1x L5 pre-collection
        self.PC.run_all_processors()


###################################################################################
# Summarizing the information regarding the test datasets.

# The information: 'country' (3-letter country code, UN), 'characteristic features of the shown scene', 'cloud cover
# present' and 'overlap area present' of each dataset is summarized in the dictionary "testdata_scenes". The
# information is sorted according to the testdata.
# 3-letter code:
# UKR-Ukraine, KGZ-Kyrgyzstan, POL-Poland, AUT-Austria, JPN-Japan, BOL-Bolivia, TUR-Turkey, DEU-Germany, CHE-Switzerland.
testdata_scenes = \
    {'Landsat5_PreCollectionData': ['UKR', 'City region, forest', 'Sparsely', 'Zone 34/35'],
     # 'Landsat5_CollectionData': ['KGZ', 'Snowy Mountains', 'Yes', 'None'],
     'Landsat7_SLC_on_PreCollectionData': ['POL', 'City region, lakes', 'Yes', 'None'],
     'Landsat7_SLC_off_PreCollectionData': ['AUT', 'Stripes (partly), Mountains', 'None', 'None'],
     # 'Landsat7_SLC_off_CollectionData': ['JPN', 'Stripes (completely), Mountains', 'Yes', 'Zone 53/54'],
     'Landsat8_PreCollectionData': ['BOL', 'Forest', 'Yes', 'None'],
     'Landsat8_CollectionData': ['TUR', 'Snowy Mountains', 'Yes', 'None'],
     'Sentinel2A_MultiGranuleFormat': ['DEU', 'Potsdam', 'Sparsely', 'None'],
     'Sentinel2A_SingleGranuleFormat': ['CHE', 'City region, on the Rhine', 'Yes', 'None']
     }

# The dictionary keys are used to parametrize the test classes so that each test class is executed
# automatically.
testdata = list(testdata_scenes.keys())
testdata.append('MultipleDatasetsInOneJob')
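
# NOTE: every entry of 'testdata' must have a matching test class named 'Test_<entry>' defined above, since the
# __main__ block below resolves the test classes via eval("Test_" + <entry>). For this reason the Sentinel-2A keys
# of 'testdata_scenes' follow the 'SingleGranuleFormat'/'MultiGranuleFormat' naming of the test classes.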


###################################################################################
# Parameterizing the test cases and creating a summary of the test results.

summary_testResults, summary_errors, summary_failures, summary_skipped, jobstatus = [[] for _ in range(5)]


@unittest.skip('only intended for manual debugging of single jobs')
class Test_in_normal_mode(unittest.TestCase):
    def setUp(self):
        # self.job_id = 26184107
        # self.job_id = 26185175   # 1x TM5
        # self.job_id = 26185176   # 1x Landsat
        # self.job_id = 26185177  # 1st Sentinel-2 test scene
        # self.job_id = 26185189   # directly adjacent granules of the 1st Sentinel-2 test scene
        # self.job_id = 26185237  # 4 x Landsat-8 -> job created via database tools
        # self.job_id = 26185239  # 50 x Landsat-8 -> job created via database tools - 1st L8 beta test job
        # self.job_id = 26185242  # 1 x Landsat-8 - Bug files_in_archive=None
        # self.job_id = 26185250  # beta job - 219 x L8, 172 x L7, 111 x S2, spatref S2
        # self.job_id = 26185251  # 1x L8, target sensor L8
        # self.job_id = 26185252  # 1x L8, target sensor L8, spat.ref L8
        # self.job_id = 26185253  # 25x L8, target sensor L8, spat.ref L8
        # self.job_id = 26185254  # 10x L8, target sensor L8, spat.ref L8
        # Reason: L1A write error in tiled Python mode with more than 1 scene in the job:
        # self.job_id = 26185255  # 1x L8 Bug 5 corners found
        # self.job_id = 26185256  # 1x L7 SLC off, target sensor L8, spat.ref L8
        # self.job_id = 26185257  # beta job - 219 x L8, 172 x L7, 111 x S2, spatref L8
        # self.job_id = 26185258  # beta job - 219 x L8, spatref L8
        # self.job_id = 26185259  # beta job - 172 x L7, spatref L8
        # self.job_id = 26185260  # beta job - 111 x S2, spatref L8
        # self.job_id = 26185268  # 25x L7 SLC off, target sensor L8, spat.ref L8
        # self.job_id = 26185269  # 1x L7 SLC off, Bug SpatialIndexMediator
        # self.job_id = 26185270  # 5x L7 SLC off, Bug SpatialIndexMediator
        # self.job_id = 26185275  # 1x L8, spat. ref. L8 Bug L1B_mask not found
        # self.job_id = 26185264  # 1x L8, Bug L1B_masks not found
        # self.job_id = 26185265  # 1x L8, Bug L2B_masks not found
        # self.job_id = 26185268  # "2x L8, Bug L2B_masks not found, incl. 1x bad archive"
        # self.job_id = 26185269  # "10x L8, Bug L2B_masks not found"
        # self.job_id = 26185272  # "1x S2A Sips"
        # self.job_id = 26185273  # "1x L7, target L8, spat.ref L8"
        # self.job_id = 26185275  # "1x L7, target L8, spat.ref L8 L1B Matching failed"
        # self.job_id = 26185276  # "1x L7, target L8, spat.ref L8 L1B Matching window became too small."
        # self.job_id = 26185279  # "GEOMS: 25x L7, target L8, spat.ref L8"
        # "GEOMS: 1x L7, target L8, spat.ref L8, debugging NoneType object is not subscriptable within
        # mapinfo2geotransform":
        # self.job_id = 26185280
        # self.job_id = 26185281  # "GEOMS: 4x L7, target L8, spat.ref L8, freeze of pool.map"
        # self.job_id = 26185283  # "GEOMS: 10x L7, target L8, spat.ref L8, freeze of pool.map"
        # self.job_id = 26185284  # "GEOMS: 11x L7, target L8, spat.ref L8, freeze of pool.map"
        # self.job_id = 26185321  # "GEOMS: 1x L7, target L8, spat.ref L8, debugging L1B_P"
        # "GEOMS: 1x L7, target L8, spat.ref L8, Bug calc_shifted_cross_power_spectrum: NoneType object not iterable":
        # self.job_id = 26185322
        # self.job_id = 26185277  # "GMS41: 10x L7, target L8, spat.ref L8, Permission errors during logging"
        # self.job_id = 26185278  # "Beta-Job - 172 x L7, spatref L8"
        # self.job_id = 26185284  # "GMS41: all beta-L8 with cloud cover <30% (74 scenes)"
        # self.job_id = 26185285  # "GMS41: all beta-L7 with cloud cover <30% (49 scenes)"
        # self.job_id = 26185396  # "GEOMS: 1x S2A multi GSD testing"
        # self.job_id = 26185398  # "GEOMS: 1x S2A granule multi GSD testing"

        # self.job_id = 26186740  # Testjob Landsat-8
        # self.job_id = 26186906  # Bug Input Validator
        # self.job_id = 26186925  # 1 Sentinel-2A, Bug NoneType' object has no attribute 'find'
        # self.job_id = 26187051  # GMS41: 1 Landsat, FileNotFoundError
        # self.job_id = 26187052  # GMS41: 1 Landsat, DB query returns no DEM
        # self.job_id = 26187053  # GMS41: AC: The input 'list_GMS_objs' contains duplicates: ['', '']
        # self.job_id = 26187750  # GEOMS: [AC]: RuntimeWarning: All-NaN slice encountered
        # self.job_id = 26187760  # GEOMS: [L2C]: ValueError: 'axis' entry is out of bounds
        # self.job_id = 26187804  # GEOMS: Spatial homogenization leaves resampling artifacts at the image edges.
        # self.job_id = 26187922  # GEOMS: AssertionError
        self.job_id = 26188163  # GEOMS: pandas.errors.ParserError: Expected 2 fields in line 31, saw 3

        self.PC = process_controller(self.job_id, is_test=False, parallelization_level='scenes', db_host=db_host,
                                     delete_old_output=True, disable_exception_handler=True)
        # self.PC.config.spathomo_estimate_accuracy = True
        # self.PC.config.ac_estimate_accuracy = True
        # self.PC.config.spechomo_estimate_accuracy = True
        # self.PC.config.exec_L1CP = [1, 1, 0]
        # self.PC.config.exec_2ACP = [1, 1, 0]
        # self.PC.config.path_procdata_scenes = '/storage/gms/processed_scenes/20180227_MGRS33UUU_S2_L8_L7/'

    def test(self):
        self.PC.run_all_processors()


if __name__ == '__main__':
    # Part 1: Creating and running a testsuite for each dataset-testcase, and querying the job.status of the job.
    for items in testdata:
        suite = unittest.TestLoader().loadTestsFromTestCase(eval("Test_"+items))
        alltests = unittest.TestSuite(suite)

        # Part 2: Saving the results of each testsuite and the query for the job.status in individual variables.
        testResult = unittest.TextTestRunner(verbosity=2).run(alltests)

        summary_testResults.append([testResult.testsRun, testResult.wasSuccessful(),
                                    len(testResult.errors), len(testResult.failures),
                                    len(testResult.skipped)])
        summary_errors.append(testResult.errors)
        summary_failures.append(testResult.failures)
        summary_skipped.append(testResult.skipped)

        # FIXME: Once the job.status issue is fixed, the commented-out line below can be re-enabled.
        # jobstatus.append(eval("Test_"+items).PC.status)

    # Part 3: Summarizing the test results of each testsuite and outputting the results in an orderly fashion on the
    # console and in a textfile.
    # Note that the test results are output as usual after each test is executed. Since the output of each
    # level-process is rather long, the output of the test results becomes lost. Therefore, the test results are
    # output again simply to summarize them in one place and to give an overview of the results.

    # Output: a) information on the test datasets (table), b) test results summarized in a table, c) if existing,
    # a list of errors, failures and skips in the testcases and d) each job.status that is not set to "finished".

    time.sleep(0.5)

    # Path of the textfile the results will be logged to.
    test_log_path = os.path.join(gmsRepo_rootpath, 'tests', 'data', 'logs', time.strftime('%Y%m%d_%H%M%S_log.txt'))
    os.makedirs(os.path.dirname(test_log_path), exist_ok=True)  # ensure that the log directory exists

    # Creating a logging system for the testresults.
    # Source: The "GMS_logger"-function in the "gms_preprocessing" --> "misc" --> "logging.py"-script was used and
    # slightly altered to meet the needs of the current problem.
    logger = logging.getLogger("log_Test")
    logger.setLevel(logging.INFO)

    # Defining the format of the console and the file-output.
    formatter_fileH = logging.Formatter('')
    formatter_ConsoleH = logging.Formatter('')

    # Creating a handler for the file for the logging level "INFO".
    fileHandler = logging.FileHandler(test_log_path)
    fileHandler.setFormatter(formatter_fileH)
    fileHandler.setLevel(logging.INFO)

    # Creating a handler for the console for the logging level "INFO". "sys.stdout" is used for the logging output.
    consoleHandler_out = logging.StreamHandler(stream=sys.stdout)
    consoleHandler_out.setFormatter(formatter_ConsoleH)
    consoleHandler_out.set_name('console handler stdout')
    consoleHandler_out.setLevel(logging.INFO)

    # Adding the defined handlers to the instantiated logger.
    logger.addHandler(fileHandler)
    logger.addHandler(consoleHandler_out)

    # OUTPUT, START.
    # Header of the file.
    logger.info("\ntest_gms_preprocessing.py"
                "\nREVIEW OF ALL TEST RESULTS, SUMMARY:"
                "\n***************************************************************************************"
                "\n--> SPECIFIC FEATURES OF DATA:")

    # Adding a table displaying the characteristic features of each dataset.
    logger.info(pandas.DataFrame.from_dict(testdata_scenes,
                                           orient='index',
                                           columns=['Country', 'Characteristic', 'Clouds', 'Overlap_area']))
    logger.info("\nThe jobID used in Test_" + testdata[-1] + " contains the datasets: "
                "\n-Landsat5_PreCollectionData,\n-Landsat7_SLC_off_PreCollectionData and "
                "\n-Sentinel2A_CollectionData.")

    # Adding a table displaying the testresults.
    logger.info("\n***************************************************************************************"
                "\n--> TESTRESULTS:")

    results = ["Run", "Success", "Errors", "Failures", "Skips"]
    testdata_index = ["Test_" + item for item in testdata]
    logger.info(pandas.DataFrame(summary_testResults, columns=results, index=testdata_index))

    # If errors, failures or skips (there is yet nothing to skip in the code) occur, the respective messages will
    # be printed.
    logger.info("\n***************************************************************************************")
    if list(itertools.chain(*summary_errors)) or list(itertools.chain(*summary_failures)) or \
       list(itertools.chain(*summary_skipped)):
        logger.info("--> ERRORS/FAILURES/SKIPS:")
        logger.info("\n---------------------------------------------------------------------------------------")

        for index, test in enumerate(testdata):
            logger.info("Test_" + test + ", ERRORS:")
            if summary_errors[index]:
                logger.info(summary_errors[index][0][1])
            else:
                logger.info("None. \n")

            logger.info("Test_" + test + ", FAILURES:")
            if summary_failures[index]:
                logger.info(summary_failures[index][0][1])
            else:
                logger.info("None. \n")

            logger.info("Test_" + test + ", SKIPS:")
            if summary_skipped[index]:
                logger.info(summary_skipped[index][0][1])
            else:
                logger.info("None.")

            if index != (len(testdata) - 1):
                logger.info("\n---------------------------------------------------------------------------------------")

        logger.info("\n***************************************************************************************")


    # Check whether the job.status of each job is set to "finished". If it is not, a dataframe is created
    # containing the test name and the respective job.status.
    # FIXME: Once the job.status issue is fixed, the commented-out section below can be re-enabled.
    # jobstatus_table, index_table = [[] for _ in range(2)]
    # for index, test in enumerate(testdata):
    #     if jobstatus[index] != "finished":
    #         jobstatus_table.append(jobstatus[index])
    #         index_table.append("Test_" + test)
    #
    # if jobstatus_table:
    #     logger.info("--> WARNING!!! JOBSTATUS of the following testcase(s) is not set to 'finished': \n")
    #     logger.info(pandas.DataFrame(jobstatus_table, columns=["jobstatus"], index=index_table))
    #     logger.info("\n***************************************************************************************")
    # else:
    #     pass

    logger.info("END.")  # OUTPUT, END.

    # Delete the handlers added to the "log_Test"-logger to ensure that no message is output twice in a row when
    # the logger is used again.
    logger.handlers = []