#!/usr/bin/env python
# -*- coding: utf-8 -*-

###################################################################################

"""
test_gms_preprocessing
----------------------------------

The test cases contained in this test script are parametrized test cases. They test
the level-processing steps defined in the 'gms_preprocessing' module of the
"gms_preprocessing" project with the help of the following test datasets:

- Landsat-5, Pre-Collection Data,
- Landsat-5, Collection Data,
- Landsat-7, SLC on, Pre-Collection Data,
- Landsat-7, SLC off, Pre-Collection Data,
- Landsat-7, SLC off, Collection Data,
- Landsat-8, Pre-Collection Data,
- Landsat-8, Collection Data,
- Sentinel-2A, Pre-Collection Data and
- Sentinel-2A, Collection Data.

The test datasets can be found in the directory "tests/data/archive_data/...". The
respective SRTM datasets needed during data processing can be found in the directory
"tests/data/archive_data/Endeavor".

The tests are defined in a base test case that is not executed directly. They are
triggered by creating jobs (based on given job-IDs) in individual test cases that
inherit the tests from the base test case. One exception: the job-ID used in the
last test class refers to a job containing 3 of the test datasets listed above.

Note that the test results are printed to the console as well as to a log text file
that can be found in the directory "tests/data/logs".

Program edited in July 2017.
"""

###################################################################################
__author__ = 'Daniel Scheffler'  # edited by Jessica Palka.


# Imports from the Python standard library.
import itertools
import logging
import os
import sys
import time
import unittest

# Third-party imports.
import pandas

# Imports regarding the 'gms_preprocessing' module.
# NOTE: the package's __file__ is imported here and used below to derive the repository root.
from gms_preprocessing import process_controller, __file__
from gms_preprocessing.algorithms.L1A_P import L1A_object
from gms_preprocessing.algorithms.L1B_P import L1B_object
from gms_preprocessing.algorithms.L1C_P import L1C_object
from gms_preprocessing.algorithms.L2A_P import L2A_object
from gms_preprocessing.algorithms.L2B_P import L2B_object
from gms_preprocessing.algorithms.L2C_P import L2C_object
from gms_preprocessing.misc.database_tools import get_info_from_postgreSQLdb


# Root path of the gms_preprocessing repository.
gmsRepo_rootpath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))


# Configuration needed to start a job containing the different dataset scenes.
# TODO Change the job-configurations for selected datasets.
job_config_kwargs = dict(is_test=True)
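# Further job options could be passed the same way if needed, e.g. (hypothetical key shown below;
# the accepted keys depend on the gms_preprocessing job-config schema):
#     job_config_kwargs = dict(is_test=True, exec_mode='Python')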


##########################
# Test case: BaseTestCases
##########################

class BaseTestCases:
    """
    General testclass. The tests defined in this testclass test the processing steps Level-1A, Level-1B, Level-1C,
78
    Level-2A, Level-2B and Level-2C defined in the "gms_preprocessing"-repository.
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
    Note that the tests in this testclass are not executed directly. They are re-used in the other classes defined
    in this test-script.
    """
    class TestAll(unittest.TestCase):
        PC = None  # default

        @classmethod
        def tearDownClass(cls):
            cls.PC.DB_job_record.delete_procdata_of_entire_job(force=True)

        @classmethod
        def validate_db_entry(cls, filename):
            sceneID_res = get_info_from_postgreSQLdb(cls.PC.job.conn_database, 'scenes', ['id'], {'filename': filename})
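            # get_info_from_postgreSQLdb is expected to return a list of result tuples here, e.g. [(26186263,)],
            # so the first column of the first row must be an integer scene ID.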
            assert sceneID_res and isinstance(sceneID_res[0][0], int), 'Invalid database entry.'

        @classmethod
        def create_job(cls, jobID, config):
            cls.PC = process_controller(jobID, parallelization_level='scenes', db_host='geoms', delete_old_output=True,
                                        job_config_kwargs=config)
            for ds in cls.PC.usecase.data_list:
                cls.PC.add_local_availability(ds)

            for ds in cls.PC.usecase.data_list:
                cls.validate_db_entry(ds['filename'])

        def test_L1A_processing(self):
            self.L1A_newObjects = self.PC.L1A_processing()
            self.assertIsInstance(self.L1A_newObjects, list)
            self.assertIsInstance(self.L1A_newObjects[0], L1A_object)

        def test_L1B_processing(self):
            self.L1B_newObjects = self.PC.L1B_processing()
            self.assertIsInstance(self.L1B_newObjects, list)
            self.assertIsInstance(self.L1B_newObjects[0], L1B_object)

        def test_L1C_processing(self):
            self.L1C_newObjects = self.PC.L1C_processing()
            self.assertIsInstance(self.L1C_newObjects, list)
            self.assertIsInstance(self.L1C_newObjects[0], L1C_object)

        def test_L2A_processing(self):
            self.L2A_newObjects = self.PC.L2A_processing()
            self.assertIsInstance(self.L2A_newObjects, list)
            self.assertIsInstance(self.L2A_newObjects[0], L2A_object)

        def test_L2B_processing(self):
            self.L2B_newObjects = self.PC.L2B_processing()
            self.assertIsInstance(self.L2B_newObjects, list)
            self.assertIsInstance(self.L2B_newObjects[0], L2B_object)

        def test_L2C_processing(self):
            self.L2C_newObjects = self.PC.L2C_processing()
            self.assertIsInstance(self.L2C_newObjects, list)
            self.assertIsInstance(self.L2C_newObjects[0], L2C_object)
            # Setting the job.status manually:
            # if self.L2C_newObjects:
            #     self.PC.job.status = "finished"
            # FIXME: Delete the commented-out code above once the job.status attribute is updated by the
            # level-processes.


###################################################################################
# Test cases 1-9: Test_<Satellite-Dataset>_<PreCollection or Collection>Data
# Test case 10: Test_MultipleDatasetsInOneJob


# TESTDATA-CLASSES.
class Test_Landsat5_PreCollectionData(BaseTestCases.TestAll):
    """
    Parametrized testclass. Tests the level-processes on a Landsat-5 TM scene (pre-collection data).
    More information on the dataset will be outputted after the tests-classes are executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186263, job_config_kwargs)

# class Test_Landsat5_CollectionData(BaseTestCases.TestAll):
#     """
#     Parametrized test class. Tests the level-processes on a Landsat-5 TM scene (collection data).
#     More information on the dataset is output after the test classes have been executed.
#     """
#     @classmethod
#     def setUpClass(cls):
#         cls.create_job(26186263, job_config_kwargs)  # FIXME job_ID!


class Test_Landsat7_SLC_on_PreCollectionData(BaseTestCases.TestAll):
    """
    Parametrized test class. Tests the level-processes on a Landsat-7 ETM+_SLC_ON scene (pre-collection data).
    More information on the dataset is output after the test classes have been executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186262, job_config_kwargs)


class Test_Landsat7_SLC_off_PreCollectionData(BaseTestCases.TestAll):
    """
    Parametrized test class. Tests the level-processes on a Landsat-7 ETM+_SLC_OFF scene (pre-collection data).
    More information on the dataset is output after the test classes have been executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186267, job_config_kwargs)


# class Test_Landsat7_SLC_off_CollectionData(BaseTestCases.TestAll):
#     """
#     Parametrized test class. Tests the level-processes on a Landsat-7 ETM+_SLC_OFF scene (collection data).
#     More information on the dataset is output after the test classes have been executed.
#     """
#     @classmethod
#     def setUpClass(cls):
#         cls.create_job(26186267, job_config_kwargs)  # FIXME job_ID!


class Test_Landsat8_PreCollectionData(BaseTestCases.TestAll):
    """
    Parametrized test class. Tests the level-processes on a Landsat-8 OLI_TIRS scene (pre-collection data).
    More information on the dataset is output after the test classes have been executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186196, job_config_kwargs)


class Test_Landsat8_CollectionData(BaseTestCases.TestAll):
    """
    Parametrized test class. Tests the level-processes on a Landsat-8 OLI_TIRS scene (collection data).
    More information on the dataset is output after the test classes have been executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186261, job_config_kwargs)


class Test_Sentinel2A_CollectionData(BaseTestCases.TestAll):
    """
    Parametrized test class. Tests the level-processes on a Sentinel-2A MSI scene (collection data).
    More information on the dataset is output after the test classes have been executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186268, job_config_kwargs)


class Test_Sentinel2A_PreCollectionData(BaseTestCases.TestAll):
    """
    Parametrized test class. Tests the level-processes on a Sentinel-2A MSI scene (pre-collection data).
    More information on the dataset is output after the test classes have been executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186272, job_config_kwargs)


class Test_MultipleDatasetsInOneJob(BaseTestCases.TestAll):
    """
    Parametrized test class. Tests the level-processes on a job containing a Landsat-5 (pre-collection data),
    a Landsat-7 SLC_off (pre-collection data) and a Sentinel-2A (collection data) scene.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186273, job_config_kwargs)


###################################################################################
# Summarizing the information regarding the test datasets.

# The information 'country' (3-letter country code, UN), 'characteristic features of the shown scene', 'cloud cover
# present' and 'overlap area present' of each dataset is summarized in the dictionary "testdata_scenes". The
# information is sorted according to the test datasets.
# 3-letter codes:
# UKR-Ukraine, KGZ-Kyrgyzstan, POL-Poland, AUT-Austria, JPN-Japan, BOL-Bolivia, TUR-Turkey, DEU-Germany, CHE-Switzerland.
testdata_scenes = {'Landsat5_PreCollectionData':         ['UKR', 'City region, forest', 'Sparsely', 'Zone 34/35'],
                   # 'Landsat5_CollectionData':          ['KGZ', 'Snowy Mountains', 'Yes', 'None'],
                   'Landsat7_SLC_on_PreCollectionData':  ['POL', 'City region, lakes', 'Yes', 'None'],
                   'Landsat7_SLC_off_PreCollectionData': ['AUT', 'Stripes (partly), Mountains', 'None', 'None'],
                   # 'Landsat7_SLC_off_CollectionData':  ['JPN', 'Stripes (completely), Mountains', 'Yes', 'Zone 53/54'],
                   'Landsat8_PreCollectionData':         ['BOL', 'Forest', 'Yes', 'None'],
                   'Landsat8_CollectionData':            ['TUR', 'Snowy Mountains', 'Yes', 'None'],
                   'Sentinel2A_PreCollectionData':       ['DEU', 'Potsdam', 'Sparsely', 'None'],
                   'Sentinel2A_CollectionData':          ['CHE', 'City region, on the Rhine', 'Yes', 'None']
                   }

# The keys of the dictionary are used to parametrize the test classes so that each test class is executed
# automatically.
testdata = list(testdata_scenes.keys())
testdata.append('MultipleDatasetsInOneJob')
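# NOTE: The order of "testdata" follows the dict insertion order above (guaranteed from Python 3.7 onwards;
# with older interpreters the execution order of the test classes may vary).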


###################################################################################
# Parametrizing the test cases and creating a summary of the test results.

summary_testResults, summary_errors, summary_failures, summary_skipped, jobstatus = [[] for _ in range(5)]
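# NOTE: A list comprehension is used (rather than [[]] * 5) so that the five summary lists are independent objects.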

if __name__ == '__main__':
    # Part 1: Creating and running a test suite for each dataset test case, and querying the job.status of the job.
    for items in testdata:
        suite = unittest.TestLoader().loadTestsFromTestCase(eval("Test_" + items))
        alltests = unittest.TestSuite(suite)
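        # NOTE: globals()["Test_" + items] would be a safer alternative to eval() here.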

        # Part 2: Saving the results of each test suite and the query for the job.status in individual variables.
        testResult = unittest.TextTestRunner(verbosity=2).run(alltests)

        summary_testResults.append([testResult.testsRun, testResult.wasSuccessful(),
                                    len(testResult.errors), len(testResult.failures),
                                    len(testResult.skipped)])
        summary_errors.append(testResult.errors)
        summary_failures.append(testResult.failures)
        summary_skipped.append(testResult.skipped)

        # jobstatus.append(eval("Test_" + items).PC.job.status)  # FIXME: Re-enable once the job.status issue is fixed.


    # Part 3: Summarizing the test results of each test suite and outputting the results in an orderly fashion on
    # the console and in a text file.
    # Note that the test results are output as usual after each test is executed. Since the output of each
    # level-process is rather long, the output of the test results tends to get lost. The purpose of outputting the
    # test results again is therefore simply to summarize them in one place and to give an overview of the results.

    # Output: a) information on the test datasets (table), b) test results summarized in a table, c) if existing,
    # a list of errors, failures and skips in the test cases and d) each job.status that is not set to "finished".

    time.sleep(0.5)

    # Path of the text file the results will be logged to.
    test_log_path = os.path.join(gmsRepo_rootpath, 'tests', 'data', 'logs', time.strftime('%Y%m%d_%H%M%S_log.txt'))

    # Creating a logging system for the test results.
    # Source: the "GMS_logger" function in "gms_preprocessing/misc/logging.py" was used and slightly
    # altered to meet the needs of this script.
    logger = logging.getLogger("log_Test")
    logger.setLevel(logging.INFO)

    # Defining the format of the console and file output.
    formatter_fileH = logging.Formatter('')
    formatter_ConsoleH = logging.Formatter('')

    # Creating a handler for the file for the logging level "INFO".
    fileHandler = logging.FileHandler(test_log_path)
    fileHandler.setFormatter(formatter_fileH)
    fileHandler.setLevel(logging.INFO)

    # Creating a handler for the console for the logging level "INFO". "sys.stdout" is used for the logging output.
    consoleHandler_out = logging.StreamHandler(stream=sys.stdout)
    consoleHandler_out.setFormatter(formatter_ConsoleH)
    consoleHandler_out.set_name('console handler stdout')
    consoleHandler_out.setLevel(logging.INFO)

    # Adding the defined handlers to the instantiated logger.
    logger.addHandler(fileHandler)
    logger.addHandler(consoleHandler_out)

    # OUTPUT, START.
    # Header of the file.
    logger.info("\ntest_gms_preprocessing.py"
                "\nREVIEW OF ALL TEST RESULTS, SUMMARY:"
                "\n***************************************************************************************"
                "\n--> SPECIFIC FEATURES OF DATA:")

    # Adding a table displaying the characteristic features of each dataset.
    # (pandas.DataFrame.from_items is deprecated; from_dict(orient='index') produces the same table.)
    logger.info(pandas.DataFrame.from_dict(testdata_scenes, orient='index',
                                           columns=['Country', 'Characteristic', 'Clouds', 'Overlap_area']))
    logger.info("\nThe jobID used in Test_" + testdata[-1] + " contains the datasets: "
                "\n-Landsat5_PreCollectionData,\n-Landsat7_SLC_off_PreCollectionData and "
                "\n-Sentinel2A_CollectionData.")

    # Adding a table displaying the testresults.
    logger.info("\n***************************************************************************************"
                "\n--> TESTRESULTS:")

    results = ["Run", "Success", "Errors", "Failures", "Skips"]
    testdata_index = ["Test_" + item for item in testdata]
    logger.info(pandas.DataFrame(summary_testResults, columns=results, index=testdata_index))

    # If errors, failures or skips occur (there is nothing to skip in the code yet), the respective messages are
    # printed.
    logger.info("\n***************************************************************************************")
    if list(itertools.chain(*summary_errors)) or list(itertools.chain(*summary_failures)) or \
        list(itertools.chain(*summary_skipped)):
        logger.info("--> ERRORS/FAILURES/SKIPS:")
        logger.info("\n---------------------------------------------------------------------------------------")

        for index, test in enumerate(testdata):
            logger.info("Test_" + test + ", ERRORS:")
            if summary_errors[index]:
                logger.info(summary_errors[index][0][1])
            else:
                logger.info("None. \n")

            logger.info("Test_" + test + ", FAILURES:")
            if summary_failures[index]:
                logger.info(summary_failures[index][0][1])
            else:
                logger.info("None. \n")

            logger.info("Test_" + test + ", SKIPS:")
            if summary_skipped[index]:
                logger.info(summary_skipped[index][0][1])
            else:
                logger.info("None.")

            if index != len(testdata) - 1:
                logger.info("\n---------------------------------------------------------------------------------------")

        logger.info("\n***************************************************************************************")

    # Checking if the job.status of each job is set to "finished". If it is not, a dataframe is created that
    # contains the test name together with the respective job.status.
    # FIXME: Re-enable the commented-out section below once the job.status issue is fixed.
    # jobstatus_table, index_table = [[] for _ in range(2)]
    # for index, test in enumerate(testdata):
    #     if jobstatus[index] != "finished":
    #         jobstatus_table.append(jobstatus[index])
    #         index_table.append("Test_" + test)
    #
    # if jobstatus_table:
    #     logger.info("--> WARNING!!! JOBSTATUS of the following testcase(s) is not set to 'finished': \n")
    #     logger.info(pandas.DataFrame(jobstatus_table, columns=["jobstatus"], index=index_table))
    #     logger.info("\n***************************************************************************************")
    # else:
    #     pass

    logger.info("END.")  # OUTPUT, END.

    # Delete the handlers added to the "log_Test" logger to ensure that no message is output twice in a row when
    # the logger is used again.
    logger.handlers = []