#!/usr/bin/env python
# -*- coding: utf-8 -*-

###################################################################################

"""
test_gms_preprocessing
----------------------------------

The testcases contained in this testscript are parametrized testcases. They test
the level-processing steps defined in the 'gms_preprocessing' module with the
help of the following test datasets:
- Landsat-5, Pre-Collection Data,
- Landsat-5, Collection Data,
- Landsat-7, SLC on, Pre-Collection Data,
- Landsat-7, SLC off, Pre-Collection Data,
- Landsat-7, SLC off, Collection Data,
- Landsat-8, Pre-Collection Data,
- Landsat-8, Collection Data,
- Sentinel-2A, Pre-Collection Data and
- Sentinel-2A, Collection Data.

The test datasets can be found in the directory "tests/data/archive_data/...". The
respective SRTM datasets needed for the data processing can be found in the
directory "tests/data/archive_data/Endeavor".

The tests, defined in a base-testcase (not executed directly), are triggered by
creating jobs (based on given job-IDs) in individual testcases that inherit the
tests from the base-testcase. The exception: the job-ID used in the last
testclass refers to 3 different test datasets of those listed above.

Note that the testresults are output to the console as well as to a log textfile
that can be found in the directory "tests/data/logs".

Program edited in July 2017.
"""

# Import Python standard libraries.
import itertools
import logging
import os
import sys
import time
import unittest

# Import third-party libraries.
import pandas

# Imports regarding the 'gms_preprocessing' module.
from gms_preprocessing import process_controller, __file__
from gms_preprocessing.model.gms_object import GMS_object, GMS_object_2_dataset_dict
from gms_preprocessing.algorithms.L1A_P import L1A_object
from gms_preprocessing.algorithms.L1B_P import L1B_object
from gms_preprocessing.algorithms.L1C_P import L1C_object
from gms_preprocessing.algorithms.L2A_P import L2A_object
from gms_preprocessing.algorithms.L2B_P import L2B_object
from gms_preprocessing.algorithms.L2C_P import L2C_object
from gms_preprocessing.misc.database_tools import get_info_from_postgreSQLdb

from . import db_host

__author__ = 'Daniel Scheffler'  # edited by Jessica Palka.

# Rootpath of the gms_preprocessing-repository.
gmsRepo_rootpath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))

# Defining the configurations needed to start a job containing the different dataset scenes.
# TODO Change the job-configurations for selected datasets.
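# NOTE: parallelization_level='scenes' is assumed to parallelize the processing per scene
# (rather than per tile); is_test=True and delete_old_output=True should keep test runs
# isolated and reproducible (see the process_controller signature for the authoritative meaning).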
job_config_kwargs = dict(parallelization_level='scenes', db_host=db_host, delete_old_output=True, is_test=True)

##########################
# Test case: BaseTestCases
##########################


class BaseTestCases:
    """
    General testclass. The tests defined in this testclass test the processing steps Level-1A, Level-1B, Level-1C,
    Level-2A, Level-2B and Level-2C defined in the "gms_preprocessing"-repository.
    Note that the tests in this testclass are not executed directly. They are re-used in the other classes defined
    in this test-script.
    """
    class TestAll(unittest.TestCase):
        PC = None  # default

        @classmethod
        def tearDownClass(cls):
            cls.PC.config.DB_job_record.delete_procdata_of_entire_job(force=True)

        @classmethod
        def validate_db_entry(cls, filename):
            sceneID_res = get_info_from_postgreSQLdb(cls.PC.config.conn_database, 'scenes', ['id'],
                                                     {'filename': filename})
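            # get_info_from_postgreSQLdb is expected to return a list of result tuples,
            # e.g. [(26186263,)] for a scene registered in the 'scenes' table; an empty
            # list means the scene is missing there (assumption based on the assert below).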
            assert sceneID_res and isinstance(sceneID_res[0][0], int), 'Invalid database entry.'

        @classmethod
        def create_job(cls, jobID, config):
            cls.PC = process_controller(jobID, **config)

            cls.PC.logger.info('Execution of entire GeoMultiSens pre-processing chain started for job ID %s...'
                               % cls.PC.config.ID)

            # update attributes of DB_job_record and related DB entry
            cls.PC.config.DB_job_record.reset_job_progress()
            GMS_object.proc_status_all_GMSobjs.clear()  # reset

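            # Check which scenes of the job are already available locally and update their
            # processing level in the data list accordingly (assumed behaviour of
            # add_local_availability, inferred from its use in check_availability below).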
            cls.PC.config.data_list = cls.PC.add_local_availability(cls.PC.config.data_list)

            for ds in cls.PC.config.data_list:
                cls.validate_db_entry(ds['filename'])

        def check_availability(self, GMS_objs, tgt_procL):
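            """Assert that every written dataset is re-found at the expected processing level.

            :param GMS_objs:    list of GMS objects returned by one of the level-processing steps
            :param tgt_procL:   expected processing level, e.g. 'L1A'
            """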
            dss = self.PC.add_local_availability([GMS_object_2_dataset_dict(obj) for obj in GMS_objs])
            for ds in dss:
                self.assertEqual(ds['proc_level'], tgt_procL,
                                 msg='Written %s dataset %s %s %s is not found by PC.add_local_availability.'
                                     % (ds['proc_level'], ds['satellite'], ds['sensor'], ds['subsystem']))

        def test_L1A_processing(self):
            self.L1A_newObjects = self.PC.L1A_processing()
            self.assertIsInstance(self.L1A_newObjects, list)
            self.assertNotEqual(len(self.L1A_newObjects), 0, msg='L1A_processing did not output an L1A object.')
            self.assertIsInstance(self.L1A_newObjects[0], L1A_object)

            # check if PC.add_local_availability finds the written dataset
            self.check_availability(self.L1A_newObjects, 'L1A')

        def test_L1B_processing(self):
            self.L1B_newObjects = self.PC.L1B_processing()
            self.assertIsInstance(self.L1B_newObjects, list)
            self.assertNotEqual(len(self.L1B_newObjects), 0, msg='L1B_processing did not output an L1B object.')
            self.assertIsInstance(self.L1B_newObjects[0], L1B_object)

            # check if PC.add_local_availability finds the written dataset
            self.check_availability(self.L1B_newObjects, 'L1B')

        def test_L1C_processing(self):
            self.L1C_newObjects = self.PC.L1C_processing()
            self.assertIsInstance(self.L1C_newObjects, list)
            self.assertNotEqual(len(self.L1C_newObjects), 0, msg='L1C_processing did not output an L1C object.')
            self.assertIsInstance(self.L1C_newObjects[0], L1C_object)

            # check if PC.add_local_availability finds the written dataset
            self.check_availability(self.L1C_newObjects, 'L1C')

        def test_L2A_processing(self):
            self.L2A_newObjects = self.PC.L2A_processing()
            self.assertIsInstance(self.L2A_newObjects, list)
            self.assertNotEqual(len(self.L2A_newObjects), 0, msg='L2A_processing did not output an L2A object.')
            self.assertIsInstance(self.L2A_newObjects[0], L2A_object)

            # check if PC.add_local_availability finds the written dataset
            self.check_availability(self.L2A_newObjects, 'L2A')

        def test_L2B_processing(self):
            self.L2B_newObjects = self.PC.L2B_processing()
            self.assertIsInstance(self.L2B_newObjects, list)
            self.assertNotEqual(len(self.L2B_newObjects), 0, msg='L2B_processing did not output an L2B object.')
            self.assertIsInstance(self.L2B_newObjects[0], L2B_object)

            # check if PC.add_local_availability finds the written dataset
            self.check_availability(self.L2B_newObjects, 'L2B')

        def test_L2C_processing(self):
            self.L2C_newObjects = self.PC.L2C_processing()
            self.assertIsInstance(self.L2C_newObjects, list)
            self.assertNotEqual(len(self.L2C_newObjects), 0, msg='L2C_processing did not output an L2C object.')
            self.assertIsInstance(self.L2C_newObjects[0], L2C_object)

            # check if PC.add_local_availability finds the written dataset
            # self.check_availability(self.L2C_newObjects, 'L2C')  # FIXME fails (not yet working)

            # Setting the config.status manually.
            # if self.L2C_newObjects:
            #     self.PC.config.status = "finished"
            # FIXME after updating the job.status attribute for the level-processes, delete the
            # FIXME commented-out code above.

    class TestCompletePipeline(unittest.TestCase):
        PC = None  # default

        @classmethod
        def tearDownClass(cls):
            cls.PC.config.DB_job_record.delete_procdata_of_entire_job(force=True)

        @classmethod
        def validate_db_entry(cls, filename):
            sceneID_res = get_info_from_postgreSQLdb(cls.PC.config.conn_database, 'scenes', ['id'],
                                                     {'filename': filename})
            assert sceneID_res and isinstance(sceneID_res[0][0], int), 'Invalid database entry.'

        @classmethod
        def create_job(cls, jobID, config):
            cls.PC = process_controller(jobID, **config)

            cls.PC.logger.info('Execution of entire GeoMultiSens pre-processing chain started for job ID %s...'
                               % cls.PC.config.ID)

            for ds in cls.PC.config.data_list:
                cls.validate_db_entry(ds['filename'])

        def test_run_all_processors(self):
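            # run_all_processors() executes the complete pre-processing chain (Level-1A up to
            # Level-2C) in a single call; the L2C results are expected to be stored on the
            # process controller afterwards (see the assertion below).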
            self.PC.run_all_processors()
            self.assertIsInstance(self.PC.L2C_newObjects, list)

###################################################################################
# Test cases 1-9: Test_<Satellite-Dataset>_<PreCollection or Collection>Data
# Test case 10: Test_MultipleDatasetsInOneJob


# TESTDATA-CLASSES.
class Test_Landsat5_PreCollectionData(BaseTestCases.TestAll):
    """
    Parametrized testclass. Tests the level-processes on a Landsat-5 TM scene (pre-collection data).
    More information on the dataset will be output after the testclasses are executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186263, job_config_kwargs)

# class Test_Landsat5_CollectionData(BaseTestCases.TestAll):
#     """
#     Parametrized testclass. Tests the level-processes on a Landsat-5 TM scene (collection data).
#     More information on the dataset will be output after the testclasses are executed.
#     """
#     @classmethod
#     def setUpClass(cls):
#         cls.create_job(26186263, job_config_kwargs) # FIXME job_ID!


class Test_Landsat7_SLC_on_PreCollectionData(BaseTestCases.TestAll):
    """
    Parametrized testclass. Tests the level-processes on a Landsat-7 ETM+_SLC_ON scene (pre-collection data).
    More information on the dataset will be output after the testclasses are executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186262, job_config_kwargs)


class Test_Landsat7_SLC_off_PreCollectionData(BaseTestCases.TestAll):
    """
    Parametrized testclass. Tests the level-processes on a Landsat-7 ETM+_SLC_OFF scene (pre-collection data).
    More information on the dataset will be output after the testclasses are executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186267, job_config_kwargs)


# class Test_Landsat7_SLC_off_CollectionData(BaseTestCases.TestAll):
#     """
#     Parametrized testclass. Tests the level-processes on a Landsat-7 ETM+_SLC_OFF scene (collection data).
#     More information on the dataset will be output after the testclasses are executed.
#     """
#     @classmethod
#     def setUpClass(cls):
#         cls.create_job(26186267, job_config_kwargs) # FIXME job_ID!

class Test_Landsat8_PreCollectionData(BaseTestCases.TestAll):
    """
    Parametrized testclass. Tests the level-processes on a Landsat-8 OLI_TIRS scene (pre-collection data).
    More information on the dataset will be output after the testclasses are executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186196, job_config_kwargs)


class Test_Landsat8_CollectionData(BaseTestCases.TestAll):
    """
    Parametrized testclass. Tests the level-processes on a Landsat-8 OLI_TIRS scene (collection data).
    More information on the dataset will be output after the testclasses are executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186261, job_config_kwargs)


class Test_Sentinel2A_SingleGranuleFormat(BaseTestCases.TestAll):
    """
    Parametrized testclass. Tests the level-processes on a Sentinel-2A MSI scene (1 granule in archive: > 2017).
    More information on the dataset will be output after the testclasses are executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186268, job_config_kwargs)


class Test_Sentinel2A_SingleGranuleFormat_CompletePipeline(BaseTestCases.TestCompletePipeline):
    """
    Parametrized testclass. Tests the level-processes on a Sentinel-2A MSI scene (1 granule in archive: > 2017).
    More information on the dataset will be output after the testclasses are executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186268, job_config_kwargs)

    # @classmethod
    # def tearDownClass(cls):
    #     super().tearDownClass()
        # PC = cls.PC


class Test_Sentinel2A_MultiGranuleFormat(BaseTestCases.TestAll):
    """
    Parametrized testclass. Tests the level-processes on a Sentinel-2A MSI scene (multiple granules in archive: < 2017).
    More information on the dataset will be output after the testclasses are executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186272, job_config_kwargs)


class Test_Sentinel2B_SingleGranuleFormat(BaseTestCases.TestAll):
    """
    Parametrized testclass. Tests the level-processes on a Sentinel-2B MSI scene (1 granule in archive: > 2017).
    More information on the dataset will be output after the testclasses are executed.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186937, job_config_kwargs)


class Test_MultipleDatasetsInOneJob(BaseTestCases.TestAll):
    """
    Parametrized testclass. Tests the level-processes on a job containing a Landsat-5 (pre-collection data),
    Landsat-7 SLC_off (pre-collection data) and a Sentinel-2A (collection data) scene.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186273, job_config_kwargs)


class Test_MultipleDatasetsInOneJob_CompletePipeline(BaseTestCases.TestCompletePipeline):
    """
    Parametrized testclass. Tests the level-processes on a job containing a Landsat-5 (pre-collection data),
    Landsat-7 SLC_off (pre-collection data) and a Sentinel-2A (collection data) scene.
    """
    @classmethod
    def setUpClass(cls):
        cls.create_job(26186273, job_config_kwargs)

    # @classmethod
    # def tearDownClass(cls):
    #     super().tearDownClass()
        # PC = cls.PC


###################################################################################
# Summarizing the information regarding the test datasets.

# The information 'country' (3-letter country code, UN), 'characteristic features of the shown scene', 'cloud cover
# present' and 'overlap area present' of each dataset is summarized in the dictionary "testdata_scenes". The
# information is sorted according to the testdata.
# 3-letter code:
# UKR-Ukraine, KGZ-Kyrgyzstan, POL-Poland, AUT-Austria, JPN-Japan, BOL-Bolivia, TUR-Turkey, DEU-Germany, CHE-Switzerland.
testdata_scenes = \
    {'Landsat5_PreCollectionData': ['UKR', 'City region, forest', 'Sparsely', 'Zone 34/35'],
     # 'Landsat5_CollectionData': ['KGZ', 'Snowy Mountains', 'Yes', 'None'],
     'Landsat7_SLC_on_PreCollectionData': ['POL', 'City region, lakes', 'Yes', 'None'],
     'Landsat7_SLC_off_PreCollectionData': ['AUT', 'Stripes (partly), Mountains', 'None', 'None'],
     # 'Landsat7_SLC_off_CollectionData': ['JPN', 'Stripes (completely), Mountains', 'Yes', 'Zone 53/54'],
     'Landsat8_PreCollectionData': ['BOL', 'Forest', 'Yes', 'None'],
     'Landsat8_CollectionData': ['TUR', 'Snowy Mountains', 'Yes', 'None'],
     'Sentinel2A_PreCollectionData': ['DEU', 'Potsdam', 'Sparsely', 'None'],
     'Sentinel2A_CollectionData': ['CHE', 'City region, on the Rhine', 'Yes', 'None']
     }

# The keys of the dictionary are used to parametrize the testclasses so that each testclass is executed
# automatically.
testdata = list(testdata_scenes.keys())
testdata.append('MultipleDatasetsInOneJob')
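# The resulting list is assumed to look like, e.g.:
# ['Landsat5_PreCollectionData', 'Landsat7_SLC_on_PreCollectionData', ...,
#  'MultipleDatasetsInOneJob']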


###################################################################################
# Parametrizing the test cases and creating a summary of the testresults.

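# One independent empty list per summary category; the list comprehension avoids the aliasing
# that "[[]] * 5" would cause (all five names would then point to the same list object).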
summary_testResults, summary_errors, summary_failures, summary_skipped, jobstatus = [[] for _ in range(5)]


@unittest.skip('Only executed manually (normal mode).')
class Test_in_normal_mode(unittest.TestCase):
    def setUp(self):
        # self.job_id = 26186740  # Testjob Landsat-8
        # self.job_id = 26186906  # Bug Input Validator
        self.job_id = 26186925  # 1 Sentinel-2A, Bug: 'NoneType' object has no attribute 'find'

        self.PC = process_controller(self.job_id, is_test=False, parallelization_level='scenes',
                                     db_host=db_host, delete_old_output=True,
                                     disable_exception_handler=True)

    def test(self):
        self.PC.run_all_processors()


if __name__ == '__main__':
    # Part 1: Creating and running a testsuite for each dataset-testcase, and querying the job.status of the job.
    for items in testdata:
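        # eval() resolves the testclass object from its name,
        # e.g. "Test_" + "Landsat5_PreCollectionData" -> Test_Landsat5_PreCollectionData.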
        suite = unittest.TestLoader().loadTestsFromTestCase(eval("Test_"+items))
        alltests = unittest.TestSuite(suite)

        # Part 2: Saving the results of each testsuite and the query for the job.status in individual variables.
        testResult = unittest.TextTestRunner(verbosity=2).run(alltests)

        summary_testResults.append([testResult.testsRun, testResult.wasSuccessful(),
                                    len(testResult.errors), len(testResult.failures),
                                    len(testResult.skipped)])
        summary_errors.append(testResult.errors)
        summary_failures.append(testResult.failures)
        summary_skipped.append(testResult.skipped)

        # FIXME: Once the job.status issue is fixed, the commented-out line below can be re-enabled.
        # jobstatus.append(eval("Test_"+items).PC.status)

    # Part 3: Summarizing the testresults of each testsuite and outputting the results in an orderly fashion on the
    # console and in a textfile.
    # Note that the testresults are output as usual after each test is executed. Since the output of each
    # level-process is rather long, the output of the testresults gets lost. Therefore, the testresults are output
    # again simply to summarize them in one place and to give an overview of the results.

    # Output: a) information on the test datasets (table), b) testresults summarized in a table, c) if existing,
    # a list of errors, failures and skips in the testcases and d) any job.status that is not set to "finished".

    time.sleep(0.5)

    # Path of the textfile the results will be logged to.
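    # (e.g. "<repo_root>/tests/data/logs/20170712_153045_log.txt"; the timestamp makes each
    # run's logfile unique)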
    test_log_path = os.path.join(gmsRepo_rootpath, 'tests', 'data', 'logs', time.strftime('%Y%m%d_%H%M%S_log.txt'))

    # Creating a logging system for the testresults.
    # Source: the "GMS_logger" function in gms_preprocessing/misc/logging.py was used and slightly
    # altered to meet the needs of the current problem.
    logger = logging.getLogger("log_Test")
    logger.setLevel(logging.INFO)

    # Defining the format of the console and the file-output.
    formatter_fileH = logging.Formatter('')
    formatter_ConsoleH = logging.Formatter('')

    # Creating a handler for the file for the logging level "INFO".
    fileHandler = logging.FileHandler(test_log_path)
    fileHandler.setFormatter(formatter_fileH)
    fileHandler.setLevel(logging.INFO)

    # Creating a handler for the console for the logging level "INFO". "sys.stdout" is used for the logging output.
    consoleHandler_out = logging.StreamHandler(stream=sys.stdout)
    consoleHandler_out.setFormatter(formatter_ConsoleH)
    consoleHandler_out.set_name('console handler stdout')
    consoleHandler_out.setLevel(logging.INFO)

    # Adding the defined handlers to the instantiated logger.
    logger.addHandler(fileHandler)
    logger.addHandler(consoleHandler_out)

    # OUTPUT, START.
    # Header of the file.
    logger.info("\ntest_gms_preprocessing.py"
                "\nREVIEW OF ALL TEST RESULTS, SUMMARY:"
                "\n***************************************************************************************"
                "\n--> SPECIFIC FEATURES OF DATA:")

    # Adding a table displaying the characteristic features of each dataset.
    # NOTE: pandas.DataFrame.from_items() was removed in pandas 1.0; from_dict() with
    # orient='index' is the forward-compatible equivalent.
    logger.info(pandas.DataFrame.from_dict(testdata_scenes,
                                           orient='index',
                                           columns=['Country', 'Characteristic', 'Clouds', 'Overlap_area']))
    logger.info("\nThe jobID used in Test_" + testdata[-1] + " contains the datasets: "
                "\n-Landsat5_PreCollectionData,\n-Landsat7_SLC_off_PreCollectionData and "
                "\n-Sentinel2A_CollectionData.")

    # Adding a table displaying the testresults.
    logger.info("\n***************************************************************************************"
                "\n--> TESTRESULTS:")

    results = ["Run", "Success", "Errors", "Failures", "Skips"]
    testdata_index = ["Test_" + item for item in testdata]
    logger.info(pandas.DataFrame(summary_testResults, columns=results, index=testdata_index))

    # If errors, failures or skips (there is as yet nothing to skip in the code) occur, the respective message will
    # be printed.
    logger.info("\n***************************************************************************************")
    if list(itertools.chain(*summary_errors)) or list(itertools.chain(*summary_failures)) or \
       list(itertools.chain(*summary_skipped)):
        logger.info("--> ERRORS/FAILURES/SKIPS:")
        logger.info("\n---------------------------------------------------------------------------------------")

        for index, test in enumerate(testdata):
            logger.info("Test_" + test + ", ERRORS:")
            if summary_errors[index]:
                logger.info(summary_errors[index][0][1])
            else:
                logger.info("None. \n")

            logger.info("Test_" + test + ", FAILURES:")
            if summary_failures[index]:
                logger.info(summary_failures[index][0][1])
            else:
                logger.info("None. \n")

            logger.info("Test_" + test + ", SKIPS:")
            if summary_skipped[index]:
                logger.info(summary_skipped[index][0][1])
            else:
                logger.info("None.")

            if index != len(testdata) - 1:
                logger.info("\n---------------------------------------------------------------------------------------")

        logger.info("\n***************************************************************************************")


    # Checking if the job.status of each job is set to "finished". If it is not set to "finished", a dataframe is
    # created containing the test name and the respective job.status.
    # FIXME: Once the job.status issue is fixed, the commented-out section below can be re-enabled.
    # jobstatus_table, index_table = [[] for _ in range(2)]
    # for index, test in enumerate(testdata):
    #     if jobstatus[index] != "finished":
    #         jobstatus_table.append(jobstatus[index])
    #         index_table.append("Test_" + test)
    #
    # if jobstatus_table:
    #     logger.info("--> WARNING!!! JOBSTATUS of the following testcase(s) is not set to 'finished': \n")
    #     logger.info(pandas.DataFrame(jobstatus_table, columns=["jobstatus"], index=index_table))
    #     logger.info("\n***************************************************************************************")
    # else:
    #     pass

    logger.info("END.")  # OUTPUT, END.

    # Delete the handlers added to the "log_Test"-logger to ensure that no message is output twice in a row, when
    # the logger is used again.
    logger.handlers = []