Commit 4456e769 authored by Daniel Scheffler

Replaced dummy version of tests with the one from tests-branch.

Former-commit-id: 667175cd
Former-commit-id: c955d44c
parent 6a9d5686
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `testproject` package."""
###################################################################################
"""
test_geomultisens
----------------------------------
The test cases contained in this test script are parametrized. They test the
level processing steps defined in the 'geomultisens' module of the
"GeoMultiSens" project with the help of the following test datasets:
- Landsat-5, Pre-Collection Data,
- Landsat-5, Collection Data,
- Landsat-7, SLC on, Pre-Collection Data,
- Landsat-7, SLC off, Pre-Collection Data,
- Landsat-7, SLC off, Collection Data,
- Landsat-8, Pre-Collection Data,
- Landsat-8, Collection Data,
- Sentinel-2A, Pre-Collection Data and
- Sentinel-2A, Collection Data.
The test datasets can be found in the directory "tests/data/archive_data/...". The
respective SRTM datasets needed during processing can be found in the directory
"tests/data/archive_data/Endeavor".
The tests, defined in a base test case (not executed directly), are triggered by
creating jobs (based on given job IDs) in individual test cases that inherit the
tests from the base test case. One exception: the job ID used in the last test
class refers to a job containing 3 of the datasets listed above.
Note that the test results are output to the console as well as to a log text file
that can be found in the directory "tests/data/logs".
Program edited in July 2017.
"""
###################################################################################
__author__ = 'Daniel Scheffler' # edited by Jessica Palka.
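# How to run (assuming the script is located at tests/test_geomultisens.py and is started from
# the repository root; running it via "python -m unittest" would execute the testcases but skip
# the summary/logging section under "__main__" below):
#     python tests/test_geomultisens.py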
# Imports from the Python standard library.
import itertools
import logging
import os
import sys
import time
import unittest
# Third-party imports.
import pandas
# Imports regarding the 'geomultisens' module.
from geomultisens import process_controller, __file__
from geomultisens.algorithms.L1A_P import L1A_object
from geomultisens.algorithms.L1B_P import L1B_object
from geomultisens.algorithms.L1C_P import L1C_object
from geomultisens.algorithms.L2A_P import L2A_object
from geomultisens.algorithms.L2B_P import L2B_object
from geomultisens.algorithms.L2C_P import L2C_object
from geomultisens.misc.database_tools import get_info_from_postgreSQLdb
# Root path of the GeoMultiSens repository ('__file__' imported above refers to the
# 'geomultisens' package, so its parent directory is the repository root).
gmsRepo_rootpath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# Defining the configurations needed to start a job containing the different dataset scenes.
# TODO Change the job-configurations for selected datasets.
job_config_kwargs = dict(
path_archive=os.path.join(gmsRepo_rootpath, 'tests', 'data', 'archive_data'),
path_procdata_scenes=os.path.join(gmsRepo_rootpath, 'tests', 'data', 'output_scenes'),
path_procdata_MGRS=os.path.join(gmsRepo_rootpath, 'tests', 'data', 'output_mgrs_tiles'),
    is_test=True)
###################################################################################
# Test case: BaseTestCases
class BaseTestCases:
"""
General testclass. The tests defined in this testclass test the processing steps Level-1A, Level-1B, Level-1C,
Level-2A, Level-2B and Level-2C defined in the "GeoMultiSens"-repository.
Note that the tests in this testclass are not executed directly. They are re-used in the other classes defined
in this test-script.
"""
class TestAll(unittest.TestCase):
PC = None # default
@classmethod
def tearDownClass(cls):
cls.PC.DB_job_record.delete_procdata_of_entire_job(force=True)
@classmethod
def validate_db_entry(cls, filename):
sceneID_res = get_info_from_postgreSQLdb(cls.PC.job.conn_database, 'scenes', ['id'], {'filename': filename})
assert sceneID_res and isinstance(sceneID_res[0][0], int), 'Invalid database entry.'
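            # (Note: get_info_from_postgreSQLdb is expected to return a list of result tuples
            # here, e.g. [(<scene ID>,)], hence the [0][0] indexing above.)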
@classmethod
def create_job(cls, jobID, config):
cls.PC = process_controller(jobID, parallelization_level='scenes', db_host='geoms', delete_old_output=True,
job_config_kwargs=config)
            for ds in cls.PC.usecase.data_list:
                cls.PC.add_local_availability(ds)
            for ds in cls.PC.usecase.data_list:
                cls.validate_db_entry(ds['filename'])
def test_L1A_processing(self):
self.L1A_newObjects = self.PC.L1A_processing()
self.assertIsInstance(self.L1A_newObjects, list)
self.assertIsInstance(self.L1A_newObjects[0], L1A_object)
def test_L1B_processing(self):
self.L1B_newObjects = self.PC.L1B_processing()
self.assertIsInstance(self.L1B_newObjects, list)
self.assertIsInstance(self.L1B_newObjects[0], L1B_object)
def test_L1C_processing(self):
self.L1C_newObjects = self.PC.L1C_processing()
self.assertIsInstance(self.L1C_newObjects, list)
self.assertIsInstance(self.L1C_newObjects[0], L1C_object)
def test_L2A_processing(self):
self.L2A_newObjects = self.PC.L2A_processing()
self.assertIsInstance(self.L2A_newObjects, list)
self.assertIsInstance(self.L2A_newObjects[0], L2A_object)
def test_L2B_processing(self):
self.L2B_newObjects = self.PC.L2B_processing()
self.assertIsInstance(self.L2B_newObjects, list)
self.assertIsInstance(self.L2B_newObjects[0], L2B_object)
def test_L2C_processing(self):
self.L2C_newObjects = self.PC.L2C_processing()
self.assertIsInstance(self.L2C_newObjects, list)
self.assertIsInstance(self.L2C_newObjects[0], L2C_object)
# Setting the job.status manually.
# if self.L2C_newObjects:
# self.PC.job.status = "finished" # FIXME after updating the job.status-attribute for the level-processes, delete the code that is commented out.
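        # (Note: unittest runs test methods in alphabetical order by default, so the methods
        # above execute in the intended sequence test_L1A_processing ... test_L2C_processing,
        # each level presumably building on the previous one's results held by the shared PC.)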
###################################################################################
# Test cases 1-9: Test_<Satellite-Dataset>_<PreCollection or Collection>Data
# Test case 10: Test_MultipleDatasetsInOneJob
# TESTDATA-CLASSES.
class Test_Landsat5_PreCollectionData(BaseTestCases.TestAll):
"""
    Parametrized test class. Tests the level processes on a Landsat-5 TM scene (pre-collection data).
    More information on the dataset is output after the test classes have been executed.
"""
@classmethod
def setUpClass(cls):
cls.create_job(26186263, job_config_kwargs)
# class Test_Landsat5_CollectionData(BaseTestCases.TestAll):
# """
# Parametrized test class. Tests the level processes on a Landsat-5 TM scene (collection data).
# More information on the dataset is output after the test classes have been executed.
# """
# @classmethod
# def setUpClass(cls):
# cls.create_job(26186263, job_config_kwargs) # FIXME job_ID!
class Test_Landsat7_SLC_on_PreCollectionData(BaseTestCases.TestAll):
"""
    Parametrized test class. Tests the level processes on a Landsat-7 ETM+_SLC_ON scene (pre-collection data).
    More information on the dataset is output after the test classes have been executed.
"""
@classmethod
def setUpClass(cls):
cls.create_job(26186262, job_config_kwargs)
class Test_Landsat7_SLC_off_PreCollectionData(BaseTestCases.TestAll):
"""
    Parametrized test class. Tests the level processes on a Landsat-7 ETM+_SLC_OFF scene (pre-collection data).
    More information on the dataset is output after the test classes have been executed.
"""
@classmethod
def setUpClass(cls):
cls.create_job(26186267, job_config_kwargs)
# class Test_Landsat7_SLC_off_CollectionData(BaseTestCases.TestAll):
# """
# Parametrized test class. Tests the level processes on a Landsat-7 ETM+_SLC_OFF scene (collection data).
# More information on the dataset is output after the test classes have been executed.
# """
# @classmethod
# def setUpClass(cls):
# cls.create_job(26186267, job_config_kwargs) # FIXME job_ID!
class Test_Landsat8_PreCollectionData(BaseTestCases.TestAll):
"""
    Parametrized test class. Tests the level processes on a Landsat-8 OLI_TIRS scene (pre-collection data).
    More information on the dataset is output after the test classes have been executed.
"""
@classmethod
def setUpClass(cls):
cls.create_job(26186196, job_config_kwargs)
class Test_Landsat8_CollectionData(BaseTestCases.TestAll):
"""
    Parametrized test class. Tests the level processes on a Landsat-8 OLI_TIRS scene (collection data).
    More information on the dataset is output after the test classes have been executed.
"""
@classmethod
def setUpClass(cls):
cls.create_job(26186261, job_config_kwargs)
class Test_Sentinel2A_CollectionData(BaseTestCases.TestAll):
"""
    Parametrized test class. Tests the level processes on a Sentinel-2A MSI scene (collection data).
    More information on the dataset is output after the test classes have been executed.
"""
@classmethod
def setUpClass(cls):
cls.create_job(26186268, job_config_kwargs)
class Test_Sentinel2A_PreCollectionData(BaseTestCases.TestAll):
"""
    Parametrized test class. Tests the level processes on a Sentinel-2A MSI scene (pre-collection data).
    More information on the dataset is output after the test classes have been executed.
"""
@classmethod
def setUpClass(cls):
cls.create_job(26186272, job_config_kwargs)
class Test_MultipleDatasetsInOneJob(BaseTestCases.TestAll):
"""
    Parametrized test class. Tests the level processes on a job containing a Landsat-5 (pre-collection data),
    a Landsat-7 SLC_off (pre-collection data) and a Sentinel-2A (collection data) scene.
"""
@classmethod
def setUpClass(cls):
cls.create_job(26186273, job_config_kwargs)
###################################################################################
# Summarizing the information regarding the test datasets.
# The information 'country' (3-letter country code, UN), 'characteristic features of the shown scene', 'cloud cover
# present' and 'overlap area present' of each dataset is summarized in the dictionary "testdata_scenes". The
# information is sorted according to the test datasets.
# 3-letter codes:
# UKR-Ukraine, KGZ-Kyrgyzstan, POL-Poland, AUT-Austria, JPN-Japan, BOL-Bolivia, TUR-Turkey, DEU-Germany, CHE-Switzerland.
testdata_scenes = {'Landsat5_PreCollectionData':         ['UKR', 'City region, forest', 'Sparsely', 'Zone 34/35'],
                   # 'Landsat5_CollectionData':          ['KGZ', 'Snowy Mountains', 'Yes', 'None'],
                   'Landsat7_SLC_on_PreCollectionData':  ['POL', 'City region, lakes', 'Yes', 'None'],
                   'Landsat7_SLC_off_PreCollectionData': ['AUT', 'Stripes (partly), Mountains', 'None', 'None'],
                   # 'Landsat7_SLC_off_CollectionData':  ['JPN', 'Stripes (completely), Mountains', 'Yes', 'Zone 53/54'],
                   'Landsat8_PreCollectionData':         ['BOL', 'Forest', 'Yes', 'None'],
                   'Landsat8_CollectionData':            ['TUR', 'Snowy Mountains', 'Yes', 'None'],
                   'Sentinel2A_PreCollectionData':       ['DEU', 'Potsdam', 'Sparsely', 'None'],
                   'Sentinel2A_CollectionData':          ['CHE', 'City region, on the Rhine', 'Yes', 'None']
                   }
# The key of the dictionary is the key-value to parametrize the testclasses so that each testclass is executed
# automatically.
testdata = list(testdata_scenes.keys())
testdata.append('MultipleDatasetsInOneJob')
###################################################################################
# Parametrizing the test cases and creating a summary of the testresults.
summary_testResults, summary_errors, summary_failures, summary_skipped, jobstatus = [[] for _ in range(5)]
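# (One entry per testcase is appended below: summary_testResults collects rows of
# [testsRun, wasSuccessful, n_errors, n_failures, n_skipped]; the other lists collect the raw
# error/failure/skip tuples of each TextTestRunner result. jobstatus stays empty for now; it
# would be filled by the commented-out job.status query, see the FIXME below.)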
if __name__ == '__main__':
# Part 1: Creating and running a testsuite for each dataset-testcase, and querying the job.status of the job.
for items in testdata:
suite = unittest.TestLoader().loadTestsFromTestCase(eval("Test_"+items))
alltests = unittest.TestSuite(suite)
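        # (A behavior-equivalent, safer alternative to the eval() above would be a plain
        # namespace lookup, e.g.: globals()["Test_" + items])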
# Part 2: Saving the results of each testsuite and the query for the job.status in individual variables.
testResult = unittest.TextTestRunner(verbosity=2).run(alltests)
summary_testResults.append([testResult.testsRun, testResult.wasSuccessful(),
len(testResult.errors), len(testResult.failures),
len(testResult.skipped)])
summary_errors.append(testResult.errors)
summary_failures.append(testResult.failures)
summary_skipped.append(testResult.skipped)
#jobstatus.append(eval("Test_"+items).PC.job.status) # FIXME: If the job.status-issue is fixed, the commented out section can be nullified.
    # Part 3: Summarizing the test results of each testsuite and outputting the results in an orderly fashion on the
    # console and in a text file.
    # Note that the test results are output as usual after each test is executed. Since the output of each
    # level process is rather long, the test results tend to get lost in it. Therefore, the purpose of outputting the
    # test results again is simply to summarize them in one place and to give an overview of the results.
    # Output: a) information on the test datasets (table), b) test results summarized in a table, c) if present,
    # a list of errors, failures and skips in the testcases and d) any job.status that is not set to "finished".
time.sleep(0.5)
# Path of the textfile the results will be logged to.
test_log_path = os.path.join(gmsRepo_rootpath, 'tests', 'data', 'logs', time.strftime('%Y%m%d_%H%M%S_log.txt'))
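    # (Note: logging.FileHandler below does not create missing directories; 'tests/data/logs' must
    # exist beforehand, e.g. via os.makedirs(os.path.dirname(test_log_path), exist_ok=True).)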
# Creating a logging system for the testresults.
# Source: The "GMS_logger"-function in the "geomultisens" --> "misc" --> "logging.py"-script was used and slightly
# altered to meet the needs of the current problem.
logger = logging.getLogger("log_Test")
logger.setLevel(logging.INFO)
    # Defining the format of the console and the file output (with Python 3's logging, an empty
    # format string falls back to the default '%(message)s', i.e. messages are logged verbatim).
    formatter_fileH = logging.Formatter('')
    formatter_ConsoleH = logging.Formatter('')
# Creating a handler for the file for the logging level "INFO".
fileHandler = logging.FileHandler(test_log_path)
fileHandler.setFormatter(formatter_fileH)
fileHandler.setLevel(logging.INFO)
# Creating a handler for the console for the logging level "INFO". "sys.stdout" is used for the logging output.
consoleHandler_out = logging.StreamHandler(stream=sys.stdout)
consoleHandler_out.setFormatter(formatter_ConsoleH)
consoleHandler_out.set_name('console handler stdout')
consoleHandler_out.setLevel(logging.INFO)
# Adding the defined handlers to the instantiated logger.
logger.addHandler(fileHandler)
logger.addHandler(consoleHandler_out)
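    # (Assumption: the root logger has no handlers configured at this point; otherwise setting
    # "logger.propagate = False" would be needed to avoid duplicated console output.)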
    # OUTPUT, START.
# Header of the file.
logger.info("\ntest_geomultisens.py"
"\nREVIEW OF ALL TEST RESULTS, SUMMARY:"
"\n***************************************************************************************"
"\n--> SPECIFIC FEATURES OF DATA:")
# Adding a table displaying the characteristic features of each dataset.
logger.info(pandas.DataFrame.from_items(testdata_scenes.items(),
orient='index', columns=['Country', 'Characteristic', 'Clouds', 'Overlap_area']))
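    # (Compatibility note: pandas.DataFrame.from_items was deprecated in pandas 0.23 and
    # removed in 1.0; on newer pandas versions the equivalent would be:
    #     pandas.DataFrame.from_dict(testdata_scenes, orient='index',
    #                                columns=['Country', 'Characteristic', 'Clouds', 'Overlap_area']))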
logger.info("\nThe jobID used in Test_" + testdata[-1] + " contains the datasets: "
"\n-Landsat5_PreCollectionData,\n-Landsat7_SLC_off_PreCollectionData and "
"\n-Sentinel2A_CollectionData.")
# Adding a table displaying the testresults.
logger.info("\n***************************************************************************************"
"\n--> TESTRESULTS:")
results = ["Run", "Success", "Errors", "Failures", "Skips"]
testdata_index = ["Test_" + item for item in testdata]
    logger.info(pandas.DataFrame(summary_testResults, columns=results, index=testdata_index))
    # If errors, failures or skips occurred (there is as yet nothing to skip in the code), the respective
    # messages are printed below (only the first traceback of each category per testcase).
logger.info("\n***************************************************************************************")
if list(itertools.chain(*summary_errors)) or list(itertools.chain(*summary_failures)) or \
list(itertools.chain(*summary_skipped)):
logger.info("--> ERRORS/FAILURES/SKIPS:")
logger.info("\n---------------------------------------------------------------------------------------")
for index, test in enumerate(testdata):
logger.info("Test_" + test + ", ERRORS:")
if summary_errors[index]:
logger.info(summary_errors[index][0][1])
else:
logger.info("None. \n")
logger.info("Test_" + test + ", FAILURES:")
if summary_failures[index]:
logger.info(summary_failures[index][0][1])
else:
logger.info("None. \n")
logger.info("Test_" + test + ", SKIPS:")
if summary_skipped[index]:
logger.info(summary_skipped[index][0][1])
else:
logger.info("None.")
            if index != len(testdata) - 1:
logger.info("\n---------------------------------------------------------------------------------------")
        logger.info("\n***************************************************************************************")
    else:
        pass
    # Checking if the job.status of each job is set to "finished". If it is not set to "finished", a dataframe
    # containing the test name and the respective job.status is created.
# FIXME: If the job.status-issue is fixed, the commented out section can be nullified.
# jobstatus_table, index_table = [[] for _ in range(2)]
# for index, test in enumerate(testdata):
# if jobstatus[index] != "finished":
# jobstatus_table.append(jobstatus[index])
# index_table.append("Test_" + test)
#
# if jobstatus_table:
# logger.info("--> WARNING!!! JOBSTATUS of the following testcase(s) is not set to 'finished': \n")
# logger.info(pandas.DataFrame(jobstatus_table, columns=["jobstatus"], index=index_table))
# logger.info("\n***************************************************************************************")
# else:
# pass
logger.info("END.") # OUTPUT, END.
#if __name__ == '__main__':
# unittest.main(argv=['first-arg-is-ignored'],exit=False, verbosity=2)
    # Delete the handlers added to the "log_Test" logger to ensure that no message is output twice
    # when the logger is used again.
logger.handlers = []