Commit 0484229e authored by Daniel Scheffler's avatar Daniel Scheffler
Browse files

Merge branch 'feature/add_RandomForestRegression' into 'dev'

Feature/add random forest regression

See merge request !5
parents fbb1d692 544a8fd9
Pipeline #3757 passed with stage
in 18 minutes and 56 seconds
......@@ -122,7 +122,7 @@ def run_from_constraints(args):
def _run_job(dbJob, **config_kwargs):
# type: (GMS_JOB) -> None
# type: (GMS_JOB, dict) -> None
"""
:param dbJob:
......
This diff is collapsed.
......@@ -4,18 +4,35 @@ Algorithms for multispectral image classification.
"""
import numpy as np
from typing import Union, List # noqa F401 # flake8 issue
from typing import Union, List, Tuple # noqa F401 # flake8 issue
from multiprocessing import Pool
from tqdm import tqdm
from sklearn.neighbors import KNeighborsClassifier, NearestCentroid
from pysptools.classification import SAM
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import MaxAbsScaler
from geoarray import GeoArray
from py_tools_ds.numeric.array import get_array_tilebounds
global_shared_endmembers = None
global_shared_im2classify = None
def initializer(endmembers, im2classify):
    """Set the module-level shared variables used by image classifier worker processes.

    Sharing the (potentially large) arrays via module globals avoids pickling them
    for every multiprocessing task.

    :param endmembers:   endmember/training spectra to share with the workers
    :param im2classify:  image to be classified, shared with the workers
    """
    global global_shared_endmembers, global_shared_im2classify
    global_shared_endmembers, global_shared_im2classify = endmembers, im2classify
class _ImageClassifier(object):
"""Base class for GMS image classifiers."""
def __init__(self, train_spectra, train_labels, CPUs=1):
# type: (np.ndarray, Union[np.ndarray, List[int]], int) -> None
# type: (np.ndarray, Union[np.ndarray, List[int]], Union[int, None]) -> None
self.CPUs = CPUs
self.train_spectra = train_spectra
self.train_labels = train_labels
......@@ -23,31 +40,49 @@ class _ImageClassifier(object):
self.n_features = train_spectra.shape[1]
self.clf = None # to be implemented by the subclass
self.cmap = None
self.clf_name = ''
def _predict(self, tilepos, tileimdata):
def _predict(self, tilepos):
raise NotImplementedError('This method has to be implemented by the subclass.')
def classify(self, image_cube, nodataVal=None, tiledims=(1000, 1000)):
image_cube_gA = GeoArray(image_cube, nodata=nodataVal)
def classify(self, image_cube, in_nodataVal=None, cmap_nodataVal=None, tiledims=(250, 250)):
"""
:param image_cube:
:param in_nodataVal:
:param cmap_nodataVal: written into classif_map at nodata pixels
:param tiledims:
:return:
"""
image_cube_gA = GeoArray(image_cube, nodata=in_nodataVal)
image_cube_gA.to_mem()
self.cmap = GeoArray(np.empty((image_cube_gA.rows, image_cube_gA.cols),
dtype=np.array(self.train_labels).dtype), nodata=nodataVal)
bounds_alltiles = get_array_tilebounds(image_cube_gA.shape, tiledims)
# use a local variable to avoid pickling in multiprocessing
cmap = GeoArray(np.empty((image_cube_gA.rows, image_cube_gA.cols),
dtype=np.array(self.train_labels).dtype), nodata=cmap_nodataVal)
print('Performing %s image classification...' % self.clf_name)
if self.CPUs is None or self.CPUs > 1:
with Pool(self.CPUs) as pool:
tiles_cm = pool.starmap(self._predict, image_cube_gA.tiles(tiledims))
for ((rS, rE), (cS, cE)), tile_cm in tiles_cm:
self.cmap[rS: rE + 1, cS: cE + 1] = tile_cm
with Pool(self.CPUs, initializer=initializer, initargs=(self.train_spectra, image_cube_gA)) as pool:
tiles_cm = pool.map(self._predict, bounds_alltiles)
for ((rS, rE), (cS, cE)), tile_cm in tiles_cm:
cmap[rS: rE + 1, cS: cE + 1] = tile_cm
else:
for ((rS, rE), (cS, cE)), tile in tqdm(image_cube_gA.tiles(tiledims)):
print('Performing classification for tile ((%s, %s), (%s, %s))...' % (rS, rE, cS, cE))
self.cmap[rS: rE + 1, cS: cE + 1] = self._predict(((rS, rE), (cS, cE)), tile)[1]
initializer(self.train_spectra, image_cube_gA)
for (rS, rE), (cS, cE) in tqdm(bounds_alltiles):
# print('Performing classification for tile ((%s, %s), (%s, %s))...' % (rS, rE, cS, cE))
cmap[rS: rE + 1, cS: cE + 1] = self._predict(((rS, rE), (cS, cE)))[1]
if nodataVal is not None:
self.cmap[image_cube_gA.mask_nodata.astype(np.int8) == 0] = nodataVal
if cmap_nodataVal is not None:
cmap[image_cube_gA.mask_nodata.astype(np.int8) == 0] = cmap_nodataVal
return self.cmap.astype(image_cube.dtype)
self.cmap = cmap.astype(image_cube.dtype)
return self.cmap
def show_cmap(self):
if self.cmap:
......@@ -59,63 +94,176 @@ class MinimumDistance_Classifier(_ImageClassifier):
NOTE: distance equation: D = sqrt(sum((Xvi - Xvj)²))
"""
def __init__(self, train_spectra, train_labels, CPUs=1):
# type: (np.ndarray, Union[np.ndarray, List[int]], int) -> None
def __init__(self, train_spectra, train_labels, CPUs=1, **kwargs):
# type: (np.ndarray, Union[np.ndarray, List[int]], Union[int, None], dict) -> None
if CPUs is None or CPUs > 1:
CPUs = 1 # The NearestCentroid seens to parallelize automatically. So using multiprocessing is slower.
super(MinimumDistance_Classifier, self).__init__(train_spectra, train_labels, CPUs=CPUs)
self.clf = NearestCentroid()
self.clf_name = 'minimum distance (nearest centroid)'
self.clf = NearestCentroid(**kwargs)
self.clf.fit(train_spectra, train_labels)
def _predict(self, tilepos, tileimdata):
def _predict(self, tilepos):
assert global_shared_im2classify is not None
(rS, rE), (cS, cE) = tilepos
tileimdata = global_shared_im2classify[rS: rE + 1, cS: cE + 1, :]
spectra = tileimdata.reshape((tileimdata.shape[0] * tileimdata.shape[1], tileimdata.shape[2]))
return tilepos, self.clf.predict(spectra).reshape(*tileimdata.shape[:2])
class kNN_Classifier(_ImageClassifier):
def __init__(self, train_spectra, train_labels, CPUs=1, n_neighbors=10):
# type: (np.ndarray, Union[np.ndarray, List[int]], int, int) -> None
def __init__(self, train_spectra, train_labels, CPUs=1, **kwargs):
# type: (np.ndarray, Union[np.ndarray, List[int]], Union[int, None], dict) -> None
super(kNN_Classifier, self).__init__(train_spectra, train_labels, CPUs=CPUs)
self.clf = KNeighborsClassifier(n_neighbors=n_neighbors, n_jobs=CPUs)
self.clf_name = 'k-nearest neighbour (kNN)'
self.clf = KNeighborsClassifier(n_jobs=1, **kwargs)
self.clf.fit(train_spectra, train_labels)
def _predict(self, tilepos, tileimdata):
def _predict(self, tilepos):
assert global_shared_im2classify is not None
(rS, rE), (cS, cE) = tilepos
tileimdata = global_shared_im2classify[rS: rE + 1, cS: cE + 1, :]
spectra = tileimdata.reshape((tileimdata.shape[0] * tileimdata.shape[1], tileimdata.shape[2]))
return tilepos, self.clf.predict(spectra).reshape(*tileimdata.shape[:2])
class SAM_Classifier(_ImageClassifier):
def __init__(self, train_spectra, threshold=0.1, CPUs=1):
# type: (np.ndarray, Union[np.ndarray, List[int]], int) -> None
def __init__(self, train_spectra, CPUs=1):
# type: (np.ndarray, Union[int, None]) -> None
super(SAM_Classifier, self).__init__(train_spectra, np.array(range(train_spectra.shape[0])), CPUs=CPUs)
self.clf = SAM()
self.threshold = threshold
self.clf_name = 'spectral angle mapper (SAM)'
def _predict(self, tilepos, tileimdata):
return self.clf.classify(tileimdata, self.train_spectra, self.threshold)
def _predict(self, tilepos):
assert global_shared_endmembers is not None and global_shared_im2classify is not None
def classify(self, image_cube, nodataVal=None, tiledims=(1000, 1000), mask=None):
image_cube_gA = GeoArray(image_cube, nodata=nodataVal)
(rS, rE), (cS, cE) = tilepos
tileimdata = global_shared_im2classify[rS: rE + 1, cS: cE + 1, :]
endmembers = global_shared_endmembers # type: np.ndarray
# avoid "RuntimeWarning: invalid value encountered in less" during SAM.classify()
if mask:
image_cube_gA[mask] = np.max(image_cube_gA)
elif nodataVal is not None:
image_cube_gA[image_cube_gA[:] == nodataVal] = np.max(image_cube_gA)
if not tileimdata.shape[2] == self.train_spectra.shape[1]:
raise RuntimeError('Matrix dimensions are not aligned. Input image has %d bands but input spectra '
'have %d.' % (tileimdata.shape[2], self.train_spectra.shape[1]))
# normalize input data because SAM asserts only data between -1 and 1
train_spectra_norm, tileimdata_norm = normalize_endmembers_image(endmembers, tileimdata)
angles = np.zeros((tileimdata.shape[0], tileimdata.shape[1], self.n_samples), np.float)
# if np.std(tileimdata) == 0: # skip tiles that only contain the same value
for n_sample in range(self.n_samples):
train_spectrum = train_spectra_norm[n_sample, :].reshape(1, 1, self.n_features)
angles[:, :, n_sample] = self._calc_sam(tileimdata_norm,
train_spectrum,
axis=2)
cmap = np.argmin(angles, axis=2).astype(np.int16)
return tilepos, cmap
@staticmethod
def _calc_sam(s1_norm, s2_norm, axis=0):
"""Compute spectral angle between two vectors or images (in radians)."""
upper = np.sum(s1_norm * s2_norm, axis=axis)
lower = np.sqrt(np.sum(s1_norm * s1_norm, axis=axis)) * np.sqrt(np.sum(s2_norm * s2_norm, axis=axis))
if lower.ndim > 1:
lower[lower == 0] = 1e-10
else:
image_cube_gA[image_cube_gA.mask_nodata.astype(np.int8) == 0] = np.max(image_cube_gA)
lower = lower or 1e-10
quotient = upper / lower
quotient[np.isclose(quotient, 1)] = 1 # in case of pixels that are equal to the endmember
return np.arccos(quotient)
class SID_Classifier(_ImageClassifier):
def __init__(self, train_spectra, CPUs=1):
# type: (np.ndarray, Union[int, None]) -> None
super(SID_Classifier, self).__init__(train_spectra, np.array(range(train_spectra.shape[0])), CPUs=CPUs)
self.clf_name = 'spectral information divergence (SID)'
def _predict(self, tilepos):
assert global_shared_endmembers is not None and global_shared_im2classify is not None
(rS, rE), (cS, cE) = tilepos
tileimdata = global_shared_im2classify[rS: rE + 1, cS: cE + 1, :]
endmembers = global_shared_endmembers # type: np.ndarray
if not tileimdata.shape[2] == self.train_spectra.shape[1]:
raise RuntimeError('Matrix dimensions are not aligned. Input image has %d bands but input spectra '
'have %d.' % (tileimdata.shape[2], self.train_spectra.shape[1]))
# normalize input data because SID asserts only data between -1 and 1
train_spectra_norm, tileimdata_norm = normalize_endmembers_image(endmembers, tileimdata)
cmap = super(SAM_Classifier, self).classify(image_cube_gA, nodataVal=nodataVal, tiledims=tiledims)
sid = np.zeros((tileimdata.shape[0], tileimdata.shape[1], self.n_samples), np.float)
# if np.std(tileimdata) == 0: # skip tiles that only contain the same value
if mask:
cmap[mask] = -9999
for n_sample in range(self.n_samples):
train_spectrum = train_spectra_norm[n_sample, :].reshape(1, 1, self.n_features)
sid[:, :, n_sample] = self._calc_sid(tileimdata_norm,
train_spectrum,
axis=2)
return cmap
cmap = np.argmin(sid, axis=2).astype(np.int16)
return tilepos, cmap
def classify_image(image, train_spectra, train_labels, classif_alg,
kNN_n_neighbors=10, nodataVal=None, tiledims=(1000, 1000), CPUs=None):
@staticmethod
def _calc_sid(s1_norm, s2_norm, axis=0):
    """Compute the spectral information divergence (SID) between two vectors or images.

    SID treats each spectrum as a probability distribution (band values divided by
    their sum) and returns the symmetric Kullback-Leibler divergence between the
    two distributions.

    :param s1_norm: first spectrum / image of spectra (1-D vector or 3-D array)
    :param s2_norm: second spectrum / image of spectra (1-D vector or 3-D array)
    :param axis:    axis along which the bands are stored
    :return:        SID value(s); scalar for vectors, 2-D array for image input
    """
    def get_sum(x, axis=0):
        # Guard against division by zero; np.where also handles the 0-d scalar
        # produced when summing a 1-D vector (item assignment would fail there).
        s = np.sum(x, axis=axis)
        return np.where(s == 0, 1e-10, s)

    # np.spacing(1) keeps the log arguments strictly positive.
    if s1_norm.ndim == 3 and s2_norm.ndim == 3:
        p = (s1_norm / get_sum(s1_norm, axis=axis)[:, :, np.newaxis]) + np.spacing(1)
        # FIX: q must be normalized by the sum of s2_norm, not s1_norm,
        # otherwise q is not a probability distribution and SID is wrong.
        q = (s2_norm / get_sum(s2_norm, axis=axis)[:, :, np.newaxis]) + np.spacing(1)
    else:
        p = (s1_norm / get_sum(s1_norm, axis=axis)) + np.spacing(1)
        q = (s2_norm / get_sum(s2_norm, axis=axis)) + np.spacing(1)

    return np.sum(p * np.log(p / q) + q * np.log(q / p), axis=axis)
class RF_Classifier(_ImageClassifier):
    """Random forest image classifier."""

    def __init__(self, train_spectra, train_labels, CPUs=1, **kwargs):
        # type: (np.ndarray, Union[np.ndarray, List[int]], Union[int, None], dict) -> None
        """Fit a random forest classifier to the given training spectra.

        :param train_spectra:  training spectra with shape (n_samples, n_features)
        :param train_labels:   class label per training spectrum
        :param CPUs:           number of CPUs for tile-based classification (None = all)
        :param kwargs:         keyword arguments passed to sklearn's RandomForestClassifier
        """
        super(RF_Classifier, self).__init__(train_spectra, train_labels, CPUs=CPUs)
        self.clf_name = 'random forest'
        # n_jobs=1: parallelization happens tile-wise via multiprocessing in
        # _ImageClassifier.classify, so sklearn-internal parallelism is disabled here.
        self.clf = RandomForestClassifier(n_jobs=1, **kwargs)
        self.clf.fit(train_spectra, train_labels)

    def _predict(self, tilepos):
        """Classify a single image tile.

        :param tilepos: ((rS, rE), (cS, cE)) row/column bounds of the tile
        :return: (tilepos, 2-D classification map of the tile)
        """
        # The image is provided via a module global set by initializer() to avoid
        # pickling large arrays into the worker processes.
        assert global_shared_im2classify is not None
        (rS, rE), (cS, cE) = tilepos
        tileimdata = global_shared_im2classify[rS: rE + 1, cS: cE + 1, :]
        spectra = tileimdata.reshape((tileimdata.shape[0] * tileimdata.shape[1], tileimdata.shape[2]))

        return tilepos, self.clf.predict(spectra).reshape(*tileimdata.shape[:2])
def classify_image(image, train_spectra, train_labels, classif_alg, in_nodataVal=None, cmap_nodataVal=None,
tiledims=(1000, 1000), CPUs=None, **kwargs):
# type: (Union[np.ndarray, GeoArray], np.ndarray, Union[np.ndarray, List[int]], str, int, ...) -> GeoArray
"""Classify image to find the cluster each spectrum belongs to.
......@@ -127,33 +275,68 @@ def classify_image(image, train_spectra, train_labels, classif_alg,
'MinDist': Minimum Distance (Nearest Centroid)
'kNN': k-nearest-neighbour
'SAM': spectral angle mapping
:param kNN_n_neighbors: The number of neighbors to be considered in case 'classif_alg' is set to
'kNN'. Otherwise, this parameter is ignored.
:param nodataVal:
'SID': spectral information divergence
'RF': random forest
:param in_nodataVal:
:param cmap_nodataVal:
:param tiledims:
:param CPUs: number of CPUs to be used for classification
:param kwargs: keyword arguments to be passed to classifiers if possible
"""
if classif_alg == 'kNN':
clf = kNN_Classifier(
train_spectra,
train_labels,
CPUs=CPUs,
n_neighbors=kNN_n_neighbors)
**kwargs)
elif classif_alg == 'MinDist':
clf = MinimumDistance_Classifier(
train_spectra,
train_labels,
CPUs=CPUs)
CPUs=CPUs,
**kwargs)
elif classif_alg == 'SAM':
clf = SAM_Classifier(
train_spectra,
CPUs=CPUs)
elif classif_alg == 'SID':
clf = SID_Classifier(
train_spectra,
CPUs=CPUs)
elif classif_alg == 'RF':
clf = RF_Classifier(
train_spectra,
train_labels,
CPUs=CPUs, **kwargs)
else:
raise NotImplementedError("Currently only the methods 'kNN', 'MinDist' and 'SAM' are implemented.")
raise NotImplementedError("Currently only the methods 'kNN', 'MinDist', 'SAM', 'SID' and 'RF' are implemented.")
cmap = clf.classify(image, nodataVal=nodataVal, tiledims=tiledims)
cmap = clf.classify(image, in_nodataVal=in_nodataVal, cmap_nodataVal=cmap_nodataVal, tiledims=tiledims)
return cmap
def normalize_endmembers_image(endmembers, image):
    # type: (np.ndarray, np.ndarray) -> Tuple[np.ndarray, np.ndarray]
    """Scale endmember spectra and image to the value range [-1, 1] if they exceed it.

    If all values already lie within [-1, 1], the inputs are only cast to float and
    returned unchanged. Otherwise a single MaxAbsScaler is fitted on the union of
    all endmember and image values so that both are scaled consistently.

    :param endmembers:  endmember spectra (2-D: samples x bands — presumably; TODO confirm)
    :param image:       image array (rows x cols x bands, judging from im2spectra usage)
    :return: (possibly scaled endmembers, possibly scaled image), both as float arrays
    """
    # NOTE: np.float was removed in NumPy 1.24 -> use the builtin float instead.
    em = endmembers.astype(float)
    im = image.astype(float)

    allVals = np.hstack([em.flat, im.flat]).reshape(-1, 1)

    if allVals.min() < -1 or allVals.max() > 1:
        from .L2B_P import im2spectra, spectra2im  # avoid circular import; only needed when scaling

        max_abs_scaler = MaxAbsScaler()
        max_abs_scaler.fit(allVals)  # fit on ALL values so em and im share one scale
        endmembers_norm = max_abs_scaler.transform(em)
        image_norm = spectra2im(max_abs_scaler.transform(im2spectra(im)), tgt_rows=im.shape[0], tgt_cols=im.shape[1])
        return endmembers_norm, image_norm
    else:
        return em, im
......@@ -26,7 +26,9 @@ from scipy.interpolate import interp1d
from geoarray import GeoArray
from py_tools_ds.geo.coord_calc import corner_coord_to_minmax
from py_tools_ds.geo.vector.geometry import boxObj
from py_tools_ds.geo.coord_trafo import transform_any_prj
from py_tools_ds.geo.projection import isProjectedOrGeographic
from py_tools_ds.numeric.vector import find_nearest
from ..options.config import GMS_config as CFG
......@@ -465,7 +467,8 @@ class DEM_Creator(object):
:param db_conn: database connection string
"""
if dem_sensor not in ['SRTM', 'ASTER']:
raise ValueError('%s is not a supported DEM sensor. Choose between SRTM and ASTER (both 30m native GSD).')
raise ValueError('%s is not a supported DEM sensor. Choose between SRTM and ASTER (both 30m native GSD).'
% dem_sensor)
self.dem_sensor = dem_sensor
self.db_conn = db_conn if db_conn else CFG.conn_database
......@@ -584,13 +587,18 @@ class DEM_Creator(object):
def from_extent(self, cornerCoords_tgt, prj, tgt_xgsd, tgt_ygsd):
"""Returns a GeoArray of a DEM according to the given target coordinates
:param cornerCoords_tgt: list of target coordinates [[X,Y], [X,Y], ...]]
:param cornerCoords_tgt: list of target coordinates [[X,Y], [X,Y], ...]] (at least 2 coordinates)
:param prj: WKT string of the projection belonging cornerCoords_tgt
:param tgt_xgsd: output X GSD
:param tgt_ygsd: output Y GSD
:return: DEM GeoArray
"""
# generate at least 4 coordinates in case less coords have been given in order to avoid nodata triangles in DEM
if len(cornerCoords_tgt) < 4 and isProjectedOrGeographic(prj) == 'projected':
co_yx = [(y, x) for x, y in cornerCoords_tgt]
cornerCoords_tgt = boxObj(boxMapYX=co_yx).boxMapXY
# handle coordinate infos
tgt_corner_coord_lonlat = self._get_corner_coords_lonlat(cornerCoords_tgt, prj)
......
......@@ -330,11 +330,18 @@ class JobConfig(object):
self.exec_L2BP = gp('exec_L2BP')
self.spechomo_method = gp('spechomo_method')
self.spechomo_n_clusters = gp('spechomo_n_clusters')
self.spechomo_rfr_n_trees = 50 # this is static confic value, not a user option
self.spechomo_rfr_tree_depth = 10 # this is static confic value, not a user option
self.spechomo_classif_alg = gp('spechomo_classif_alg')
self.spechomo_kNN_n_neighbors = gp('spechomo_kNN_n_neighbors')
self.spechomo_estimate_accuracy = gp('spechomo_estimate_accuracy')
self.spechomo_bandwise_accuracy = gp('spechomo_bandwise_accuracy')
if self.spechomo_method == 'RFR':
raise NotImplementedError("The spectral harmonization method 'RFR' is currently not completely implemented."
"Please us another one.")
# FIXME RFR classifiers are missing (cannot be added to the repository due to file size > 1 GB)
# L2C
self.exec_L2CP = gp('exec_L2CP')
......
......@@ -153,7 +153,9 @@
LI: Linear interpolation;
LR: Linear regression;
RR: Ridge regression;
QR: Quadratic regression*/
QR: Quadratic regression
RFR: Random forest regression with 50 trees
(no spectral sub-clustering available)*/
"spechomo_n_clusters": 50, /*Number of spectral clusters to be used during LR/ RR/ QR homogenization.
E.g., 50 means that the image to be converted to the spectral target sensor
is clustered into 50 spectral clusters and one separate machine learner per
......
......@@ -124,9 +124,9 @@ gms_schema_input = dict(
run_processor=dict(type='boolean', required=False),
write_output=dict(type='boolean', required=False),
delete_output=dict(type='boolean', required=False),
spechomo_method=dict(type='string', required=False, allowed=['LI', 'LR', 'RR', 'QR']),
spechomo_method=dict(type='string', required=False, allowed=['LI', 'LR', 'RR', 'QR', 'RFR']),
spechomo_n_clusters=dict(type='integer', required=False, allowed=[1, 5, 10, 15, 20, 30, 40, 50]),
spechomo_classif_alg=dict(type='string', required=False, allowed=['MinDist', 'kNN', 'SAM']),
spechomo_classif_alg=dict(type='string', required=False, allowed=['MinDist', 'kNN', 'SAM', 'SID']),
spechomo_kNN_n_neighbors=dict(type='integer', required=False, min=0),
spechomo_estimate_accuracy=dict(type='boolean', required=False),
spechomo_bandwise_accuracy=dict(type='boolean', required=False),
......
__version__ = '0.16.3'
__versionalias__ = '20180905.01'
__version__ = '0.16.4'
__versionalias__ = '20181114.01'
......@@ -21,4 +21,3 @@ redis
retools
redis-semaphore
psutil
pysptools
......@@ -19,7 +19,7 @@ requirements = [
'matplotlib', 'numpy', 'scikit-learn', 'scipy', 'gdal', 'pyproj', 'shapely', 'ephem', 'pyorbital', 'dill', 'pytz',
'pandas', 'numba', 'spectral>=0.16', 'geopandas', 'iso8601', 'pyinstrument', 'geoalchemy2', 'sqlalchemy',
'psycopg2', 'py_tools_ds>=0.12.4', 'geoarray>=0.8.0', 'arosics>=0.8.7', 'six', 'tqdm', 'jsmin', 'cerberus',
'nested_dict', 'openpyxl', 'timeout_decorator', 'redis', 'retools', 'redis-semaphore', 'psutil', 'pysptools'
'nested_dict', 'openpyxl', 'timeout_decorator', 'redis', 'retools', 'redis-semaphore', 'psutil',
# spectral<0.16 has some problems with writing signed integer 8bit data
# fmask # conda install -c conda-forge python-fmask
# 'pyhdf', # conda install --yes -c conda-forge pyhdf
......
......@@ -79,7 +79,6 @@ dependencies:
- retools
- redis-semaphore
- psutil
- pysptools
- py_tools_ds>=0.12.4
- geoarray>=0.8.0
- arosics>=0.8.7
......
......@@ -17,9 +17,8 @@ import numpy as np
from geoarray import GeoArray
from gms_preprocessing import set_config
from gms_preprocessing.algorithms.classification import MinimumDistance_Classifier
from gms_preprocessing.algorithms.classification import kNN_Classifier
from gms_preprocessing.algorithms.classification import SAM_Classifier
from gms_preprocessing.algorithms.classification import \
MinimumDistance_Classifier, kNN_Classifier, SAM_Classifier, SID_Classifier, RF_Classifier
from . import db_host
......@@ -41,26 +40,74 @@ with zipfile.ZipFile(path_classifier_zip, "r") as zf, tempfile.TemporaryDirector
class Test_MinimumDistance_Classifier(unittest.TestCase):
def test_classify(self):
MDC = MinimumDistance_Classifier(cluster_centers, cluster_labels)
cmap = MDC.classify(test_gA, nodataVal=-9999)
MDC = MinimumDistance_Classifier(cluster_centers, cluster_labels, CPUs=1)
cmap_sp = MDC.classify(test_gA, in_nodataVal=-9999)
self.assertIsInstance(cmap_sp, np.ndarray)
self.assertEqual(cmap_sp.shape, (1010, 1010))
self.assertIsInstance(cmap, np.ndarray)
self.assertEqual(cmap.shape, (1010, 1010))
MDC = MinimumDistance_Classifier(cluster_centers, cluster_labels, CPUs=None)
cmap_mp = MDC.classify(test_gA, in_nodataVal=-9999)
self.assertIsInstance(cmap_mp, np.ndarray)
self.assertEqual(cmap_mp.shape, (1010, 1010))
self.assertTrue(np.array_equal(cmap_sp, cmap_mp))
class Test_kNN_Classifier(unittest.TestCase):
def test_classify(self):
kNNC = kNN_Classifier(cluster_centers, cluster_labels)
cmap = kNNC.classify(test_gA, nodataVal=-9999)
kNNC = kNN_Classifier(cluster_centers, cluster_labels, CPUs=1)
cmap_sp = kNNC.classify(test_gA, in_nodataVal=-9999)
self.assertIsInstance(cmap_sp, np.ndarray)
self.assertEqual(cmap_sp.shape, (1010, 1010))
kNNC = kNN_Classifier(cluster_centers, cluster_labels, CPUs=None)
cmap_mp = kNNC.classify(test_gA, in_nodataVal=-9999)
self.assertIsInstance(cmap_mp, np.ndarray)
self.assertEqual(cmap_mp.shape, (1010, 1010))
self.assertIsInstance(cmap, np.ndarray)
self.assertEqual(cmap.shape, (1010, 1010))
self.assertTrue(np.array_equal(cmap_sp, cmap_mp))
class Test_SAM_Classifier(unittest.TestCase):
def test_classify(self):
SC = SAM_Classifier(cluster_centers)