#!/usr/bin/env python
# coding: utf-8

# SICOR is freely available, platform-independent software designed to process hyperspectral remote sensing data,
# and particularly developed to handle data from the EnMAP sensor.

# This file contains some tools for the estimation of the a priori state vector needed for optimal estimation.

# Copyright (C) 2018  Niklas Bohn (GFZ, <nbohn@gfz-potsdam.de>),
# German Research Centre for Geosciences (GFZ, <https://www.gfz-potsdam.de>)

# This software was developed within the context of the EnMAP project supported by the DLR Space Administration with
# funds of the German Federal Ministry of Economic Affairs and Energy (on the basis of a decision by the German
# Bundestag: 50 EE 1529) and contributions from DLR, GFZ and OHB System AG.

# This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later
# version.

# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

# You should have received a copy of the GNU General Public License along with this program.
# If not, see <https://www.gnu.org/licenses/>.


import numpy as np
from numpy import logical_and as aand
import dill
from tqdm import tqdm
import xxhash

import os
from os.path import split, abspath, expandvars

from scipy.interpolate import interp1d
from scipy.io import loadmat, savemat
from scipy.linalg import norm
from spectral.io import envi
from sklearn.cluster import KMeans
import json

from sicor.Tools.EnMAP.LUT import read_lut_enmap_formatted, interpol_lut_c, get_data_file
from sicor.Tools.EnMAP.conversion import generate_filter
from sicor.Tools.EnMAP.metadata import varsol

# Upper bound on the number of cached results kept in svd_inv_sqrt's hash table
# (value assumed here; the constant is not defined in the original file).
max_table_size = 500


def recursive_reencode(j, shell_replace=True):
    """
    Recursively re-encode a mutable object (ascii->str).

    :param j:             object to reencode
    :param shell_replace: if True, expand shell environment variables in string values
    :return:              expanded, reencoded object
    """

    if isinstance(j, dict):
        for key, value in j.items():
            j[key] = recursive_reencode(value, shell_replace)
        return j
    elif isinstance(j, list):
        for i, k in enumerate(j):
            j[i] = recursive_reencode(k, shell_replace)
        return j
    elif isinstance(j, tuple):
        return tuple([recursive_reencode(k, shell_replace) for k in j])
    else:
        if shell_replace and isinstance(j, str):
            try:
                j = expandvars(j)
            except IndexError:
                pass
        return j


def json_load_ascii(filename, shell_replace=True):
    """
    Load a hierarchical structure, convert all unicode to ASCII and expand environment variables.

    :param filename:      json file to load from
    :param shell_replace: if True, expand environment variables in string values
    :return:              encoded dictionary
    """

    with open(filename, 'r') as fin:
        j = json.load(fin)
        return recursive_reencode(j, shell_replace)

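# Example use (hypothetical filename), assuming the json contains shell
# variables such as "$SICOR_DATA/model.mat" in its string values:
#
#     config = json_load_ascii("surface_model_config.json", shell_replace=True)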

def expand_path(directory, subpath):
    """
    Expand a path variable to an absolute path, if it is not one already.

    :param directory:  absolute location
    :param subpath:    path to expand
    :return:           expanded path
    """

    if subpath.startswith('/'):
        return subpath
    return os.path.join(directory, subpath)


def svd_inv_sqrt(C, hashtable=None):
    """
    Matrix inversion, based on decomposition. Built to be stable, and positive.

    :param C: matrix to invert
        hashtable: if used, the hashtable to store/retrieve results in/from

    Return:
        (np.array, np.array): inverse of C and square root of the inverse of C

    """

    # If we have a hash table, look for the precalculated solution
    h = None
    if hashtable is not None:
        # If arrays are in Fortran ordering, they are not hashable.
        if not C.flags['C_CONTIGUOUS']:
            C = C.copy(order='C')
        h = xxhash.xxh64_digest(C)
        if h in hashtable:
            return hashtable[h]

    D, P = np.linalg.eigh(C)
    for count in range(3):
        if np.any(D < 0) or np.any(np.isnan(D)):
            inv_eps = 1e-6 * 10 ** count  # diagonal jitter, escalated on each retry
            D, P = np.linalg.eigh(
                C + np.diag(np.ones(C.shape[0]) * inv_eps))
        else:
            break

        if count == 2:
            raise ValueError('Matrix inversion contains negative values, '
                             'even after adding {} to the diagonal.'.format(inv_eps))

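    # With the eigendecomposition C = P @ diag(D) @ P.T, the inverse square root
    # is P @ diag(D ** -0.5) @ P.T and the inverse is P @ diag(1 / D) @ P.T;
    # both are formed below from L = P @ diag(D ** -0.5).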
    Ds = np.diag(1/np.sqrt(D))
    L = P@Ds
    Cinv_sqrt = L@P.T
    Cinv = L@L.T

    # If there is a hash table, cache our solution.  Bound the total cache size
    # by removing any extra items in FIFO order (assumes an OrderedDict).
    if hashtable is not None:
        hashtable[h] = (Cinv, Cinv_sqrt)
        while len(hashtable) > max_table_size:
            hashtable.popitem(last=False)

    return Cinv, Cinv_sqrt


def svd_inv(C, hashtable=None):
    """
    Matrix inversion, based on decomposition. Built to be stable, and positive.

    :param C:         matrix to invert
    :param hashtable: if used, the hashtable to store/retrieve results in/from
    :return:          inverse of C
    """

    return svd_inv_sqrt(C, hashtable)[0]

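# Minimal usage sketch (not part of the original module): the optional cache
# passed to svd_inv / svd_inv_sqrt should be an OrderedDict so that the FIFO
# eviction above works:
#
#     from collections import OrderedDict
#     cache = OrderedDict()
#     Cinv = svd_inv(np.eye(3) * 2.0, hashtable=cache)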

class MultiComponentSurface(object):
    """
    A model of the surface based on a collection of multivariate Gaussians, with one or more equiprobable components
    and full covariance matrices.

    To evaluate the probability of a new spectrum, we calculate the Mahalanobis distance to each component cluster,
    and use that as our Multivariate Gaussian surface model.
    """

    def __init__(self, config):

        # Models are stored as dictionaries in .mat format
        config = json_load_ascii(config, shell_replace=True)
        model_dict = loadmat(config['output_model_file'])
        self.components = list(zip(model_dict['means'], model_dict['covs']))
        self.n_comp = len(self.components)
        self.wl = model_dict['wl'][0]
        self.n_wl = len(self.wl)

        # Set up normalization method
        self.normalize = model_dict['normalize']
        if self.normalize == 'Euclidean':
            self.e_norm = lambda r: norm(r)
        elif self.normalize == 'RMS':
            self.e_norm = lambda r: np.sqrt(np.mean(pow(r, 2)))
        elif self.normalize == 'None':
            self.e_norm = lambda r: 1.0
        else:
            raise ValueError('Unrecognized Normalization: %s\n' % self.normalize)

        self.selection_metric = 'Mahalanobis'

        # This field, if present and set to true, forces us to use any initialization state and never change.
        # The state is preserved in the geometry object so that this object stays stateless
        self.select_on_init = False

        # Reference values are used for normalizing the reflectances. In the VSWIR regime, reflectances are normalized
        # so that the model is agnostic to absolute magnitude
        self.refwl = np.squeeze(model_dict['refwl'])
        self.idx_ref = [np.argmin(abs(self.wl-w)) for w in np.squeeze(self.refwl)]
        self.idx_ref = np.array(self.idx_ref)

        # Cache some important computations
        self.Covs, self.Cinvs, self.mus = [], [], []
        for i in range(self.n_comp):
            Cov = self.components[i][1]
            self.Covs.append(np.array([Cov[j, self.idx_ref] for j in self.idx_ref]))
            self.Cinvs.append(svd_inv(self.Covs[-1]))
            self.mus.append(self.components[i][0][self.idx_ref])

        # Variables retrieved: each channel maps to a reflectance model parameter
        rmin, rmax = 0, 10.0
        self.statevec = ['RFL_%04i' % int(w) for w in self.wl]
        self.bounds = [[rmin, rmax]]
        self.scale = [1.0]
        self.init = [0.15 * (rmax-rmin)+rmin]
        self.idx_lamb = np.arange(self.n_wl)
        self.n_state = len(self.statevec)

    def component(self, x):
        """
        We pick a surface model component using the Mahalanobis distance.

        This always uses the Lambertian (non-specular) version of the surface reflectance. If the forward model
        initializes via heuristic (i.e. algebraic inversion), the component is only calculated once based on that
        first solution. That state is preserved in the geometry object.
        """

        x_surface = x

        # Get the (possibly normalized) reflectance
        lamb = self.calc_lamb(x_surface)
        lamb_ref = lamb[self.idx_ref]
        lamb_ref = lamb_ref / self.e_norm(lamb_ref)

        # Mahalanobis or Euclidean distances
        mds = []
        for ci in range(self.n_comp):
            ref_mu = self.mus[ci]
            ref_Cinv = self.Cinvs[ci]
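            # Squared Mahalanobis distance (x - mu)^T C^{-1} (x - mu) to this component's mean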
            if self.selection_metric == 'Mahalanobis':
                md = (lamb_ref - ref_mu).T.dot(ref_Cinv).dot(lamb_ref - ref_mu)
            else:
                md = sum(pow(lamb_ref - ref_mu, 2))
            mds.append(md)
        closest = np.argmin(mds)

        return closest

    def xa(self, x_surface):
        """
        Mean of prior distribution, calculated at state x. We find the covariance in a normalized space
        (normalizing by z) and then unnormalize the result for the calling function. This always uses the Lambertian
        (non-specular) version of the surface reflectance.
        """

        lamb = self.calc_lamb(x_surface)
        lamb_ref = lamb[self.idx_ref]
        ci = self.component(x_surface)
        lamb_mu = self.components[ci][0]
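        # Un-normalize: scale the stored (normalized) component mean by the
        # magnitude of the current reflectance in the reference windows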
        lamb_mu = lamb_mu * self.e_norm(lamb_ref)
        mu = lamb_mu

        return mu

    def Sa(self, x_surface):
        """
        Covariance of prior distribution, calculated at state x. We find the covariance in a normalized space
        (normalizing by z) and then unnormalize the result for the calling function.
        """

        lamb = self.calc_lamb(x_surface)
        lamb_ref = lamb[self.idx_ref]
        ci = self.component(x_surface)
        Cov = self.components[ci][1]
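        # Un-normalize: scale the stored (normalized) covariance by the squared
        # magnitude of the current reflectance in the reference windows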
        Cov = Cov * (self.e_norm(lamb_ref)**2)

        return Cov

    def calc_lamb(self, x_surface):
        """
        Lambertian reflectance.
        """

        return x_surface[self.idx_lamb]


def surface_model(config, logger=None):
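    """
    Build a multicomponent Gaussian surface model from collections of reflectance spectra and save it in .mat format.

    :param config: path to a json configuration file providing 'output_model_file', 'sources', 'normalize',
                   'wavelength_file' and 'reference_windows'
    :param logger: optional logger instance
    """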

    configdir, configfile = split(abspath(config))
    config = json_load_ascii(config, shell_replace=True)

    # Determine top level parameters
    for q in ['output_model_file', 'sources', 'normalize', 'wavelength_file', 'reference_windows']:
        if q not in config:
            raise ValueError("Missing parameter: %s" % q)

    wavelength_file = expand_path(configdir, config['wavelength_file'])
    normalize = config['normalize']
    reference_windows = config['reference_windows']
    outfile = expand_path(configdir, config['output_model_file'])

    # load wavelengths file
    q = np.loadtxt(wavelength_file)
    if q.shape[1] > 2:
        q = q[:, 1:]
    if q[0, 0] < 100:
        q = q * 1000.0
    wl = q[:, 0]
    nchan = len(wl)

    # build global reference windows
    refwl = []
    for wi, window in enumerate(reference_windows):
        active_wl = aand(wl >= window[0], wl < window[1])
        refwl.extend(wl[active_wl])
    normind = np.array([np.argmin(abs(wl - w)) for w in refwl])
    refwl = np.array(refwl, dtype=float)

    # create basic model template
    model = {'normalize': normalize, 'wl': wl, 'means': [], 'covs': [], 'refwl': refwl}

    for si, source_config in enumerate(config['sources']):
        # determine source parameters
        for q in ['input_spectrum_files', 'windows', 'n_components']:
            if q not in source_config:
                raise ValueError('Source %i is missing a parameter: %s' % (si, q))

        # determine whether we should synthesize our own mixtures
        if 'mixtures' in source_config:
            mixtures = source_config['mixtures']
        elif 'mixtures' in config:
            mixtures = config['mixtures']
        else:
            mixtures = 0

        infiles = [expand_path(configdir, fi) for fi in source_config['input_spectrum_files']]
        ncomp = int(source_config['n_components'])
        windows = source_config['windows']

        # load spectra
        spectra = []
        for infile in infiles:

            if logger is not None:
                logger.info("Loading " + infile + "...")
            hdrfile = infile + '.hdr'
            rfl = envi.open(hdrfile, infile)
            nl, nb, ns = [int(rfl.metadata[n]) for n in ('lines', 'bands', 'samples')]
            swl = np.array([float(f) for f in rfl.metadata['wavelength']])

            # maybe convert to nanometers
            if swl[0] < 100:
                swl = swl * 1000.0

            rfl_mm = rfl.open_memmap(interleave='source', writable=True)
            if rfl.metadata['interleave'] == 'bip':
                x = np.array(rfl_mm[:, :, :])
            else:
                x = np.array(rfl_mm[:, :, :]).transpose((0, 2, 1))
            x = x.reshape(nl * ns, nb)

            # import spectra and resample
            for x1 in x:
                p = interp1d(swl, x1, kind='linear', bounds_error=False, fill_value='extrapolate')
                spectra.append(p(wl))

        # calculate mixtures, if needed
        n = float(len(spectra))
        nmix = int(n * mixtures)
        for mi in range(nmix):
            s1, m1 = spectra[int(np.random.rand() * n)], np.random.rand()
            s2, m2 = spectra[int(np.random.rand() * n)], 1.0 - m1
            spectra.append(m1 * s1 + m2 * s2)

        spectra = np.array(spectra)
        use = np.all(np.isfinite(spectra), axis=1)
        spectra = spectra[use, :]

        # accumulate total list of window indices
        window_idx = -np.ones(nchan, dtype=int)
        for wi, win in enumerate(windows):
            active_wl = aand(wl >= win['interval'][0], wl < win['interval'][1])
            window_idx[active_wl] = wi

        # two-step model: k-means initialization, then a Gaussian fit per cluster
        kmeans = KMeans(init='k-means++', n_clusters=ncomp, n_init=10)
        kmeans.fit(spectra)
        Z = kmeans.predict(spectra)

        for ci in range(ncomp):
            m = np.mean(spectra[Z == ci, :], axis=0)
            C = np.cov(spectra[Z == ci, :], rowvar=False)

            for i in range(nchan):
                window = windows[window_idx[i]]
                if window['correlation'] == 'EM':
                    C[i, i] = C[i, i] + float(window['regularizer'])
                elif window['correlation'] == 'decorrelated':
                    cii = C[i, i]
                    C[:, i] = 0
                    C[i, :] = 0
                    C[i, i] = cii + float(window['regularizer'])
                else:
                    raise ValueError('Unrecognized window correlation option: ' + window['correlation'])

            # Normalize the component spectrum if desired
            if normalize == 'Euclidean':
                z = np.sqrt(np.sum(pow(m[normind], 2)))
            elif normalize == 'RMS':
                z = np.sqrt(np.mean(pow(m[normind], 2)))
            elif normalize == 'None':
                z = 1.0
            else:
                raise ValueError('Unrecognized normalization: %s\n' % normalize)
            m = m / z
            C = C / (z ** 2)

            model['means'].append(m)
            model['covs'].append(C)

    model['means'] = np.array(model['means'])
    model['covs'] = np.array(model['covs'])

    if logger is not None:
        logger.info("Saving results to " + outfile + "...")
    savemat(outfile, model)


def wv_band_ratio(data, fn_table, vza, sza, dem, aot, raa, intp_wvl, intp_fwhm, jday, month, idx, disable=False):
    """
    Band ratio water vapor retrieval.

    :param data:      image dataset
    :param fn_table:  path to radiative transfer LUT
    :param vza:       viewing zenith angle
    :param sza:       sun zenith angle
    :param dem:       digital elevation model, same shape as data
    :param aot:       aerosol optical thickness
    :param raa:       relative azimuth angle
    :param intp_wvl:  instrument wavelengths
    :param intp_fwhm: instrument fwhm
    :param jday:      acquisition day
    :param month:     acquisition month
    :param idx:       indices of the instrument channels to be used for the retrieval
                      (should be approx. 870, 900 and 940 nm)
    :param disable:   if True, the progress bar during the retrieval is disabled; default: False
    :return:          water vapor image
    """

    cnt_land = len(np.ndarray.flatten(data[:, :, idx[1]]))
    num_bd = 2

    toa_sub = np.zeros((cnt_land, num_bd))
    toa_sub[:, 0] = np.ndarray.flatten(data[:, :, idx[1]])
    toa_sub[:, 1] = np.ndarray.flatten(data[:, :, idx[2]])
    cnt_land = len(toa_sub[:, 0])

    luts, axes_x, axes_y, wvl, lut1, lut2, xnodes, nm_nodes, ndim, x_cell = read_lut_enmap_formatted(file_lut=fn_table)

    wvl_lut = wvl
    s_norm = generate_filter(wvl_m=wvl_lut, wvl=intp_wvl, wl_resol=intp_fwhm)

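    # Convolve the monochromatic LUT spectra to the instrument bands using the
    # spectral response filter matrix s_norm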
    lut2_shape = np.array(lut2.shape)
    lut2_shape[6] = len(intp_wvl)
    lut2_res = np.zeros(lut2_shape)
    lut1_res = lut1[:, :, :, :, :, :, :, 0] @ s_norm
    for ii in range(lut2.shape[-1]):
        lut2_res[:, :, :, :, :, :, :, ii] = lut2[:, :, :, :, :, :, :, ii] @ s_norm

    dsol = varsol(jday, month)
    dn2rad = dsol * dsol * 0.1
    fac = 1 / dn2rad

    hsfs = [np.min(dem), np.max(dem)]
    cwvs = list(axes_x[1][4])
    rhos = [0.02, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    l_toa_lut = np.zeros((len(hsfs), len(cwvs), len(rhos), len(intp_wvl)))

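    # Simulate TOA radiances on a coarse grid of surface elevations, water vapor
    # amounts and surface reflectances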
    for ii, hsf in enumerate(hsfs):
        for jj, cwv in enumerate(cwvs):

            vtest = np.asarray([vza, sza, hsf, aot, raa, cwv])
            f_int = interpol_lut_c(lut1_res, lut2_res, xnodes, nm_nodes, ndim, x_cell, vtest, intp_wvl)

            f_int_l0 = f_int[0, :] * 1.e+3
            f_int_edir = f_int[1, :] * 1.e+3
            f_int_edif = f_int[2, :] * 1.e+3
            f_int_ss = f_int[3, :]

            f_int_ee = f_int_edir * np.cos(np.deg2rad(sza)) + f_int_edif

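            # TOA radiance for a Lambertian surface: path radiance plus the
            # ground-reflected term E * rho / pi, amplified by the spherical
            # albedo coupling 1 / (1 - S * rho), and scaled by fac to account
            # for the actual sun-earth distance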
            for kk, rho in enumerate(rhos):
                l_toa = (f_int_l0 + f_int_ee * rho / np.pi / (1 - f_int_ss * rho)) * fac
                l_toa_lut[ii, jj, kk, :] = l_toa

    path_sol = get_data_file(module_name="sicor", file_basename="solar_irradiances_400_2500_1.dill")
    with open(path_sol, "rb") as fl:
        solar_lut = dill.load(fl)
    solar_res = solar_lut @ s_norm

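    # Estimate the absorption-free signal at the measurement channel by linearly
    # extrapolating the two reference channels (approx. 870 and 900 nm) to its wavelength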
    rfl_1_img = data[:, :, idx[0]] / solar_res[idx[0]]
    rfl_2_img = data[:, :, idx[1]] / solar_res[idx[1]]
    rfl_3_img = rfl_1_img + (rfl_2_img - rfl_1_img) * (intp_wvl[idx[2]] - intp_wvl[idx[0]]) / (
        intp_wvl[idx[1]] - intp_wvl[idx[0]])
    # avoid non-positive continuum values before taking ratios
    rfl_3_img[rfl_3_img <= 0] = 0.001

    rfl_sl_img = rfl_2_img / rfl_3_img
    rfl_sl_gr = [np.min(rfl_sl_img) * 0.9, np.max(rfl_sl_img) * 1.1]

    dim_sl = len(rfl_sl_gr)

    alog_rat_wv_lut = np.zeros((len(hsfs), len(cwvs), len(rhos), dim_sl))
    rat_wv_lut = l_toa_lut[:, :, :, idx[2]] / l_toa_lut[:, :, :, idx[1]]
    alog_rat_wv_lut[:, :, :, 0] = np.log(rat_wv_lut * rfl_sl_gr[0])
    alog_rat_wv_lut[:, :, :, 1] = np.log(rat_wv_lut * rfl_sl_gr[1])

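    # For each grid node, fit a second-order polynomial mapping the log of the
    # simulated band ratio to columnar water vapor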
    cf_arr = np.zeros((3, len(hsfs), len(rhos), dim_sl))
    for ii in range(len(hsfs)):
        for jj in range(len(rhos)):
            for kk in range(dim_sl):
                cf_arr[:, ii, jj, kk] = np.polyfit(x=alog_rat_wv_lut[ii, :, jj, kk], y=cwvs, deg=2)

    alog_rat_wv_img = np.log(toa_sub[:, 1] / toa_sub[:, 0] * np.ndarray.flatten(rfl_sl_img))

    if hsfs[1] != hsfs[0]:
        dem_fac = (dem - hsfs[0]) / (hsfs[1] - hsfs[0])
        dem_fac = np.ndarray.flatten(dem_fac)
    else:
        dem_fac = np.zeros(cnt_land)

    s0 = solar_res[idx[1]]
    ll_cal = np.pi * np.ndarray.flatten(data[:, :, idx[1]])[:] / (s0 * np.cos(np.deg2rad(sza)))
    wv_arr = np.empty(cnt_land)
    wv_arr[:] = np.nan

    for ind in tqdm(range(0, cnt_land), disable=disable):
        dem_fac_pix = dem_fac[ind]

        # clamp the calibrated reflectance to the LUT reflectance grid
        if ll_cal[ind] < rhos[0]:
            ll_cal[ind] = rhos[0] + 0.001
        if ll_cal[ind] > rhos[-1]:
            ll_cal[ind] = rhos[-1] - 0.001

        # bracketing reflectance grid nodes for the current pixel
        ll_cal_low = ll_cal[ind] >= rhos
        ll_cal_high = ll_cal[ind] <= rhos
        idx_low = np.where(ll_cal_low)
        idx_high = np.where(ll_cal_high)

        rfl_fac = (ll_cal[ind] - rhos[idx_low[0][-1]]) / (rhos[idx_high[0][0]] - rhos[idx_low[0][-1]])
        rfl_fac_pix = rfl_fac

        rfl_sl = np.ndarray.flatten(rfl_sl_img)[ind]
        rfl_sl_pix = (rfl_sl - rfl_sl_gr[0]) / (rfl_sl_gr[1] - rfl_sl_gr[0])

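        # Trilinear interpolation of the polynomial coefficients in the
        # (elevation, reflectance level, spectral slope) cube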
        il, ih = idx_low[0][-1], idx_high[0][0]
        cf_int = ((1 - dem_fac_pix) * (1 - rfl_fac_pix) * (1 - rfl_sl_pix) * cf_arr[:, 0, il, 0]
                  + (1 - dem_fac_pix) * (1 - rfl_fac_pix) * rfl_sl_pix * cf_arr[:, 0, il, 1]
                  + (1 - dem_fac_pix) * rfl_fac_pix * (1 - rfl_sl_pix) * cf_arr[:, 0, ih, 0]
                  + (1 - dem_fac_pix) * rfl_fac_pix * rfl_sl_pix * cf_arr[:, 0, ih, 1]
                  + dem_fac_pix * (1 - rfl_fac_pix) * (1 - rfl_sl_pix) * cf_arr[:, 1, il, 0]
                  + dem_fac_pix * (1 - rfl_fac_pix) * rfl_sl_pix * cf_arr[:, 1, il, 1]
                  + dem_fac_pix * rfl_fac_pix * (1 - rfl_sl_pix) * cf_arr[:, 1, ih, 0]
                  + dem_fac_pix * rfl_fac_pix * rfl_sl_pix * cf_arr[:, 1, ih, 1])

        # evaluate the fitted second-order polynomial at the pixel's log band ratio
        wv = np.polyval(cf_int, alog_rat_wv_img[ind])
        wv_arr[ind] = wv

    wv_arr_img = np.reshape(wv_arr, (data.shape[:2]))

    return wv_arr_img