# Adapted from Heylen's Multilinear Mixing Model demo:
# https://sites.google.com/site/robheylenresearch/code
# R. Heylen, P. Scheunders, "A multilinear mixing model for nonlinear
# spectral unmixing,"
# IEEE Trans. Geosci. Remote Sens., vol. PP, no. 99, pp. 1-12, 2015
# Translated to Python by Ryan B Anderson, 2022
import copy
import numpy as np
import scipy.optimize

def model_LMM(E, a):
    """Linear mixing model: reconstruct a spectrum as E @ a."""
    return E @ a

def model_GBM(E, a):
    """Generalized bilinear model: the linear term plus gamma-weighted
    pairwise products of endmember spectra. The first p entries of a are
    the abundances; the remaining p*(p-1)/2 entries are the gammas."""
    p = E.shape[1]
    x = E @ a[0:p]  # linear term
    c = p  # index of the first gamma parameter in a
    for i in range(p):
        for j in range(i + 1, p):
            # bilinear term: gamma_ij * a_i * a_j * (e_i * e_j)
            x = x + a[i] * a[j] * a[c] * E[:, i] * E[:, j]
            c = c + 1
    return x
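
# A minimal sanity check (not part of the original demo): with every gamma
# set to zero, model_GBM reduces to model_LMM. The endmember matrix and
# abundances below are arbitrary illustrative values.
def _check_GBM_reduces_to_LMM():
    rng = np.random.default_rng(0)
    E = rng.random((50, 3))  # 50 wavelength channels x 3 endmembers
    a_lin = np.array([0.5, 0.3, 0.2])  # abundances summing to one
    gammas = np.zeros(3)  # p*(p-1)/2 = 3 bilinear terms, all switched off
    a_full = np.concatenate((a_lin, gammas))
    assert np.allclose(model_GBM(E, a_full), model_LMM(E, a_lin))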

def opt_func(a, x, E, method):
    """Squared reconstruction error between the observed spectrum x and
    the model reconstruction for parameter vector a."""
    if method == "LMM":
        return np.sum((x - model_LMM(E, a)) ** 2)
    if method == "GBM":
        return np.sum((x - model_GBM(E, a)) ** 2)
    raise ValueError("Method " + str(method) + " not recognized")
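
# A quick illustration (not from the original code): for a noiseless linear
# mixture, opt_func evaluates to zero at the true abundances. The values
# below are arbitrary.
def _check_opt_func_at_truth():
    rng = np.random.default_rng(3)
    E = rng.random((50, 3))  # wvl x endmembers, as opt_func expects
    a = np.array([0.7, 0.2, 0.1])
    x = model_LMM(E, a)  # noiseless observation
    assert np.isclose(opt_func(a, x, E, "LMM"), 0.0)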

class LMM:
    """
    Linear mixture model.

    x = spectra (samples x wvl)
    E = endmembers (endmembers x wvl)
    method: Which solver scipy.optimize.minimize should use. Since this is
        constrained minimization, the options are:
        SLSQP = Sequential Least Squares Programming
        trust-constr = Trust-region method (tends to be slow)
    maxiter = Limits the number of solver iterations
    """
    def __init__(self, method="SLSQP", maxiter=100):
        self.method = method
        self.maxiter = maxiter
        self.a_LMM = None
        self.x_LMM = None

    def unmix(self, x, E):
        x = copy.deepcopy(x).T
        E = copy.deepcopy(E).T
        d, N = x.shape
        p = E.shape[1]
        a_LMM = np.zeros((p, N))
        x_LMM = np.zeros((d, N))
        # The function we want to minimize is the reconstruction error
        # between the observed and modeled spectrum. The abundances are
        # sum-to-one constrained, and each is constrained to the [0, 1]
        # interval, so the constraint objects can be built once, outside
        # the loop over spectra.
        nl_const = scipy.optimize.NonlinearConstraint(lambda a: np.sum(a), 1, 1)
        bound_obj = scipy.optimize.Bounds([0] * p, [1] * p)
        for i in range(N):
            a_ini = np.ones(p) / p  # initialize with equal abundances
            a_LMM[:, i] = scipy.optimize.minimize(
                opt_func,
                a_ini,
                args=(x[:, i], E, "LMM"),
                constraints=nl_const,
                bounds=bound_obj,
                method=self.method,
                options={"maxiter": self.maxiter},
            )["x"]
            x_LMM[:, i] = model_LMM(E, a_LMM[:, i])
        self.a_LMM = a_LMM
        self.x_LMM = x_LMM
        return a_LMM.T
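
# A minimal usage sketch (not part of the original code): build noiseless
# synthetic mixtures from known abundances and check that LMM.unmix
# recovers them. All numeric values are arbitrary illustrative choices.
def _demo_LMM():
    rng = np.random.default_rng(1)
    E = rng.random((3, 50))  # 3 endmembers x 50 wavelength channels
    a_true = np.array([[0.6, 0.3, 0.1], [0.2, 0.2, 0.6]])  # samples x endmembers
    x = a_true @ E  # mixed spectra, samples x wvl
    a_est = LMM().unmix(x, E)  # abundances returned as samples x endmembers
    assert np.allclose(a_est, a_true, atol=1e-3)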
 
class GBM:
    """
    Generalized bilinear model.

    x = spectra (samples x wvl)
    E = endmembers (endmembers x wvl)
    method: Which solver scipy.optimize.minimize should use. Since this is
        constrained minimization, the options are:
        SLSQP = Sequential Least Squares Programming
        trust-constr = Trust-region method (tends to be slow)
    maxiter = Limits the number of solver iterations
    random_starts = If nonzero, the optimization is initialized this many
        times with random proportions of each endmember, and the results
        are averaged. May help to avoid local minima, but slows down the
        calculation.
    """
    def __init__(self, method="SLSQP", maxiter=100, random_starts=0):
        self.method = method
        self.maxiter = maxiter
        self.random_starts = random_starts
        self.a_GBM = None
        self.x_GBM = None

    def unmix(self, x, E):
        x = copy.deepcopy(x).T
        E = copy.deepcopy(E).T
        d, N = x.shape
        p = E.shape[1]
        # The first p values are the abundances; the next p*(p-1)/2 are the
        # gamma parameters of the GBM.
        a_GBM = np.zeros((int(p + p * (p - 1) / 2), N))
        x_GBM = np.zeros((d, N))
        if self.random_starts != 0:
            # If specified, create an array of randomized starting values,
            # one column per start, with abundances normalized to sum to one.
            rand_vals = np.random.random((p, self.random_starts))
            rand_vals = rand_vals / np.sum(rand_vals, axis=0)
            zero_vals = np.zeros((int(p * (p - 1) / 2), self.random_starts))
            a_ini = np.vstack((rand_vals, zero_vals))
            starts = self.random_starts
        else:
            # Otherwise start with even proportions of all endmembers.
            a_ini = np.hstack((np.ones(p) / p, np.zeros(int(p * (p - 1) / 2))))
            # Pad the dimensions so it works the same as for multiple
            # random starts.
            a_ini = np.expand_dims(a_ini, 1)
            starts = 1
        # Constrain all values of a to [0, 1]; only the abundances (the
        # first p values) are sum-to-one constrained.
        bound_obj = scipy.optimize.Bounds([0] * a_ini.shape[0], [1] * a_ini.shape[0])
        nl_const = scipy.optimize.NonlinearConstraint(lambda a: np.sum(a[:p]), 1, 1)
        for i in range(N):
            a_GBM_starts_list = []  # results of all the starts for this spectrum
            for start in range(starts):
                a_start = a_ini[:, start]
                a_tmp = scipy.optimize.minimize(
                    opt_func,
                    a_start,
                    args=(x[:, i], E, "GBM"),
                    constraints=nl_const,
                    bounds=bound_obj,
                    method=self.method,
                    options={"maxiter": self.maxiter},
                )["x"]
                a_GBM_starts_list.append(a_tmp)
            # Average over the starts to get the final values of a.
            a_GBM[:, i] = np.mean(np.array(a_GBM_starts_list), axis=0)
            x_GBM[:, i] = model_GBM(E, a_GBM[:, i])
        self.a_GBM = a_GBM[0:p, :]
        self.x_GBM = x_GBM
        return self.a_GBM.T
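
# A minimal usage sketch (not part of the original code): generate one
# spectrum from the generalized bilinear model with known abundances and
# gammas, then unmix it. All numeric values are arbitrary illustrative
# choices; with noiseless input the recovered abundances should be close
# to the true ones.
def _demo_GBM():
    rng = np.random.default_rng(2)
    E = rng.random((3, 50))  # 3 endmembers x 50 wavelength channels
    a_true = np.array([0.5, 0.3, 0.2])  # abundances for a single sample
    gammas = np.array([0.4, 0.1, 0.3])  # gamma_12, gamma_13, gamma_23
    x = model_GBM(E.T, np.concatenate((a_true, gammas)))  # (50,) spectrum
    a_est = GBM(random_starts=5).unmix(x[np.newaxis, :], E)
    print(a_est)  # one row of estimated abundances, approximately a_true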