diff --git a/GPy/core/parameterization/array_core.py b/GPy/core/parameterization/array_core.py
index 27801e23..3850971a 100644
--- a/GPy/core/parameterization/array_core.py
+++ b/GPy/core/parameterization/array_core.py
@@ -6,6 +6,12 @@ __updated__ = '2013-12-16'
 import numpy as np
 from parameter_core import Observable
 
+class _Array(np.ndarray):
+    def __init__(self, dtype=float, buffer=None, offset=0,
+                 strides=None, order=None, *args, **kwargs):
+        super(_Array, self).__init__(dtype=dtype, buffer=buffer, offset=offset,
+                                     strides=strides, order=order, *args, **kwargs)
+
 class ObservableArray(np.ndarray, Observable):
     """
     An ndarray which reports changes to its observers.
@@ -14,16 +20,14 @@ class ObservableArray(np.ndarray, Observable):
     takes exactly one argument, which is this array itself.
     """
     __array_priority__ = -1 # Never give back ObservableArray
-    def __new__(cls, input_array):
+    def __new__(cls, input_array, *a, **kw):
         if not isinstance(input_array, ObservableArray):
-            obj = np.atleast_1d(input_array).view(cls)
+            obj = np.atleast_1d(np.require(input_array, dtype=np.float64, requirements=['C', 'W'])).view(cls)
         else:
             obj = input_array
         cls.__name__ = "ObservableArray\n "
+        super(ObservableArray, obj).__init__(*a, **kw)
         return obj
 
-    def __init__(self, *a, **kw):
-        super(ObservableArray, self).__init__(*a, **kw)
-
     def __array_finalize__(self, obj):
         # see InfoArray.__array_finalize__ for comments
         if obj is None: return
diff --git a/GPy/core/parameterization/param.py b/GPy/core/parameterization/param.py
index 3ebeb566..a2dc9514 100644
--- a/GPy/core/parameterization/param.py
+++ b/GPy/core/parameterization/param.py
@@ -3,7 +3,7 @@
 import itertools
 import numpy
 
-from parameter_core import OptimizationHandlable, Gradcheckable, adjust_name_for_printing
+from parameter_core import OptimizationHandlable, adjust_name_for_printing
 from array_core import ObservableArray
 
 ###### printing
@@ -43,13 +43,12 @@ class Param(OptimizationHandlable, ObservableArray):
     _fixes_ = None
     _parameters_ = []
     def __new__(cls, name, input_array, default_constraint=None):
-        obj = numpy.atleast_1d(super(Param, cls).__new__(cls, input_array=input_array))
+        obj = numpy.atleast_1d(super(Param, cls).__new__(cls, input_array=input_array, name=name, default_constraint=default_constraint))
         cls.__name__ = "Param"
         obj._current_slice_ = (slice(obj.shape[0]),)
         obj._realshape_ = obj.shape
         obj._realsize_ = obj.size
         obj._realndim_ = obj.ndim
-        obj._updated_ = False
         from lists_and_dicts import SetDict
         obj._tied_to_me_ = SetDict()
         obj._tied_to_ = []
@@ -86,7 +85,6 @@ class Param(OptimizationHandlable, ObservableArray):
         self._realshape_ = getattr(obj, '_realshape_', None)
         self._realsize_ = getattr(obj, '_realsize_', None)
         self._realndim_ = getattr(obj, '_realndim_', None)
-        self._updated_ = getattr(obj, '_updated_', None)
         self._original_ = getattr(obj, '_original_', None)
         self._name = getattr(obj, 'name', None)
         self._gradient_array_ = getattr(obj, '_gradient_array_', None)
@@ -121,14 +119,12 @@ class Param(OptimizationHandlable, ObservableArray):
                  self._realndim_,
                  self._tied_to_me_,
                  self._tied_to_,
-                 self._updated_,
                  )
                 )
 
     def __setstate__(self, state):
         super(Param, self).__setstate__(state[0])
         state = list(state[1])
-        self._updated_ = state.pop()
         self._tied_to_ = state.pop()
         self._tied_to_me_ = state.pop()
         self._realndim_ = state.pop()
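A minimal standalone sketch of the coercion step `ObservableArray.__new__` now performs, assuming only NumPy (`_DemoArray` is an illustrative stand-in, not GPy code): inputs are normalised to a C-contiguous, writable float64 buffer via `np.require` before the subclass view is taken, so integer lists and Fortran-ordered arrays behave identically downstream.

    import numpy as np

    class _DemoArray(np.ndarray):
        # stand-in for ObservableArray: only the np.require coercion is shown
        def __new__(cls, input_array):
            arr = np.require(input_array, dtype=np.float64, requirements=['C', 'W'])
            return np.atleast_1d(arr).view(cls)

    a = _DemoArray([1, 2, 3])                     # integer list -> float64
    b = _DemoArray(np.asfortranarray(np.eye(3)))  # Fortran order -> C order
    assert a.dtype == np.float64
    assert b.flags['C_CONTIGUOUS'] and b.flags['WRITEABLE']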
diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py
index a78cf02d..3917ed09 100644
--- a/GPy/core/parameterization/parameter_core.py
+++ b/GPy/core/parameterization/parameter_core.py
@@ -40,11 +40,9 @@ class Observable(object):
     as an observer. Every time the observable changes, it sends a notification with
     self as only argument to all its observers.
     """
-    _updated = True
     def __init__(self, *args, **kwargs):
+        super(Observable, self).__init__()
         self._observer_callables_ = []
 
-    def __del__(self, *args, **kwargs):
-        del self._observer_callables_
-
     def add_observer(self, observer, callble, priority=0):
         self._insert_sorted(priority, observer, callble)
@@ -161,7 +159,9 @@ class Parentable(object):
     """
     _parent_ = None
    _parent_index_ = None
-
+    def __init__(self, *args, **kwargs):
+        super(Parentable, self).__init__()
+
     def has_parent(self):
         """
         Return whether this parentable object currently has a parent.
@@ -205,6 +205,7 @@ class Gradcheckable(Parentable):
     """
     def __init__(self, *a, **kw):
         super(Gradcheckable, self).__init__(*a, **kw)
+
     def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3):
         """
         Check the gradient of this parameter with respect to the highest parent's
@@ -272,6 +273,9 @@
     Enable enraveled indexes and offsets for this object.
     The raveled index of an object is the index for its parameters in a flattened int array.
     """
+    def __init__(self, *a, **kw):
+        super(Indexable, self).__init__()
+
     def _raveled_index(self):
         """
         Flattened array of ints, specifying the index of this object.
@@ -314,7 +318,7 @@ class Constrainable(Nameable, Indexable):
     :func:`constrain()` and :func:`unconstrain()` are main methods here
     """
     def __init__(self, name, default_constraint=None, *a, **kw):
-        super(Constrainable, self).__init__(name=name, *a, **kw)
+        super(Constrainable, self).__init__(name=name, default_constraint=default_constraint, *a, **kw)
         self._default_constraint_ = default_constraint
         from index_operations import ParameterIndexOperations
         self.constraints = ParameterIndexOperations()
@@ -534,8 +538,11 @@ class OptimizationHandlable(Constrainable, Observable):
     """
     This enables optimization handles on an Object as done in GPy 0.4.
 
-    transformed: make sure the transformations and constraints etc are handled
+    `..._transformed`: make sure the transformations and constraints etc are handled
     """
+    def __init__(self, name, default_constraint=None, *a, **kw):
+        super(OptimizationHandlable, self).__init__(name, default_constraint=default_constraint, *a, **kw)
+
     def transform(self):
         [np.put(self._param_array_, ind, c.finv(self._param_array_[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__]
@@ -625,6 +632,24 @@
         [np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.iteritems() if not p is None]
         self._set_params_transformed(x) # makes sure all of the tied parameters get the same init (since there's only one prior object...)
+    #===========================================================================
+    # For shared memory arrays. This does nothing in Param, but sets the memory
+    # for all parameterized objects
+    #===========================================================================
+    def _propagate_param_grad(self, parray, garray):
+        pi_old_size = 0
+        for pi in self._parameters_:
+            pislice = slice(pi_old_size, pi_old_size+pi.size)
+
+            self._param_array_[pislice] = pi._param_array_.ravel()#, requirements=['C', 'W']).flat
+            self._gradient_array_[pislice] = pi._gradient_array_.ravel()#, requirements=['C', 'W']).flat
+
+            pi._param_array_.data = parray[pislice].data
+            pi._gradient_array_.data = garray[pislice].data
+
+            pi._propagate_param_grad(parray[pislice], garray[pislice])
+            pi_old_size += pi.size
+
 class Parameterizable(OptimizationHandlable):
     def __init__(self, *args, **kwargs):
         super(Parameterizable, self).__init__(*args, **kwargs)
@@ -811,25 +836,24 @@ class Parameterizable(OptimizationHandlable):
             p._parent_index_ = i
 
             pslice = slice(old_size, old_size+p.size)
-            pi_old_size = old_size
-            for pi in p.flattened_parameters:
-                pislice = slice(pi_old_size, pi_old_size+pi.size)
-
-                self._param_array_[pislice] = pi._param_array_.flat
-                self._gradient_array_[pislice] = pi._gradient_array_.flat
-
-                pi._param_array_.data = self._param_array_[pislice].data
-                pi._gradient_array_.data = self._gradient_array_[pislice].data
-
-                pi_old_size += pi.size
+            # first connect all children
+            p._propagate_param_grad(self._param_array_[pslice], self._gradient_array_[pslice])
+
+            # then connect children to self
+            self._param_array_[pslice] = p._param_array_.ravel()#, requirements=['C', 'W']).ravel(order='C')
+            self._gradient_array_[pslice] = p._gradient_array_.ravel()#, requirements=['C', 'W']).ravel(order='C')
+
+            if not p._param_array_.flags['C_CONTIGUOUS']:
+                import ipdb;ipdb.set_trace()
 
             p._param_array_.data = self._param_array_[pslice].data
             p._gradient_array_.data = self._gradient_array_[pslice].data
 
             self._param_slices_.append(pslice)
+
             self._add_parameter_name(p, ignore_added_names=ignore_added_names)
             old_size += p.size
-
+
     #===========================================================================
     # notification system
     #===========================================================================
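The intent of `_propagate_param_grad` and the reworked `_connect_parameters` above is that every child parameter ends up backed by one flat parent buffer, so parent and children always see the same values. A rough sketch of that layout using plain NumPy slice views in place of the `.data` rebinding the patch performs (all names here are illustrative):

    import numpy as np

    parent_params = np.zeros(5)      # flat parameter array owned by the parent
    child_a = parent_params[0:2]     # each child holds a view into the same buffer
    child_b = parent_params[2:5]

    child_a[:] = [1.0, 2.0]          # in-place writes through a child ...
    parent_params[2:] = 7.0          # ... or through the parent ...
    assert child_b[0] == 7.0         # ... touch the same memory
    assert np.may_share_memory(parent_params, child_a)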
diff --git a/GPy/core/parameterization/parameterized.py b/GPy/core/parameterization/parameterized.py
index 6d06018a..a98f0098 100644
--- a/GPy/core/parameterization/parameterized.py
+++ b/GPy/core/parameterization/parameterized.py
@@ -65,8 +65,8 @@ class Parameterized(Parameterizable, Pickleable):
     #  **Never** call parameters_changed() yourself
     __metaclass__ = ParametersChangedMeta
     #===========================================================================
-    def __init__(self, name=None, *a, **kw):
-        super(Parameterized, self).__init__(name=name, parent=None, parent_index=None, *a, **kw)
+    def __init__(self, name=None, parameters=[], *a, **kw):
+        super(Parameterized, self).__init__(name=name, *a, **kw)
         self._in_init_ = True
         self._parameters_ = ArrayList()
         self.size = sum(p.size for p in self._parameters_)
@@ -76,6 +76,7 @@ class Parameterized(Parameterizable, Pickleable):
         self._param_slices_ = []
         self._connect_parameters()
         del self._in_init_
+        self.add_parameters(*parameters)
 
     def build_pydot(self, G=None):
         import pydot # @UnresolvedImport
@@ -205,25 +206,29 @@ class Parameterized(Parameterizable, Pickleable):
         return found_params
 
     def __getitem__(self, name, paramlist=None):
-        if paramlist is None:
-            paramlist = self.grep_param_names(name)
-        if len(paramlist) < 1: raise AttributeError, name
-        if len(paramlist) == 1:
-            if isinstance(paramlist[-1], Parameterized):
-                paramlist = paramlist[-1].flattened_parameters
-                if len(paramlist) != 1:
-                    return ParamConcatenation(paramlist)
-            return paramlist[-1]
-        return ParamConcatenation(paramlist)
+        if isinstance(name, (int, slice, tuple, np.ndarray)):
+            return self._param_array_[name]
+        else:
+            if paramlist is None:
+                paramlist = self.grep_param_names(name)
+            if len(paramlist) < 1: raise AttributeError, name
+            if len(paramlist) == 1:
+                if isinstance(paramlist[-1], Parameterized):
+                    paramlist = paramlist[-1].flattened_parameters
+                if len(paramlist) != 1:
+                    return ParamConcatenation(paramlist)
+                return paramlist[-1]
+            return ParamConcatenation(paramlist)
 
     def __setitem__(self, name, value, paramlist=None):
         if isinstance(name, (slice, tuple, np.ndarray)):
             self._param_array_[name] = value
+            self.notify_observers()
         else:
             try:
                 param = self.__getitem__(name, paramlist)
             except AttributeError as a:
                 raise a
             param[:] = value
-
+
     def __setattr__(self, name, val):
         # override the default behaviour, if setting a param, so broadcasting can by used
         if hasattr(self, '_parameters_'):
diff --git a/GPy/core/parameterization/variational.py b/GPy/core/parameterization/variational.py
index 71921ab1..87b291a7 100644
--- a/GPy/core/parameterization/variational.py
+++ b/GPy/core/parameterization/variational.py
@@ -21,7 +21,7 @@ class VariationalPrior(Parameterized):
         updates the gradients for mean and variance **in place**
         """
         raise NotImplementedError, "override this for variational inference of latent space"
-
+
 class NormalPrior(VariationalPrior):
     def KL_divergence(self, variational_posterior):
         var_mean = np.square(variational_posterior.mean).sum()
@@ -63,20 +63,38 @@ class SpikeAndSlabPrior(VariationalPrior):
 
 class VariationalPosterior(Parameterized):
-    def __init__(self, means=None, variances=None, name=None, **kw):
-        super(VariationalPosterior, self).__init__(name=name, **kw)
+    def __init__(self, means=None, variances=None, name=None, *a, **kw):
+        super(VariationalPosterior, self).__init__(name=name, *a, **kw)
         self.mean = Param("mean", means)
+        self.variance = Param("variance", variances, Logexp())
         self.ndim = self.mean.ndim
         self.shape = self.mean.shape
-        self.variance = Param("variance", variances, Logexp())
-        self.add_parameters(self.mean, self.variance)
         self.num_data, self.input_dim = self.mean.shape
+        self.add_parameters(self.mean, self.variance)
         if self.has_uncertain_inputs():
             assert self.variance.shape == self.mean.shape, "need one variance per sample and dimenion"
 
     def has_uncertain_inputs(self):
         return not self.variance is None
 
+    def __getitem__(self, s):
+        if isinstance(s, (int, slice, tuple, list, np.ndarray)):
+            import copy
+            n = self.__new__(self.__class__, self.name)
+            dc = self.__dict__.copy()
+            dc['mean'] = self.mean[s]
+            dc['variance'] = self.variance[s]
+            dc['_parameters_'] = copy.copy(self._parameters_)
+            n.__dict__.update(dc)
+            n._parameters_[dc['mean']._parent_index_] = dc['mean']
+            n._parameters_[dc['variance']._parent_index_] = dc['variance']
+            n.ndim = n.mean.ndim
+            n.shape = n.mean.shape
+            n.num_data = n.mean.shape[0]
+            n.input_dim = n.mean.shape[1] if n.ndim != 1 else 1
+            return n
+        else:
+            return super(VariationalPrior, self).__getitem__(s)
 
 class NormalPosterior(VariationalPosterior):
     '''
@@ -107,7 +125,7 @@ class SpikeAndSlabPosterior(VariationalPosterior):
         super(SpikeAndSlabPosterior, self).__init__(means, variances, name)
         self.gamma = Param("binary_prob",binary_prob, Logistic(1e-10,1.-1e-10))
         self.add_parameter(self.gamma)
-
+
     def plot(self, *args):
         """
         Plot latent space X in 1D:
diff --git a/GPy/examples/dimensionality_reduction.py b/GPy/examples/dimensionality_reduction.py
index 9ebb54a2..0e440ab7 100644
--- a/GPy/examples/dimensionality_reduction.py
+++ b/GPy/examples/dimensionality_reduction.py
@@ -324,14 +324,14 @@ def mrd_simulation(optimize=True, verbose=True, plot=True, plot_sim=True, **kw):
     D1, D2, D3, N, num_inducing, Q = 60, 20, 36, 60, 6, 5
     _, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim)
 
-    likelihood_list = [Gaussian(x, normalize=True) for x in Ylist]
-
-    k = kern.Linear(Q, ARD=True) + kern.Bias(Q, _np.exp(-2)) + kern.White(Q, _np.exp(-2))
-    m = MRD(likelihood_list, input_dim=Q, num_inducing=num_inducing, kernels=k, initx="", initz='permute', **kw)
-    m.ensure_default_constraints()
-
-    for i, bgplvm in enumerate(m.bgplvms):
-        m['{}_noise'.format(i)] = bgplvm.likelihood.Y.var() / 500.
+
+    #Ylist = [Ylist[0]]
+    k = [kern.Linear(Q, ARD=True) + kern.White(Q, 1e-4) for _ in range(len(Ylist))]
+    m = MRD(Ylist, input_dim=Q, num_inducing=num_inducing, kernel=k, initx="", initz='permute', **kw)
+
+    m['.*noise'] = [Y.var()/500. for Y in Ylist]
+    #for i, Y in enumerate(Ylist):
+    #    m['.*Y_{}.*Gaussian.*noise'.format(i)] = Y.var(1) / 500.
 
     if optimize:
         print "Optimizing Model:"
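The `m['.*noise'] = [...]` line above relies on the string/array dispatch added to `Parameterized.__getitem__` and `__setitem__`: string keys are treated as regular expressions over parameter names, while numeric keys index the flat parameter array directly. A self-contained sketch of that convention (`TinyParamStore` and its parameter names are made up for illustration):

    import re
    import numpy as np

    class TinyParamStore(object):
        def __init__(self):
            self.param_array = np.zeros(4)
            self.names = {'linear.variances': slice(0, 3), 'Gaussian_noise': slice(3, 4)}

        def __setitem__(self, key, value):
            if isinstance(key, (int, slice, tuple, np.ndarray)):
                self.param_array[key] = value           # numeric indexing into the flat array
            else:
                for name, sl in self.names.items():
                    if re.search(key, name):            # regex lookup over parameter names
                        self.param_array[sl] = value

    m = TinyParamStore()
    m['.*noise'] = 0.01        # mirrors m['.*noise'] = [...] in the example above
    assert m.param_array[3] == 0.01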
diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py
index 77fe057d..6498664a 100644
--- a/GPy/kern/_src/add.py
+++ b/GPy/kern/_src/add.py
@@ -1,12 +1,9 @@
 # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
-import sys
 import numpy as np
 import itertools
-from linear import Linear
 from ...core.parameterization import Parameterized
-from ...core.parameterization.param import Param
 from kern import Kern
 
 class Add(Kern):
@@ -42,8 +39,11 @@ class Add(Kern):
         else:
             return sum([p.K(X[:, i_s], X2[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)])
 
-    def update_gradients_full(self, dL_dK, X):
-        [p.update_gradients_full(dL_dK, X[:,i_s]) for p, i_s in zip(self._parameters_, self.input_slices)]
+    def update_gradients_full(self, dL_dK, X, X2=None):
+        if X2 is None:
+            [p.update_gradients_full(dL_dK, X[:,i_s], X2) for p, i_s in zip(self._parameters_, self.input_slices)]
+        else:
+            [p.update_gradients_full(dL_dK, X[:,i_s], X2[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)]
 
     def gradients_X(self, dL_dK, X, X2=None):
         """Compute the gradient of the objective function with respect to X.
@@ -68,19 +68,18 @@ class Add(Kern):
 
     def psi0(self, Z, variational_posterior):
-        return np.sum([p.psi0(Z[:, i_s], mu[:, i_s], S[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)],0)
+        return np.sum([p.psi0(Z[:, i_s], variational_posterior[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)],0)
 
     def psi1(self, Z, variational_posterior):
-        return np.sum([p.psi1(Z[:, i_s], mu[:, i_s], S[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)], 0)
+        return np.sum([p.psi1(Z[:, i_s], variational_posterior[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)], 0)
 
     def psi2(self, Z, variational_posterior):
-        psi2 = np.sum([p.psi2(Z[:, i_s], mu[:, i_s], S[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)], 0)
+        psi2 = np.sum([p.psi2(Z[:, i_s], variational_posterior[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)], 0)
 
         # compute the "cross" terms
-        from white import White
+        from static import White, Bias
         from rbf import RBF
         #from rbf_inv import RBFInv
-        from bias import Bias
         from linear import Linear
         #ffrom fixed import Fixed
 
@@ -91,24 +90,20 @@ class Add(Kern):
             # rbf X bias
             #elif isinstance(p1, (Bias, Fixed)) and isinstance(p2, (RBF, RBFInv)):
             elif isinstance(p1, Bias) and isinstance(p2, (RBF, Linear)):
-                tmp = p2.psi1(Z[:,i2], mu[:,i2], S[:,i2])
+                tmp = p2.psi1(Z[:,i2], variational_posterior[:, i_s])
                 psi2 += p1.variance * (tmp[:, :, None] + tmp[:, None, :])
             #elif isinstance(p2, (Bias, Fixed)) and isinstance(p1, (RBF, RBFInv)):
             elif isinstance(p2, Bias) and isinstance(p1, (RBF, Linear)):
-                tmp = p1.psi1(Z[:,i1], mu[:,i1], S[:,i1])
+                tmp = p1.psi1(Z[:,i1], variational_posterior[:, i_s])
                 psi2 += p2.variance * (tmp[:, :, None] + tmp[:, None, :])
             else:
                 raise NotImplementedError, "psi2 cannot be computed for this kernel"
         return psi2
 
     def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
-        from white import White
-        from rbf import RBF
-        #from rbf_inv import RBFInv
-        #from bias import Bias
-        from linear import Linear
-        #ffrom fixed import Fixed
-
+        from static import White, Bias
+        mu, S = variational_posterior.mean, variational_posterior.variance
+
         for p1, is1 in zip(self._parameters_, self.input_slices):
             #compute the effective dL_dpsi1. Extra terms appear becaue of the cross terms in psi2!
@@ -121,20 +116,15 @@ class Add(Kern):
             elif isinstance(p2, Bias):
                 eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.variance * 2.
             else:
-                eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z[:,is2], mu[:,is2], S[:,is2]) * 2.
+                eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z[:,is2], variational_posterior[:, is1]) * 2.
 
-            p1.update_gradients_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, mu[:,is1], S[:,is1], Z[:,is1])
+            p1.update_gradients_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z[:,is1], variational_posterior[:, is1])
 
     def gradients_Z_expectations(self, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
-        from white import White
-        from rbf import RBF
-        #from rbf_inv import rbfinv
-        from bias import Bias
-        from linear import Linear
-        #ffrom fixed import fixed
-
+        from static import White, Bias
+
         target = np.zeros(Z.shape)
         for p1, is1 in zip(self._parameters_, self.input_slices):
@@ -148,22 +138,17 @@ class Add(Kern):
             elif isinstance(p2, Bias):
                 eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.variance * 2.
             else:
-                eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z[:,is2], mu[:,is2], S[:,is2]) * 2.
+                eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z[:,is2], variational_posterior[:, is2]) * 2.
 
-            target += p1.gradients_z_variational(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, mu[:,is1], S[:,is1], Z[:,is1])
+            target += p1.gradients_Z_expectations(eff_dL_dpsi1, dL_dpsi2, Z[:,is1], variational_posterior[:, is1])
         return target
 
     def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
-        from white import white
-        from rbf import rbf
-        #from rbf_inv import rbfinv
-        #from bias import bias
-        from linear import linear
-        #ffrom fixed import fixed
-
-        target_mu = np.zeros(mu.shape)
-        target_S = np.zeros(S.shape)
+        from static import White, Bias
+
+        target_mu = np.zeros(variational_posterior.shape)
+        target_S = np.zeros(variational_posterior.shape)
         for p1, is1 in zip(self._parameters_, self.input_slices):
             #compute the effective dL_dpsi1. extra terms appear becaue of the cross terms in psi2!
@@ -171,15 +156,15 @@ class Add(Kern):
             for p2, is2 in zip(self._parameters_, self.input_slices):
                 if p2 is p1:
                     continue
-                if isinstance(p2, white):
+                if isinstance(p2, White):
                     continue
-                elif isinstance(p2, bias):
+                elif isinstance(p2, Bias):
                     eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.variance * 2.
                 else:
-                    eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(z[:,is2], mu[:,is2], s[:,is2]) * 2.
+                    eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z[:,is2], variational_posterior[:, is2]) * 2.
 
-            a, b = p1.gradients_qX_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, mu[:,is1], s[:,is1], z[:,is1])
+            a, b = p1.gradients_qX_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z[:,is1], variational_posterior[:, is1])
             target_mu += a
             target_S += b
         return target_mu, target_S
diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py
index 47166156..e1106275 100644
--- a/GPy/kern/_src/kern.py
+++ b/GPy/kern/_src/kern.py
@@ -89,7 +89,7 @@ class Kern(Parameterized):
         """
         Returns the sensitivity for each dimension of this kernel.
         """
-        return self.kern.input_sensitivity()
+        return np.zeros(self.input_dim)
 
     def __add__(self, other):
         """ Overloading of the '+' operator. for more control, see self.add """
diff --git a/GPy/kern/_src/static.py b/GPy/kern/_src/static.py
index 135e3f9e..f344357c 100644
--- a/GPy/kern/_src/static.py
+++ b/GPy/kern/_src/static.py
@@ -55,7 +55,7 @@ class White(Static):
     def psi2(self, Z, variational_posterior):
         return np.zeros((variational_posterior.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64)
 
-    def update_gradients_full(self, dL_dK, X):
+    def update_gradients_full(self, dL_dK, X, X2=None):
         self.variance.gradient = np.trace(dL_dK)
 
     def update_gradients_diag(self, dL_dKdiag, X):
@@ -79,10 +79,10 @@ class Bias(Static):
         self.variance.gradient = dL_dK.sum()
 
     def update_gradients_diag(self, dL_dKdiag, X):
-        self.variance.gradient = dL_dK.sum()
+        self.variance.gradient = dL_dKdiag.sum()
 
     def psi2(self, Z, variational_posterior):
-        ret = np.empty((mu.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64)
+        ret = np.empty((variational_posterior.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64)
         ret[:] = self.variance**2
         return ret
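For reference, the shape convention behind the `Bias.psi2` fix in static.py: the leading axis now comes from the variational posterior's number of data points instead of the previously undefined `mu`. A standalone sketch with a stand-in posterior object (not GPy's classes):

    import numpy as np

    def bias_psi2(variance, Z, variational_posterior):
        # mirrors the fixed Bias.psi2: shape (num_data, num_inducing, num_inducing)
        ret = np.empty((variational_posterior.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64)
        ret[:] = variance ** 2
        return ret

    class FakePosterior(object):
        shape = (10, 2)              # (num_data, input_dim), as VariationalPosterior exposes

    Z = np.zeros((5, 2))             # inducing inputs
    assert bias_psi2(0.5, Z, FakePosterior()).shape == (10, 5, 5)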