Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-04-27 22:06:22 +02:00

Merge branch 'params' of github.com:SheffieldML/GPy into params

Commit 32deb2085d: 10 changed files with 177 additions and 54 deletions
@@ -12,6 +12,7 @@ class ListArray(np.ndarray):
     WARNING: This overrides the functionality of x==y!!!
     Use numpy.equal(x,y) for element-wise equality testing.
     """

     def __new__(cls, input_array):
         obj = np.asanyarray(input_array).view(cls)
         return obj
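The __new__ above is numpy's standard view-casting idiom: np.asanyarray accepts any array-like and .view(cls) retypes the buffer without copying, while __array_finalize__ (referenced further down) propagates instance state to views. A minimal self-contained sketch of the same idiom; TaggedArray and its tag attribute are illustrative, not part of GPy:

import numpy as np

class TaggedArray(np.ndarray):
    """Minimal ndarray subclass built by view casting (illustrative)."""
    def __new__(cls, input_array, tag=None):
        obj = np.asanyarray(input_array).view(cls)  # no data copy
        obj.tag = tag
        return obj

    def __array_finalize__(self, obj):
        # Called on explicit construction, view casting, and
        # new-from-template; obj is None only for explicit construction.
        if obj is None:
            return
        self.tag = getattr(obj, 'tag', None)

a = TaggedArray([1., 2., 3.], tag='demo')
assert a[:2].tag == 'demo'  # views inherit the attribute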
@@ -27,24 +28,6 @@ class ParamList(list):
         return False

     pass
-class C(np.ndarray):
-    __array_priority__ = 1.
-    def __new__(cls, array):
-        obj = array.view(cls)
-        return obj
-    #def __array_finalize__(self, obj):
-    #    #print 'finalize'
-    #    return obj
-    def __array_prepare__(self, out_arr, context):
-        #print 'prepare'
-        while type(out_arr) is C:
-            out_arr = out_arr.base
-        return out_arr
-    def __array_wrap__(self, out_arr, context):
-        #print 'wrap', type(self), type(out_arr), context
-        while type(out_arr) is C:
-            out_arr = out_arr.base
-        return out_arr

 class ObservableArray(ListArray, Observable):
     """
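The deleted scratch class C demonstrates one way to opt a subclass out of ufunc result wrapping: give it a high __array_priority__ so numpy asks it to wrap results, then walk .base in __array_prepare__/__array_wrap__ until a plain array remains. A compressed sketch of that pattern, illustrative only; __array_prepare__ is deprecated in modern numpy, so only __array_wrap__ is shown, with *args absorbing the version-dependent extra arguments:

import numpy as np

class Unwrapping(np.ndarray):
    """ufunc results on this subclass come back as plain ndarrays."""
    __array_priority__ = 1.

    def __new__(cls, array):
        return np.asanyarray(array).view(cls)

    def __array_wrap__(self, out_arr, *args):
        while type(out_arr) is Unwrapping:  # strip subclass views
            out_arr = out_arr.base
        return out_arr

u = Unwrapping(np.arange(3.))
print(type(u + 1))  # <class 'numpy.ndarray'>, not Unwrapping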
@@ -63,25 +46,168 @@ class ObservableArray(ListArray, Observable):
         # see InfoArray.__array_finalize__ for comments
         if obj is None: return
         self._observers_ = getattr(obj, '_observers_', None)

     def __setitem__(self, s, val, update=True):
         super(ObservableArray, self).__setitem__(s, val)
         if update:
             self._notify_observers()
         # if self.ndim:
         #     if not np.all(np.equal(self[s], val)):
         #         super(ObservableArray, self).__setitem__(s, val)
         #         if update:
         #             self._notify_observers()
         # else:
         #     if not np.all(np.equal(self, val)):
         #         super(ObservableArray, self).__setitem__(Ellipsis, val)
         #         if update:
         #             self._notify_observers()

     def __getslice__(self, start, stop):
         return self.__getitem__(slice(start, stop))

     def __setslice__(self, start, stop, val):
         return self.__setitem__(slice(start, stop), val)

     def __copy__(self, *args):
         return ObservableArray(self.base.base.copy(*args))

     def copy(self, *args):
         return self.__copy__(*args)

+    def __ror__(self, *args, **kwargs):
+        r = np.ndarray.__ror__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __ilshift__(self, *args, **kwargs):
+        r = np.ndarray.__ilshift__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __irshift__(self, *args, **kwargs):
+        r = np.ndarray.__irshift__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __rrshift__(self, *args, **kwargs):
+        r = np.ndarray.__rrshift__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __ixor__(self, *args, **kwargs):
+        r = np.ndarray.__ixor__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __rxor__(self, *args, **kwargs):
+        r = np.ndarray.__rxor__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __rdivmod__(self, *args, **kwargs):
+        r = np.ndarray.__rdivmod__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __radd__(self, *args, **kwargs):
+        r = np.ndarray.__radd__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __rdiv__(self, *args, **kwargs):
+        r = np.ndarray.__rdiv__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __rtruediv__(self, *args, **kwargs):
+        r = np.ndarray.__rtruediv__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __ipow__(self, *args, **kwargs):
+        r = np.ndarray.__ipow__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __rmul__(self, *args, **kwargs):
+        r = np.ndarray.__rmul__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __rpow__(self, *args, **kwargs):
+        r = np.ndarray.__rpow__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __rsub__(self, *args, **kwargs):
+        r = np.ndarray.__rsub__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __ifloordiv__(self, *args, **kwargs):
+        r = np.ndarray.__ifloordiv__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __isub__(self, *args, **kwargs):
+        r = np.ndarray.__isub__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __ior__(self, *args, **kwargs):
+        r = np.ndarray.__ior__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __itruediv__(self, *args, **kwargs):
+        r = np.ndarray.__itruediv__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __idiv__(self, *args, **kwargs):
+        r = np.ndarray.__idiv__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __rfloordiv__(self, *args, **kwargs):
+        r = np.ndarray.__rfloordiv__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __iand__(self, *args, **kwargs):
+        r = np.ndarray.__iand__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __imod__(self, *args, **kwargs):
+        r = np.ndarray.__imod__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __iadd__(self, *args, **kwargs):
+        r = np.ndarray.__iadd__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __imul__(self, *args, **kwargs):
+        r = np.ndarray.__imul__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
+
+    def __rshift__(self, *args, **kwargs):
+        r = np.ndarray.__rshift__(self, *args, **kwargs)
+        self._notify_observers()
+        return r
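Every added override above follows one four-line template: delegate to np.ndarray, fire _notify_observers(), return the result. A hedged alternative sketch, not GPy's code: the same wrappers could be installed in a loop with a class decorator, which keeps the method list in one place.

import numpy as np

def notifies(*names):
    """Class decorator (illustrative): wrap the named np.ndarray methods
    so every call also triggers the instance's _notify_observers()."""
    def make_wrapper(name):
        base = getattr(np.ndarray, name)
        def method(self, *args, **kwargs):
            r = base(self, *args, **kwargs)
            self._notify_observers()
            return r
        method.__name__ = name
        return method
    def decorate(cls):
        for name in names:
            setattr(cls, name, make_wrapper(name))
        return cls
    return decorate

@notifies('__iadd__', '__isub__', '__imul__', '__ipow__')
class NotifyingArray(np.ndarray):
    def __new__(cls, array):
        return np.asanyarray(array).view(cls)
    def _notify_observers(self):
        print('changed')

a = NotifyingArray([1., 2., 3.])
a += 1  # prints 'changed'

The explicit per-method spelling in the commit trades this compactness for grep-ability and an obvious diff.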
@@ -15,9 +15,9 @@ class Normal(Parameterized):
     '''
     def __init__(self, means, variances, name='latent space'):
         Parameterized.__init__(self, name=name)
-        self.means = Param("mean", means)
-        self.variances = Param('variance', variances, Logexp())
-        self.add_parameters(self.means, self.variances)
+        self.mean = Param("mean", means)
+        self.variance = Param('variance', variances, Logexp())
+        self.add_parameters(self.mean, self.variance)

     def plot(self, *args):
         """
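Logexp() attaches a positivity constraint to the variance parameter. As a hedged aside (this shows the common softplus construction, not a quote of GPy's Logexp): the optimizer works on an unconstrained value x while the model sees the transformed, strictly positive value.

import numpy as np

def logexp(x):      # unconstrained -> positive (softplus)
    return np.log1p(np.exp(x))

def logexp_inv(f):  # positive -> unconstrained
    return np.log(np.expm1(f))

x = 0.3
assert np.isclose(logexp_inv(logexp(x)), x)  # round-trips exactly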
@@ -55,10 +55,7 @@ class SparseGP(GP):
         self.add_parameter(self.Z, index=0)

     def parameters_changed(self):
-        Xvar = self.X_variance
-        if self.X_variance is not None:
-            Xvar = param_to_array(self.X_variance)
-        self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, param_to_array(self.X), Xvar, param_to_array(self.Z), self.likelihood, self.Y)
+        self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.X_variance, self.Z, self.likelihood, self.Y)

         #The derivative of the bound wrt the inducing inputs Z
         self.Z.gradient = self.kern.gradients_X(self.grad_dict['dL_dKmm'], self.Z)
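The new call passes the Param objects straight through, relying on them behaving as ndarrays inside the inference code; the old call first stripped them with param_to_array. A sketch of the assumed semantics of that helper (GPy's actual implementation may differ):

import numpy as np

def param_to_array(*params):
    """Assumed behaviour: drop the Param wrapper, return plain ndarrays."""
    arrays = [np.asarray(p) for p in params]
    return arrays[0] if len(arrays) == 1 else arrays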
@@ -3,7 +3,7 @@
 import numpy as _np
 default_seed = _np.random.seed(123344)

-def bgplvm_test_model(seed=default_seed, optimize=False, verbose=1, plot=False):
+def bgplvm_test_model(seed=default_seed, optimize=False, verbose=1, plot=False, output_dim=1e4):
     """
     model for testing purposes. Samples from a GP with rbf kernel and learns
     the samples with a new kernel. Normally not for optimization, just model checking
@@ -18,7 +18,7 @@ def bgplvm_test_model(seed=default_seed, optimize=False, verbose=1, plot=False):
         input_dim = 3
     else:
         input_dim = 1
-    output_dim = 25
+    output_dim = output_dim

     # generate GPLVM-like data
     X = _np.random.rand(num_inputs, input_dim)
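Note that output_dim = output_dim is a self-assignment and therefore a no-op: the new keyword argument already carries the value into the function body, so the line only marks where the old hard-coded output_dim = 25 used to be.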
@@ -27,7 +27,7 @@ def bgplvm_test_model(seed=default_seed, optimize=False, verbose=1, plot=False):
                 #+ GPy.kern.white(input_dim, 0.01)
                 )
     K = k.K(X)
-    Y = _np.random.multivariate_normal(_np.zeros(num_inputs), K, output_dim).T
+    Y = _np.random.multivariate_normal(_np.zeros(num_inputs), K, (output_dim,)).T

     # k = GPy.kern.rbf_inv(input_dim, .5, _np.ones(input_dim) * 2., ARD=True) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim)
     k = GPy.kern.linear(input_dim)# + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
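The multivariate_normal line is the heart of the generator: each of the output_dim rows drawn is one function sampled from a zero-mean GP with covariance K, and the transpose puts samples into columns. A standalone sketch with an RBF covariance; all names are local to this example:

import numpy as np

num_inputs, output_dim = 50, 4
X = np.random.rand(num_inputs, 1)
sqdist = (X - X.T) ** 2
K = np.exp(-0.5 * sqdist) + 1e-8 * np.eye(num_inputs)  # jitter keeps K PSD
Y = np.random.multivariate_normal(np.zeros(num_inputs), K, output_dim).T
print(Y.shape)  # (num_inputs, output_dim): one sampled function per column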
@@ -266,11 +266,10 @@ def bgplvm_simulation(optimize=True, verbose=1,
     Y = Ylist[0]
     k = kern.linear(Q, ARD=True)# + kern.white(Q, _np.exp(-2)) # + kern.bias(Q)
     m = BayesianGPLVM(Y, Q, init="PCA", num_inducing=num_inducing, kernel=k)
     m.Gaussian_noise = Y.var() / 100.

     if optimize:
         print "Optimizing model:"
-        m.optimize('scg', messages=verbose, max_iters=max_iters,
+        m.optimize('bfgs', messages=verbose, max_iters=max_iters,
                    gtol=.05)
     if plot:
         m.q.plot("BGPLVM Latent Space 1D")
@@ -2,9 +2,8 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)

 from posterior import Posterior
-from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs, dpotri, symmetrify
+from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri, dpotri, dpotrs, symmetrify
 import numpy as np
-from ...util.linalg import dtrtri
 from ...util.caching import Cacher
 from ...util.misc import param_to_array
 log_2_pi = np.log(2*np.pi)
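The consolidated import pulls in two more LAPACK-style helpers, dtrtri (invert a triangular factor) and dpotrs (solve with a Cholesky factor). GPy's util.linalg wrappers are not shown in this diff; a direct SciPy sketch of the corresponding routines, assuming the usual LAPACK semantics:

import numpy as np
from scipy.linalg import lapack

# The names mirror LAPACK: dpotrf factorizes, dpotrs solves A x = b
# given the factor, dtrtri inverts the triangular factor itself.
A = np.array([[4., 2.], [2., 3.]])
L, info = lapack.dpotrf(A, lower=1)              # Cholesky: A = L L^T
x, info = lapack.dpotrs(L, np.ones(2), lower=1)  # solve A x = [1, 1]
Linv, info = lapack.dtrtri(L, lower=1)           # L^{-1}, still triangular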
@@ -85,7 +84,7 @@ class VarDTC(object):
                 tmp = tmp.T
             # no backsubstitution because of bound explosion on tr(A) if not...
             LmInv, _ = dtrtri(Lm, lower=1)
-            A = LmInv.T.dot(psi2_beta.dot(LmInv))
+            A = LmInv.dot(psi2_beta.dot(LmInv.T))
             #print A.sum()
         else:
             if het_noise:
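Reading the surrounding code (hedged accordingly), Lm is the lower Cholesky factor of K_mm, since dtrtri is called with lower=1, and psi2_beta is the noise-precision-scaled Psi_2 statistic. The corrected line then forms the symmetrically whitened matrix

A = L_m^{-1}\,(\beta\,\Psi_2)\,L_m^{-\top},
\qquad
\operatorname{tr}(A) = \beta\,\operatorname{tr}\!\left(K_{mm}^{-1}\Psi_2\right),
\qquad
K_{mm} = L_m L_m^{\top}.

The replaced ordering, LmInv.T.dot(psi2_beta.dot(LmInv)), computes L_m^{-\top}(\beta\Psi_2)L_m^{-1} instead, which has the same trace but is in general a different matrix, so quantities built from A downstream, such as B = I + A, change.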
@@ -97,6 +96,7 @@ class VarDTC(object):

         # factor B
         B = np.eye(num_inducing) + A
+        self.A = A
         LB = jitchol(B)

         # VVT_factor is a matrix such that tdot(VVT_factor) = VVT...this is for efficiency!
@@ -69,8 +69,8 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=np.inf, display=True,
     success = True     # Force calculation of directional derivs.
     nsuccess = 0       # nsuccess counts number of successes.
     beta = 1.0         # Initial scale parameter.
-    betamin = 1.0e-60  # Lower bound on scale.
-    betamax = 1.0e50   # Upper bound on scale.
+    betamin = 1.0e-15  # Lower bound on scale.
+    betamax = 1.0e15   # Upper bound on scale.
     status = "Not converged"

     flog = [fold]
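A plausible reading of the tightened bounds: beta and 1/beta both enter SCG's step-size computation, and extremes like 1e-60 or 1e50 push intermediate products toward float64 underflow or overflow. For reference:

import numpy as np

info = np.finfo(np.float64)
print(info.tiny, info.max)  # ~2.2e-308 and ~1.8e+308
# With beta clamped to [1e-15, 1e15], beta, 1/beta and their products
# with typical gradient norms stay well inside this representable range.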
@@ -71,17 +71,17 @@ class BayesianGPLVM(SparseGP, GPLVM):

     def parameters_changed(self):
         super(BayesianGPLVM, self).parameters_changed()
-        #self._log_marginal_likelihood -= self.KL_divergence()
+        self._log_marginal_likelihood -= self.KL_divergence()

         dL_dmu, dL_dS = self.dL_dmuS()

         # dL:
-        self.q.means.gradient = dL_dmu
-        self.q.variances.gradient = dL_dS
+        self.q.mean.gradient = dL_dmu
+        self.q.variance.gradient = dL_dS

         # dKL:
-        #self.q.means.gradient -= self.X
-        #self.q.variances.gradient -= (1. - (1. / (self.X_variance))) * 0.5
+        self.q.mean.gradient -= self.X
+        self.q.variance.gradient -= (1. - (1. / (self.X_variance))) * 0.5

     def plot_latent(self, plot_inducing=True, *args, **kwargs):
         """
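The now-active dKL lines match the gradients of the KL divergence from the variational posterior to a unit-Gaussian prior over the latent points, assuming, as the code suggests, that q(X) is a factorized Gaussian with means stored in self.X and variances in self.X_variance:

\mathrm{KL}\big(q(X)\,\|\,p(X)\big)
  = \tfrac{1}{2}\sum_{n,q}\left(\mu_{nq}^2 + S_{nq} - \ln S_{nq} - 1\right),
\qquad
\frac{\partial \mathrm{KL}}{\partial \mu_{nq}} = \mu_{nq},
\qquad
\frac{\partial \mathrm{KL}}{\partial S_{nq}} = \tfrac{1}{2}\left(1 - \frac{1}{S_{nq}}\right).

Subtracting these from the likelihood gradients is exactly self.q.mean.gradient -= self.X and self.q.variance.gradient -= (1. - (1. / self.X_variance)) * 0.5.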
@@ -32,7 +32,7 @@ class SparseGPRegression(SparseGP):

         # kern defaults to rbf (plus white for stability)
         if kernel is None:
-            kernel = kern.rbf(input_dim) + kern.white(input_dim, variance=1e-3)
+            kernel = kern.rbf(input_dim)# + kern.white(input_dim, variance=1e-3)

         # Z defaults to a subset of the data
         if Z is None:
@@ -7,6 +7,7 @@ import pylab as pb
 import Tango
 from matplotlib.textpath import TextPath
+from matplotlib.transforms import offset_copy
 from ...kern.parts.linear import Linear


 def plot_ARD(kernel, fignum=None, ax=None, title='', legend=False):
@@ -14,14 +14,14 @@ def plot(parameterized, fignum=None, ax=None, colors=None):

     """
     if ax is None:
-        fig = pb.figure(num=fignum, figsize=(8, min(12, (2 * parameterized.means.shape[1]))))
+        fig = pb.figure(num=fignum, figsize=(8, min(12, (2 * parameterized.mean.shape[1]))))
     if colors is None:
         colors = pb.gca()._get_lines.color_cycle
         pb.clf()
     else:
         colors = iter(colors)
     plots = []
-    means, variances = param_to_array(parameterized.means, parameterized.variances)
+    means, variances = param_to_array(parameterized.mean, parameterized.variance)
     x = np.arange(means.shape[0])
     for i in range(means.shape[1]):
         if ax is None: