mirror of https://github.com/SheffieldML/GPy.git

domains added and class names in priors capitalized
commit c7ac1ed9d8, parent 3546650d15
7 changed files with 61 additions and 49 deletions
GPy/core/domains.py (new file, 10 additions)
@@ -0,0 +1,10 @@
+'''
+Created on 4 Jun 2013
+
+@author: maxz
+'''
+
+REAL = 'real'
+POSITIVE = 'positive'
+NEGATIVE = 'negative'
+BOUNDED = 'bounded'
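The new module is just four module-level string constants naming parameter domains. Because every importer receives the same string objects, identity comparison with `is` works, which is why a later hunk switches `t.domain == POSITIVE` to `t.domain is POSITIVE`. A minimal sketch of the intended use, with a hypothetical prior-like object:

    from GPy.core.domains import REAL, POSITIVE

    class PositivePrior:              # hypothetical stand-in for a Prior subclass
        domain = POSITIVE             # e.g. a Gamma prior only supports x > 0

    p = PositivePrior()
    if p.domain is POSITIVE:          # identity check: one shared string object
        print "parameter must be constrained positive"
    elif p.domain is REAL:
        print "unconstrained parameter is fine"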
@@ -41,16 +41,16 @@ class model(parameterised):
         Arguments
         ---------
         which -- string, regexp, or integer array
-        what -- instance of a prior class
+        what -- instance of a Prior class
 
         Notes
         -----
-        Asserts that the prior is suitable for the constraint. If the
+        Asserts that the Prior is suitable for the constraint. If the
         wrong constraint is in place, an error is raised. If no
         constraint is in place, one is added (warning printed).
 
-        For tied parameters, the prior will only be "counted" once, thus
-        a prior object is only inserted on the first tied index
+        For tied parameters, the Prior will only be "counted" once, thus
+        a Prior object is only inserted on the first tied index
         """
 
         which = self.grep_param_names(which)
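A hedged usage sketch of the API documented above, assuming the docstring belongs to `model.set_prior(which, what)`, that `m` is an existing GPy model, and that the prior classes are imported from the priors module (parameter names and prior arguments here are made up):

    m.set_prior('.*lengthscale', Gamma.from_EV(1., 2.))  # positive parameter: Gamma Prior (domain POSITIVE)
    m.set_prior('offset', Gaussian(0., 1.))              # unconstrained parameter: domain REAL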
@@ -58,24 +58,24 @@ class model(parameterised):
         # check tied situation
         tie_partial_matches = [tie for tie in self.tied_indices if (not set(tie).isdisjoint(set(which))) & (not set(tie) == set(which))]
         if len(tie_partial_matches):
-            raise ValueError, "cannot place prior across partial ties"
+            raise ValueError, "cannot place Prior across partial ties"
         tie_matches = [tie for tie in self.tied_indices if set(which) == set(tie) ]
         if len(tie_matches) > 1:
-            raise ValueError, "cannot place prior across multiple ties"
+            raise ValueError, "cannot place Prior across multiple ties"
         elif len(tie_matches) == 1:
-            which = which[:1] # just place a prior object on the first parameter
+            which = which[:1] # just place a Prior object on the first parameter
 
 
         # check constraints are okay
 
         if what.domain is POSITIVE:
-            constrained_positive_indices = [i for i, t in zip(self.constrained_indices, self.constraints) if t.domain == POSITIVE]
+            constrained_positive_indices = [i for i, t in zip(self.constrained_indices, self.constraints) if t.domain is POSITIVE]
             if len(constrained_positive_indices):
                 constrained_positive_indices = np.hstack(constrained_positive_indices)
             else:
                 constrained_positive_indices = np.zeros(shape=(0,))
             bad_constraints = np.setdiff1d(self.all_constrained_indices(), constrained_positive_indices)
-            assert not np.any(which[:, None] == bad_constraints), "constraint and prior incompatible"
+            assert not np.any(which[:, None] == bad_constraints), "constraint and Prior incompatible"
             unconst = np.setdiff1d(which, constrained_positive_indices)
             if len(unconst):
                 print "Warning: constraining parameters to be positive:"
@ -83,11 +83,11 @@ class model(parameterised):
|
|||
print '\n'
|
||||
self.constrain_positive(unconst)
|
||||
elif what.domain is REAL:
|
||||
assert not np.any(which[:, None] == self.all_constrained_indices()), "constraint and prior incompatible"
|
||||
assert not np.any(which[:, None] == self.all_constrained_indices()), "constraint and Prior incompatible"
|
||||
else:
|
||||
raise ValueError, "prior not recognised"
|
||||
raise ValueError, "Prior not recognised"
|
||||
|
||||
# store the prior in a local list
|
||||
# store the Prior in a local list
|
||||
for w in which:
|
||||
self.priors[w] = what
|
||||
|
||||
|
|
@ -105,7 +105,7 @@ class model(parameterised):
|
|||
raise AttributeError, "no parameter matches %s" % name
|
||||
|
||||
def log_prior(self):
|
||||
"""evaluate the prior"""
|
||||
"""evaluate the Prior"""
|
||||
return np.sum([p.lnpdf(x) for p, x in zip(self.priors, self._get_params()) if p is not None])
|
||||
|
||||
def _log_prior_gradients(self):
|
||||
|
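`log_prior` sums `lnpdf` over only those parameters that have a prior; entries of `self.priors` are `None` otherwise. A toy illustration of the same pattern, with a made-up one-parameter prior:

    import numpy as np

    class ToyPrior:                          # hypothetical: only lnpdf is needed here
        def lnpdf(self, x):
            return -0.5 * x ** 2             # unnormalised standard-normal log-density

    priors = [ToyPrior(), None, ToyPrior()]  # None: no prior on the middle parameter
    params = np.array([0.1, 5.0, -0.3])
    log_prior = np.sum([p.lnpdf(x) for p, x in zip(priors, params) if p is not None])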
@@ -129,17 +129,17 @@ class model(parameterised):
     def randomize(self):
         """
         Randomize the model.
-        Make this draw from the prior if one exists, else draw from N(0,1)
+        Make this draw from the Prior if one exists, else draw from N(0,1)
         """
         # first take care of all parameters (from N(0,1))
         x = self._get_params_transformed()
         x = np.random.randn(x.size)
         self._set_params_transformed(x)
-        # now draw from prior where possible
+        # now draw from Prior where possible
         x = self._get_params()
         [np.put(x, i, p.rvs(1)) for i, p in enumerate(self.priors) if not p is None]
         self._set_params(x)
-        self._set_params_transformed(self._get_params_transformed()) # makes sure all of the tied parameters get the same init (since there's only one prior object...)
+        self._set_params_transformed(self._get_params_transformed()) # makes sure all of the tied parameters get the same init (since there's only one Prior object...)
 
 
     def optimize_restarts(self, Nrestarts=10, robust=False, verbose=True, parallel=False, num_processes=None, **kwargs):
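Together with `optimize_restarts` (whose signature appears as context above), `randomize` enables cheap multi-start optimisation; a usage sketch assuming `m` is an existing GPy model:

    m.randomize()                                  # draw from Priors where set, else from N(0,1)
    m.optimize_restarts(Nrestarts=5, robust=True)  # restart the optimiser five times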
@@ -279,7 +279,7 @@ class model(parameterised):
 
     def Laplace_covariance(self):
         """return the covariance matrix of a Laplace approximation at the current (stationary) point"""
-        # TODO add in the prior contributions for MAP estimation
+        # TODO add in the Prior contributions for MAP estimation
         # TODO fix the hessian for tied, constrained and fixed components
         if hasattr(self, 'log_likelihood_hessian'):
             A = -self.log_likelihood_hessian()
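A minimal numerical sketch of what the docstring describes: at a (local) optimum the Laplace approximation is a Gaussian whose covariance is the inverse of A = -hessian of the log-likelihood (the Hessian values below are made up):

    import numpy as np

    hess = np.array([[-4.0, 0.5],
                     [ 0.5, -2.0]])   # log-likelihood Hessian at the optimum (example values)
    A = -hess                         # as in the code above
    laplace_cov = np.linalg.inv(A)    # covariance of the Laplace approximation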
@@ -318,14 +318,14 @@ class model(parameterised):
         log_prior = self.log_prior()
         obj_funct = '\nLog-likelihood: {0:.3e}'.format(log_like)
         if len(''.join(strs)) != 0:
-            obj_funct += ', Log prior: {0:.3e}, LL+prior = {0:.3e}'.format(log_prior, log_like + log_prior)
+            obj_funct += ', Log Prior: {0:.3e}, LL+Prior = {1:.3e}'.format(log_prior, log_like + log_prior)
         obj_funct += '\n\n'
         s[0] = obj_funct + s[0]
         s[0] += "|{h:^{col}}".format(h='Prior', col=width)
         s[1] += '-' * (width + 1)
 
         for p in range(2, len(strs) + 2):
-            s[p] += '|{prior:^{width}}'.format(prior=strs[p - 2], width=width)
+            s[p] += '|{Prior:^{width}}'.format(Prior=strs[p - 2], width=width)
 
         return '\n'.join(s)
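The `'{h:^{col}}'` spec centres the header text in a `width`-character column, for example:

    width = 12
    print '|{h:^{col}}'.format(h='Prior', col=width)   # '|' plus 'Prior' centred in 12 characters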
@@ -9,7 +9,7 @@ from ..util.linalg import pdinv
 from GPy.core.domains import REAL, POSITIVE
 import warnings
 
-class prior:
+class Prior:
     domain = None
     def pdf(self, x):
         return np.exp(self.lnpdf(x))
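Given this base class, a new prior needs only `domain` (checked by `set_prior`), `lnpdf` (from which `pdf` above is derived), and `rvs` (used by `randomize`). A hedged sketch of a subclass that is not part of this commit, an exponential prior:

    import numpy as np
    from GPy.core.domains import POSITIVE

    class Exponential(Prior):                 # hypothetical subclass, for illustration
        domain = POSITIVE                     # support is x > 0
        def __init__(self, beta):
            self.beta = beta                  # rate parameter
        def lnpdf(self, x):
            return np.log(self.beta) - self.beta * x
        def rvs(self, n):
            return np.random.exponential(scale=1. / self.beta, size=n)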
@@ -22,7 +22,7 @@ class prior:
         pb.plot(xx, self.pdf(xx), 'r', linewidth=2)
 
 
-class Gaussian(prior):
+class Gaussian(Prior):
     """
     Implementation of the univariate Gaussian probability function, coupled with random variables.
 
@@ -52,7 +52,7 @@ class Gaussian(prior):
         return np.random.randn(n) * self.sigma + self.mu
 
 
-class log_Gaussian(prior):
+class LogGaussian(Prior):
     """
     Implementation of the univariate *log*-Gaussian probability function, coupled with random variables.
 
@@ -82,7 +82,7 @@ class log_Gaussian(prior):
         return np.exp(np.random.randn(n) * self.sigma + self.mu)
 
 
-class multivariate_Gaussian:
+class MultivariateGaussian:
     """
     Implementation of the multivariate Gaussian probability function, coupled with random variables.
 
@@ -133,20 +133,10 @@ class multivariate_Gaussian:
 
 
 def gamma_from_EV(E, V):
     """
     Creates an instance of a gamma prior by specifying the Expected value(s)
     and Variance(s) of the distribution.
 
     :param E: expected value
     :param V: variance
 
     """
-    a = np.square(E) / V
-    b = E / V
-    return gamma(a, b)
+    warnings.warn("use Gamma.from_EV to create Gamma Prior", FutureWarning)
+    return Gamma.from_EV(E, V)
 
-class gamma(prior):
+class Gamma(Prior):
     """
     Implementation of the Gamma probability function, coupled with random variables.
 
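Existing call sites keep working through the shim but now emit a FutureWarning; new code should call the static method directly (assuming both names are imported from this priors module):

    g_new = Gamma.from_EV(1., 10.)   # preferred
    g_old = gamma_from_EV(1., 10.)   # equivalent, but warns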
@@ -184,8 +174,20 @@ class gamma(prior):
 
     def rvs(self, n):
         return np.random.gamma(scale=1. / self.b, shape=self.a, size=n)
+    @staticmethod
+    def from_EV(E, V):
+        """
+        Creates an instance of a Gamma Prior by specifying the Expected value(s)
+        and Variance(s) of the distribution.
+
+        :param E: expected value
+        :param V: variance
+        """
+        a = np.square(E) / V
+        b = E / V
+        return Gamma(a, b)
 
-class inverse_gamma(prior):
+class inverse_gamma(Prior):
     """
     Implementation of the inverse-Gamma probability function, coupled with random variables.
 
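The `from_EV` formulas follow from the Gamma moments: with shape a and rate b, the mean is a/b and the variance a/b**2, so solving E = a/b and V = a/b**2 gives a = E**2/V and b = E/V. A quick Monte Carlo check (values arbitrary):

    import numpy as np

    E, V = 1.5, 0.5
    a, b = np.square(E) / V, E / V                      # a = 4.5, b = 3.0
    s = np.random.gamma(shape=a, scale=1. / b, size=200000)
    print s.mean(), s.var()                             # ~1.5 and ~0.5, up to sampling noise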
@@ -156,8 +156,8 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=500, display=True, xto
            # beta = 1. # TODO: betareset!!
            nsuccess = 0
        elif success:
-            gamma = np.dot(gradold - gradnew, gradnew) / (mu)
-            d = gamma * d - gradnew
+            Gamma = np.dot(gradold - gradnew, gradnew) / (mu)
+            d = Gamma * d - gradnew
    else:
        # If we get here, then we haven't terminated in the given number of
        # iterations.
@@ -165,7 +165,7 @@ class EP(likelihood):
         """
         Posterior approximation: q(f|y) = N(f| mu, Sigma)
         Sigma = Diag + P*R.T*R*P.T + K
-        mu = w + P*gamma
+        mu = w + P*Gamma
         """
         mu = np.zeros(self.N)
         LLT = Kmm.copy()
@@ -255,10 +255,10 @@ class EP(likelihood):
         """
         Posterior approximation: q(f|y) = N(f| mu, Sigma)
         Sigma = Diag + P*R.T*R*P.T + K
-        mu = w + P*gamma
+        mu = w + P*Gamma
         """
         self.w = np.zeros(self.N)
-        self.gamma = np.zeros(M)
+        self.Gamma = np.zeros(M)
         mu = np.zeros(self.N)
         P = P0.copy()
         R = R0.copy()
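The renamed `self.Gamma` is the M-dimensional weight vector of the low-rank posterior in the docstring. A self-contained numpy sketch of how the moments are assembled (shapes illustrative: N data points, M inducing points; values made up):

    import numpy as np

    N, M = 5, 3
    Diag = np.ones(N)                       # diagonal part of Sigma
    P = np.random.randn(N, M)               # N x M projection matrix
    R = np.triu(np.random.randn(M, M))      # M x M triangular factor
    w = np.zeros(N)
    Gamma = np.zeros(M)

    RPT = np.dot(R, P.T)                               # R * P.T
    Sigma_diag = Diag + np.sum(RPT.T * RPT.T, -1)      # diag(Diag + P R.T R P.T)
    mu = w + np.dot(P, Gamma)                          # mu = w + P * Gamma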
@@ -311,10 +311,10 @@ class EP(likelihood):
                 RTR = np.dot(R.T,np.dot(np.eye(M) - Delta_tau/(1.+Delta_tau*Sigma_diag[i]) * np.dot(Rp_i,Rp_i.T),R))
                 R = jitchol(RTR).T
                 self.w[i] += (Delta_v - Delta_tau*self.w[i])*dii/dtd1
-                self.gamma += (Delta_v - Delta_tau*mu[i])*np.dot(RTR,P[i,:].T)
+                self.Gamma += (Delta_v - Delta_tau*mu[i])*np.dot(RTR,P[i,:].T)
                 RPT = np.dot(R,P.T)
                 Sigma_diag = Diag + np.sum(RPT.T*RPT.T,-1)
-                mu = self.w + np.dot(P,self.gamma)
+                mu = self.w + np.dot(P,self.Gamma)
             self.iterations += 1
             # Sigma recomputation with Cholesky decomposition
             Iplus_Dprod_i = 1./(1.+ Diag0 * self.tau_tilde)
@@ -326,8 +326,8 @@ class EP(likelihood):
             RPT = np.dot(R,P.T)
             Sigma_diag = Diag + np.sum(RPT.T*RPT.T,-1)
             self.w = Diag * self.v_tilde
-            self.gamma = np.dot(R.T, np.dot(RPT,self.v_tilde))
-            mu = self.w + np.dot(P,self.gamma)
+            self.Gamma = np.dot(R.T, np.dot(RPT,self.v_tilde))
+            mu = self.w + np.dot(P,self.Gamma)
             epsilon_np1 = sum((self.tau_tilde-self.np1[-1])**2)/self.N
             epsilon_np2 = sum((self.v_tilde-self.np2[-1])**2)/self.N
             self.np1.append(self.tau_tilde.copy())
@@ -197,8 +197,8 @@ class FITC(sparse_GP):
         self.RPT = np.dot(self.R,self.P.T)
         self.Sigma = np.diag(self.Diag) + np.dot(self.RPT.T,self.RPT)
         self.w = self.Diag * self.likelihood.v_tilde
-        self.gamma = np.dot(self.R.T, np.dot(self.RPT,self.likelihood.v_tilde))
-        self.mu = self.w + np.dot(self.P,self.gamma)
+        self.Gamma = np.dot(self.R.T, np.dot(self.RPT,self.likelihood.v_tilde))
+        self.mu = self.w + np.dot(self.P,self.Gamma)
 
     """
     Make a prediction for the generalized FITC model
@@ -99,8 +99,8 @@ class generalized_FITC(sparse_GP):
         self.RPT = np.dot(self.R,self.P.T)
         self.Sigma = np.diag(self.Diag) + np.dot(self.RPT.T,self.RPT)
         self.w = self.Diag * self.likelihood.v_tilde
-        self.gamma = np.dot(self.R.T, np.dot(self.RPT,self.likelihood.v_tilde))
-        self.mu = self.w + np.dot(self.P,self.gamma)
+        self.Gamma = np.dot(self.R.T, np.dot(self.RPT,self.likelihood.v_tilde))
+        self.mu = self.w + np.dot(self.P,self.Gamma)
 
         # Remove extra term from dL_dpsi1
         self.dL_dpsi1 -= mdot(self.Lmi.T,Lmipsi1*self.likelihood.precision.flatten().reshape(1,self.N))