Mirror of https://github.com/SheffieldML/GPy.git
Added priors behaviour as intended; fixed and closed issue #38.
parent 29790e327a
commit 75f4e26b23

5 changed files with 16 additions and 16 deletions
@@ -136,6 +136,7 @@ def gamma_from_EV(E, V):
     warnings.warn("use Gamma.from_EV to create Gamma Prior", FutureWarning)
     return Gamma.from_EV(E, V)
 
+
 class Gamma(Prior):
     """
     Implementation of the Gamma probability function, coupled with random variables.
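For reference, `Gamma.from_EV` constructs the prior from a target expectation and variance rather than raw shape/rate values. A minimal sketch of the moment matching this implies, assuming the shape/rate parameterisation (E = a/b, V = a/b²); the standalone helper name is hypothetical, not GPy API:

```python
# Sketch of the moment matching presumably behind Gamma.from_EV,
# assuming shape a and rate b with E[x] = a / b and Var[x] = a / b**2.
def gamma_params_from_EV(E, V):  # hypothetical helper, not GPy API
    a = E ** 2 / V  # shape
    b = E / V       # rate
    return a, b

# A prior with mean 1.0 and variance 0.5 gives shape 2.0 and rate 2.0:
print(gamma_params_from_EV(1.0, 0.5))  # (2.0, 2.0)
```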
@@ -63,7 +63,7 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=500, display=True, xto
     success = True  # Force calculation of directional derivs.
     nsuccess = 0  # nsuccess counts number of successes.
     beta = 1.0  # Initial scale parameter.
-    betamin = 1.0e-15  # Lower bound on scale.
+    betamin = 1.0e-60  # Lower bound on scale.
     betamax = 1.0e100  # Upper bound on scale.
     status = "Not converged"
 
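The change above only lowers the floor that `beta` is clamped against. For orientation, SCG routines in the Netlab lineage rescale `beta` each iteration from the comparison ratio `Delta` and keep it inside `[betamin, betamax]`; the sketch below shows that standard rule and is an assumption about the surrounding code, not a quote of it:

```python
def update_beta(beta, Delta, betamin=1.0e-60, betamax=1.0e100):
    # Netlab-style scale update (a sketch, not GPy's exact code):
    # a good quadratic fit (Delta > 0.75) relaxes the regularisation,
    # a poor one (Delta < 0.25) tightens it; both stay clamped.
    if Delta > 0.75:
        beta = max(0.25 * beta, betamin)
    elif Delta < 0.25:
        beta = min(4.0 * beta, betamax)
    return beta
```

With the lower `betamin`, `beta` can keep shrinking for many more successful steps before the clamp takes effect.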
@@ -192,7 +192,7 @@ class opt_SGD(Optimizer):
         if self.model.N == 0 or Y.std() == 0.0:
             return 0, step, self.model.N
 
-        self.model.likelihood._bias = Y.mean()
+        self.model.likelihood._offset = Y.mean()
         self.model.likelihood._scale = Y.std()
         self.model.likelihood.set_data(Y)
         # self.model.likelihood.V = self.model.likelihood.Y*self.model.likelihood.precision
@@ -219,9 +219,9 @@ class opt_SGD(Optimizer):
                 self.restore_constraints(ci)
 
             self.model.grads[j] = fp
-        # restore likelihood _bias and _scale, otherwise when we call set_data(y) on
+        # restore likelihood _offset and _scale, otherwise when we call set_data(y) on
         # the next feature, it will get normalized with the mean and std of this one.
-        self.model.likelihood._bias = 0
+        self.model.likelihood._offset = 0
         self.model.likelihood._scale = 1
 
         return f, step, self.model.N
@@ -266,7 +266,7 @@ class opt_SGD(Optimizer):
 
         self.model.likelihood.YYT = 0
         self.model.likelihood.trYYT = 0
-        self.model.likelihood._bias = 0.0
+        self.model.likelihood._offset = 0.0
         self.model.likelihood._scale = 1.0
 
         N, Q = self.model.X.shape
@@ -19,12 +19,12 @@ class Gaussian(likelihood):
 
         # normalization
         if normalize:
-            self._bias = data.mean(0)[None, :]
+            self._offset = data.mean(0)[None, :]
             self._scale = data.std(0)[None, :]
             # Don't scale outputs which have zero variance to zero.
             self._scale[np.nonzero(self._scale == 0.)] = 1.0e-3
         else:
-            self._bias = np.zeros((1, self.D))
+            self._offset = np.zeros((1, self.D))
             self._scale = np.ones((1, self.D))
 
         self.set_data(data)
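The rename is mechanical, but it is worth seeing the round trip the two attributes support: `set_data` normalises with `_offset` and `_scale`, and prediction undoes it. A toy check (variable names mirror the diff; `offset` and `scale` are local stand-ins for the attributes):

```python
import numpy as np

data = np.random.randn(100, 3) * 5.0 + 2.0   # toy targets
offset = data.mean(0)[None, :]
scale = data.std(0)[None, :]
scale[np.nonzero(scale == 0.)] = 1.0e-3      # avoid scaling zero-variance outputs to zero

Y = (data - offset) / scale                  # what set_data computes
recovered = Y * scale + offset               # the un-normalisation used in prediction
assert np.allclose(recovered, data)
```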
@@ -36,7 +36,7 @@ class Gaussian(likelihood):
         self.data = data
         self.N, D = data.shape
         assert D == self.D
-        self.Y = (self.data - self._bias) / self._scale
+        self.Y = (self.data - self._offset) / self._scale
         if D > self.N:
             self.YYT = np.dot(self.Y, self.Y.T)
             self.trYYT = np.trace(self.YYT)
@@ -66,7 +66,7 @@ class Gaussian(likelihood):
         """
         Un-normalize the prediction and add the likelihood variance, then return the 5%, 95% interval
         """
-        mean = mu * self._scale + self._bias
+        mean = mu * self._scale + self._offset
         if full_cov:
             if self.D > 1:
                 raise NotImplementedError, "TODO"
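For the interval the docstring mentions: under a Gaussian predictive density, the 5% and 95% quantiles sit at the mean plus or minus roughly 1.645 standard deviations once the likelihood variance is added. A toy sketch with hypothetical values (`noise_var` stands in for the likelihood variance):

```python
import numpy as np
from scipy import stats

mu, var, noise_var = 0.3, 0.04, 0.01    # hypothetical predictive moments
sd = np.sqrt(var + noise_var)           # add the likelihood variance
q05 = mu + stats.norm.ppf(0.05) * sd    # ~ mu - 1.645 * sd
q95 = mu + stats.norm.ppf(0.95) * sd    # ~ mu + 1.645 * sd
print(q05, q95)
```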
@@ -218,20 +218,19 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
         return means, covars
 
 
-    def plot_X_1d(self, fig=None, axes=None, fig_num="LVM mu S 1d", colors=None):
+    def plot_X_1d(self, ax=None, fignum=None, colors=None):
         """
         Plot latent space X in 1D:
 
         -if fig is given, create Q subplots in fig and plot in these
-        -if axes is given plot Q 1D latent space plots of X into each `axis`
-        -if neither fig nor axes is given create a figure with fig_num and plot in there
+        -if ax is given plot Q 1D latent space plots of X into each `axis`
+        -if neither fig nor ax is given create a figure with fignum and plot in there
 
         colors:
             colors of different latent space dimensions Q
         """
         import pylab
-        if fig is None and axes is None:
-            fig = pylab.figure(num=fig_num, figsize=(8, min(12, (2 * self.X.shape[1]))))
+        fig = pylab.figure(num=fignum, figsize=(8, min(12, (2 * self.X.shape[1]))))
         if colors is None:
             colors = pylab.gca()._get_lines.color_cycle
         pylab.clf()
@@ -240,10 +239,10 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
         plots = []
         x = np.arange(self.X.shape[0])
         for i in range(self.X.shape[1]):
-            if axes is None:
+            if ax is None:
                 ax = fig.add_subplot(self.X.shape[1], 1, i + 1)
             else:
-                ax = axes[i]
+                ax = ax[i]
             ax.plot(self.X, c='k', alpha=.3)
             plots.extend(ax.plot(x, self.X.T[i], c=colors.next(), label=r"$\mathbf{{X_{{{}}}}}$".format(i)))
             ax.fill_between(x,
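A hypothetical call under the new signature, assuming `m` is a fitted `Bayesian_GPLVM`:

```python
# `m` is assumed to be a fitted Bayesian_GPLVM; fignum names the figure
# the method creates when no axes are supplied.
m.plot_X_1d(fignum="LVM mu S 1d")
```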