Mirror of https://github.com/SheffieldML/GPy.git
Resolved merge conflict

Commit ebc0b6e1a5
7 changed files with 16 additions and 14 deletions
@@ -213,7 +213,7 @@ class Model(Parameterized):
         self.obj_grads = np.clip(self._transform_gradients(self.objective_function_gradients()), -1e10, 1e10)
         return obj_f, self.obj_grads
 
-    def optimize(self, optimizer=None, start=None, messages=False, max_iters=1000, ipython_notebook=False, **kwargs):
+    def optimize(self, optimizer=None, start=None, messages=False, max_iters=1000, ipython_notebook=True, **kwargs):
         """
         Optimize the model using self.log_likelihood and self.log_likelihood_gradient, as well as self.priors.
 
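The only functional change in this hunk flips the `ipython_notebook` default from False to True, so `optimize()` now attempts the notebook progress widgets unless told otherwise. A minimal usage sketch (the `GPRegression` setup is illustrative, not part of this commit):

```python
import numpy as np
import GPy

X = np.random.rand(20, 1)
Y = np.sin(3 * X) + 0.05 * np.random.randn(20, 1)
m = GPy.models.GPRegression(X, Y)

# ipython_notebook now defaults to True; pass False explicitly when
# running outside a notebook to skip the widget machinery.
m.optimize(messages=True, max_iters=1000, ipython_notebook=False)
```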
@@ -402,7 +402,7 @@ class Model(Parameterized):
         model_details = [['<b>Model</b>', self.name + '<br>'],
                          ['<b>Log-likelihood</b>', '{}<br>'.format(float(self.log_likelihood()))],
                          ["<b>Number of Parameters</b>", '{}<br>'.format(self.size)],
-                         ["<b>Updates</b>", '{}<br>'.format(self._updates)],
+                         ["<b>Updates</b>", '{}<br>'.format(self._update_on)],
                          ]
         from operator import itemgetter
         to_print = ["""<style type="text/css">
@@ -419,7 +419,7 @@ class Model(Parameterized):
         model_details = [['Name', self.name],
                          ['Log-likelihood', '{}'.format(float(self.log_likelihood()))],
                          ["Number of Parameters", '{}'.format(self.size)],
-                         ["Updates", '{}'.format(self._updates)],
+                         ["Updates", '{}'.format(self._update_on)],
                          ]
         from operator import itemgetter
         max_len = reduce(lambda a, b: max(len(b[0]), a), model_details, 0)
@@ -11,7 +11,6 @@ class Updateable(Observable):
     A model can be updated or not.
     Make sure updates can be switched on and off.
     """
-    _updates = True
     def __init__(self, *args, **kwargs):
         super(Updateable, self).__init__(*args, **kwargs)
 
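Dropping the class attribute `_updates = True` pairs with the `self._updates` → `self._update_on` switch in the two model-summary hunks above: the update flag becomes per-instance state read through `_update_on` rather than a class-level default shared across every subclass. A hedged sketch of the pattern — only the names `Updateable` and `_update_on` come from this diff, the rest is illustrative:

```python
class Updateable(object):
    """Sketch: per-instance update flag replacing a shared class attribute."""

    def __init__(self, *args, **kwargs):
        super(Updateable, self).__init__(*args, **kwargs)
        self._update_on = True   # each instance now owns its own flag

    def update_model(self, updates=None):
        # combined getter/setter; this method name is assumed, not shown here
        if updates is None:
            return self._update_on
        self._update_on = bool(updates)

m = Updateable()
m.update_model(False)     # switches updates off for this instance only
print(m.update_model())   # False; other instances are unaffected
```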
@@ -149,7 +149,7 @@ class SparseGP(GP):
 
         var_ = mdot(la.T, tmp, la)
         p0 = psi0_star[i]
-        t = self.posterior.woodbury_inv
+        t = np.atleast_3d(self.posterior.woodbury_inv)
         t2 = np.trace(t.T.dot(psi2_star), axis1=1, axis2=2)
 
         if full_cov:
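Wrapping `woodbury_inv` in `np.atleast_3d` guarantees `t` is 3-D, apparently so the following `np.trace(..., axis1=1, axis2=2)` line can treat the single-matrix case and the per-output stack of matrices uniformly. A small standalone check of the shape behaviour:

```python
import numpy as np

W2 = np.eye(3)                  # 2-D Woodbury inverse (single output)
W3 = np.stack([np.eye(3)] * 2)  # 3-D stack (one matrix per output)

print(np.atleast_3d(W2).shape)  # (3, 3, 1) -- a trailing axis is appended
print(np.atleast_3d(W3).shape)  # (2, 3, 3) -- already 3-D, left untouched
```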
@@ -11,9 +11,8 @@ def exponents(fnow, current_grad):
     return np.sign(exps) * np.log10(exps).astype(int)
 
 class VerboseOptimization(object):
-    def __init__(self, model, opt, maxiters, verbose=True, current_iteration=0, ipython_notebook=False):
+    def __init__(self, model, opt, maxiters, verbose=False, current_iteration=0, ipython_notebook=True):
         self.verbose = verbose
-        self.ipython_notebook = ipython_notebook
         if self.verbose:
             self.model = model
             self.iteration = current_iteration
@@ -26,13 +25,18 @@ class VerboseOptimization(object):
 
             self.update()
 
-            if self.ipython_notebook:
+            try:
                 from IPython.display import display
                 from IPython.html.widgets import FloatProgressWidget, HTMLWidget, ContainerWidget
                 self.text = HTMLWidget()
                 self.progress = FloatProgressWidget()
                 self.model_show = HTMLWidget()
+                self.ipython_notebook = ipython_notebook
+            except:
+                # Not in Ipython notebook
+                self.ipython_notebook = False
+
+            if self.ipython_notebook:
                 self.text.set_css('width', '100%')
                 #self.progress.set_css('width', '100%')
 
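The merge replaces the plain `if self.ipython_notebook:` guard with a try/except around the IPython widget imports: if the (pre-Jupyter) `IPython.html.widgets` API is unavailable, the flag is forced to False and the text-mode path runs instead. The same defensive pattern, sketched against the modern `ipywidgets` package for illustration:

```python
try:
    # these imports only succeed where a notebook front end is available
    from IPython.display import display
    from ipywidgets import FloatProgress
    notebook_widgets = True
except ImportError:
    # fall back to plain-text progress reporting
    notebook_widgets = False

if notebook_widgets:
    progress = FloatProgress(min=0, max=100)
    display(progress)
    progress.value = 42
else:
    print('progress: 42%')
```

Catching `ImportError` specifically, rather than the bare `except:` the commit uses, keeps unrelated failures inside the try block visible.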
@@ -142,4 +146,5 @@ class VerboseOptimization(object):
         if not self.ipython_notebook:
             print()
             print('Optimization finished in {0:.5g} Seconds'.format(self.stop-self.start))
-            print()
+            print('Optimization status: {0:.5g}'.format(self.status))
+            print()
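One caveat worth flagging on the new status line: `{0:.5g}` is a numeric format spec, so it raises `ValueError` if `self.status` is a string, and GPy optimizers commonly report a textual status such as 'Converged'. This is an observation about the committed line, not part of the commit; a plain placeholder is the safe variant:

```python
status = 'Converged'  # illustrative string status

print('Optimization status: {0}'.format(status))  # works for any type
# '{0:.5g}'.format(status) would raise:
# ValueError: Unknown format code 'g' for object of type 'str'
```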
@@ -21,7 +21,7 @@ class VarDTC(LatentFunctionInference):
     For efficiency, we sometimes work with the cholesky of Y*Y.T. To save repeatedly recomputing this, we cache it.
 
     """
-    const_jitter = 1e-6
+    const_jitter = 1e-8
     def __init__(self, limit=1):
         #self._YYTfactor_cache = caching.cache()
         from ...util.caching import Cacher
@@ -24,7 +24,7 @@ class VarDTC_minibatch(LatentFunctionInference):
     For efficiency, we sometimes work with the cholesky of Y*Y.T. To save repeatedly recomputing this, we cache it.
 
     """
-    const_jitter = 1e-6
+    const_jitter = 1e-8
     def __init__(self, batchsize=None, limit=1, mpi_comm=None):
 
         self.batchsize = batchsize
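Both VarDTC variants tighten `const_jitter` from 1e-6 to 1e-8. The jitter is the small constant added to the diagonal of a covariance matrix before Cholesky factorisation to keep it numerically positive definite; a smaller value perturbs the model less but tolerates less ill-conditioning. A minimal illustration of the mechanism (generic, not GPy's internal code):

```python
import numpy as np

def safe_cholesky(K, const_jitter=1e-8):
    # add jitter to the diagonal so near-singular kernels still factorise
    return np.linalg.cholesky(K + const_jitter * np.eye(K.shape[0]))

# a rank-deficient 'kernel': plain np.linalg.cholesky(K) raises LinAlgError
K = np.ones((3, 3))
L = safe_cholesky(K)
print(np.allclose(L.dot(L.T), K + 1e-8 * np.eye(3)))  # True
```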
@@ -138,8 +138,6 @@ class Test(ListDictTestCase):
         self.assertIsNot(par.gradient_full, pcopy.gradient_full)
         self.assertTrue(pcopy.checkgrad())
         self.assert_(np.any(pcopy.gradient!=0.0))
-        pcopy.optimize('bfgs')
-        par.optimize('bfgs')
         np.testing.assert_allclose(pcopy.param_array, par.param_array, atol=1e-6)
         par.randomize()
         with tempfile.TemporaryFile('w+b') as f: