From 23b53374490fbb67405c8fc5f3cba804e0713664 Mon Sep 17 00:00:00 2001
From: Fergus Cooper
Date: Tue, 19 Feb 2019 15:05:09 +0000
Subject: [PATCH 01/12] #719

---
 pints/tests/test_log_likelihoods.py | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/pints/tests/test_log_likelihoods.py b/pints/tests/test_log_likelihoods.py
index fe208ffb7..a85e029bb 100755
--- a/pints/tests/test_log_likelihoods.py
+++ b/pints/tests/test_log_likelihoods.py
@@ -451,6 +451,7 @@ def test_sum_of_independent_log_pdfs(self):
         self.assertTrue(np.all(3 * dy1 == dy))
 
     def test_ar1(self):
+        # single outputs
         model = pints.toy.ConstantModel(1)
         parameters = [0]
         times = np.asarray([1, 2, 3])
@@ -461,6 +462,16 @@ def test_ar1(self):
         self.assertAlmostEqual(
             log_likelihood([0, 0.5, 5]), -19.706737485492436)
 
+        # multiple outputs
+        model = pints.toy.ConstantModel(4)
+        parameters = [0, 0, 0, 0]
+        times = np.arange(1, 4)
+        model.simulate(parameters, times)
+        values = np.asarray([[3.5, 7.6, 8.5, 3.4],
+                             [1.1, -10.3, 15.6, 5.5],
+                             [-10, -30.5, -5, 7.6]])
+        problem = pints.MultiOutputProblem(model, times, values)
+
     def test_arma11(self):
         model = pints.toy.ConstantModel(1)
         parameters = [0]

From 9c4743735eaf51baa767c70a338049e6ad92a517 Mon Sep 17 00:00:00 2001
From: Fergus Cooper
Date: Tue, 19 Feb 2019 15:25:35 +0000
Subject: [PATCH 02/12] #719 Update

---
 pints/_log_likelihoods.py           | 2 +-
 pints/tests/test_log_likelihoods.py | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/pints/_log_likelihoods.py b/pints/_log_likelihoods.py
index 9ca1c0031..7c5208986 100644
--- a/pints/_log_likelihoods.py
+++ b/pints/_log_likelihoods.py
@@ -22,7 +22,7 @@ class AR1LogLikelihood(pints.ProblemLogLikelihood):
         -\\frac{N}{2}\log{2\pi}
         -N\log{\sigma}
         -\\frac{1}{2\sigma^2}
-        \sum_{i=1}^N{(\\epsilon_i - \\rho \\epsilon_{i-1})^2}
+        \sum_{i=2}^N{(\\epsilon_i - \\rho \\epsilon_{i-1})^2}
 
     where
 
diff --git a/pints/tests/test_log_likelihoods.py b/pints/tests/test_log_likelihoods.py
index a85e029bb..c30a4efbc 100755
--- a/pints/tests/test_log_likelihoods.py
+++ b/pints/tests/test_log_likelihoods.py
@@ -471,6 +471,7 @@ def test_ar1(self):
                              [1.1, -10.3, 15.6, 5.5],
                              [-10, -30.5, -5, 7.6]])
         problem = pints.MultiOutputProblem(model, times, values)
+        log_likelihood = pints.AR1LogLikelihood(problem)
 
     def test_arma11(self):
         model = pints.toy.ConstantModel(1)

From 93abbcef93c4174807ee52102b7fd59d459418d8 Mon Sep 17 00:00:00 2001
From: ben18785
Date: Tue, 19 Feb 2019 15:33:47 +0000
Subject: [PATCH 03/12] Corrected documentation for AR1 log-likelihood

---
 pints/_log_likelihoods.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/pints/_log_likelihoods.py b/pints/_log_likelihoods.py
index 7c5208986..950d0729f 100644
--- a/pints/_log_likelihoods.py
+++ b/pints/_log_likelihoods.py
@@ -18,10 +18,10 @@ class AR1LogLikelihood(pints.ProblemLogLikelihood):
     Calculates a log-likelihood assuming AR1 noise model
 
     .. math::
-        \log{L(\\theta, \sigma|\\boldsymbol{x})} =
+        \log{L(\\theta, \sigma'|\\boldsymbol{x})} =
             -\\frac{N}{2}\log{2\pi}
-            -N\log{\sigma}
-            -\\frac{1}{2\sigma^2}
+            -N\log{\sigma'}
+            -\\frac{1}{2\sigma'^2}
             \sum_{i=2}^N{(\\epsilon_i - \\rho \\epsilon_{i-1})^2}
 
     where
@@ -29,6 +29,8 @@ class AR1LogLikelihood(pints.ProblemLogLikelihood):
     .. math::
         \\epsilon_i = x_i - f_i(\\theta)
 
+    and :math:`\\sigma' = \\sigma \\sqrt{1 - \\rho^2}`.
+
 
     Arguments:
 
@@ -490,4 +492,3 @@ def __init__(self, problem):
             'The class `pints.KnownNoiseLogLikelihood` is deprecated.'
             ' Please use `pints.GaussianLogLikelihood` instead.')
         super(UnknownNoiseLogLikelihood, self).__init__(problem)
-
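Note: the relation :math:`\sigma' = \sigma \sqrt{1 - \rho^2}` fixed in patch 03
means that sigma is the stationary standard deviation of the AR1 residuals,
while sigma' is the scale of the innovations. A minimal NumPy sketch of that
reading (a reviewer sketch, not part of the series; the values are
illustrative only)::

    import numpy as np

    # AR1 residuals eps_i = rho * eps_{i-1} + nu_i with innovations
    # nu_i ~ N(0, sigma'), where sigma' = sigma * sqrt(1 - rho^2): the
    # long-run standard deviation of eps is then sigma itself.
    rng = np.random.default_rng(12345)
    rho, sigma = 0.5, 5.0
    sigma_prime = sigma * np.sqrt(1 - rho**2)
    eps = np.zeros(100000)
    for i in range(1, len(eps)):
        eps[i] = rho * eps[i - 1] + rng.normal(0.0, sigma_prime)
    print(np.std(eps))  # approximately 5.0, i.e. sigma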
From 9614a49141baa767c70a338049e6ad92a517 Mon Sep 17 00:00:00 2001
From: ben18785
Date: Tue, 19 Feb 2019 16:15:21 +0000
Subject: [PATCH 04/12] Update test_log_likelihoods.py

---
 pints/tests/test_log_likelihoods.py | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/pints/tests/test_log_likelihoods.py b/pints/tests/test_log_likelihoods.py
index c30a4efbc..ac136cdd5 100755
--- a/pints/tests/test_log_likelihoods.py
+++ b/pints/tests/test_log_likelihoods.py
@@ -469,9 +469,22 @@ def test_ar1(self):
         model.simulate(parameters, times)
         values = np.asarray([[3.5, 7.6, 8.5, 3.4],
                              [1.1, -10.3, 15.6, 5.5],
-                             [-10, -30.5, -5, 7.6]])
+                             [-10, -30.5, -5, 7.6],
+                             [-12, -10.1, -4, 2.3]])
         problem = pints.MultiOutputProblem(model, times, values)
         log_likelihood = pints.AR1LogLikelihood(problem)
+        # Test AR1Logpdf((3.5,1.1,-10, -12)|mean=0, rho=0.5, sigma=1) +
+        #      AR1Logpdf((7.6,-10.3,-30.5, -10.1)|mean=0, rho=-0.25, sigma=3) +
+        #      AR1Logpdf((8.5,15.6,-5, -4)|mean=0, rho=0.9, sigma=10) +
+        #      AR1Logpdf((3.4,5.5,7.6, 2.3)|mean=0, rho=0.0, sigma=2)
+        # = -109.4752924909364 -75.32717801724532
+        #
+        self.assertAlmostEqual(
+            log_likelihood(parameters + [0.5, 1.0,
+                                         -0.25, 3.0,
+                                         0.9, 10.0,
+                                         0.0, 1.0]),
+            -47.83720347766945)
 
     def test_arma11(self):
         model = pints.toy.ConstantModel(1)

From c55281fcb472209921f00520950c8e75e079c6f7 Mon Sep 17 00:00:00 2001
From: ben18785
Date: Tue, 19 Feb 2019 16:47:19 +0000
Subject: [PATCH 05/12] Corrected error with multidimensional output for AR1

- was previously pulling out the wrong sigma and rho for multi-output
  problems
---
 pints/_log_likelihoods.py           | 7 +++++--
 pints/tests/test_log_likelihoods.py | 8 ++++----
 2 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/pints/_log_likelihoods.py b/pints/_log_likelihoods.py
index 950d0729f..6b167837e 100644
--- a/pints/_log_likelihoods.py
+++ b/pints/_log_likelihoods.py
@@ -56,8 +56,11 @@ def __init__(self, problem):
         self._logn = 0.5 * (self._nt) * np.log(2 * np.pi)
 
     def __call__(self, x):
-        rho = np.asarray(x[-2 * self._no:-self._no])
-        sigma = np.asarray(x[-self._no:]) * np.sqrt(1 - rho**2)
+        m = 2 * self._no
+        parameters = x[-m:]
+        rho = np.asarray(parameters[0::2])
+        sigma = np.asarray(parameters[1::2])
+        sigma = np.asarray(sigma) * np.sqrt(1 - rho**2)
         error = self._values - self._problem.evaluate(x[:-2 * self._no])
         autocorr_error = error[1:] - rho * error[:-1]
         return np.sum(- self._logn - self._nt * np.log(sigma)
diff --git a/pints/tests/test_log_likelihoods.py b/pints/tests/test_log_likelihoods.py
index ac136cdd5..f75ece820 100755
--- a/pints/tests/test_log_likelihoods.py
+++ b/pints/tests/test_log_likelihoods.py
@@ -477,14 +477,14 @@ def test_ar1(self):
         # Test AR1Logpdf((3.5,1.1,-10, -12)|mean=0, rho=0.5, sigma=1) +
         #      AR1Logpdf((7.6,-10.3,-30.5, -10.1)|mean=0, rho=-0.25, sigma=3) +
         #      AR1Logpdf((8.5,15.6,-5, -4)|mean=0, rho=0.9, sigma=10) +
         #      AR1Logpdf((3.4,5.5,7.6, 2.3)|mean=0, rho=0.0, sigma=2)
-        # = -109.4752924909364 -75.32717801724532
-        #
+        # = -109.4752924909364 - 93.58199 - 18.3833
+        #   - 16.4988
         self.assertAlmostEqual(
             log_likelihood(parameters + [0.5, 1.0,
                                          -0.25, 3.0,
                                          0.9, 10.0,
-                                         0.0, 1.0]),
-            -47.83720347766945)
+                                         0.0, 2.0]),
+            -237.93936126949615)
 
     def test_arma11(self):

From 2a963fddbe14ae592a946dedff74836ffa4ec2c2 Mon Sep 17 00:00:00 2001
From: ben18785
Date: Tue, 19 Feb 2019 17:18:19 +0000
Subject: [PATCH 06/12] ARMA11 tested

- Added multi-output test for ARMA11
- Corrected parameter referencing in ARMA11 code
- Corrected docs for ARMA11 code
---
 pints/_log_likelihoods.py           | 23 +++++++++++++++++------
 pints/tests/test_log_likelihoods.py | 26 +++++++++++++++++++++++++-
 2 files changed, 42 insertions(+), 7 deletions(-)

diff --git a/pints/_log_likelihoods.py b/pints/_log_likelihoods.py
index 6b167837e..9a1aee111 100644
--- a/pints/_log_likelihoods.py
+++ b/pints/_log_likelihoods.py
@@ -76,14 +76,22 @@ class ARMA11LogLikelihood(pints.ProblemLogLikelihood):
         -\\frac{N}{2}\log{2\pi}
         -N\log{\sigma}
         -\\frac{1}{2\sigma^2}
-        \sum_{i=1}^N{(\\epsilon_i x_i - \\rho \\epsilon_{i-1}
-                      - \\phi \\nu(t-1))^2}
+        \sum_{i=3}^N{(\\nu_i - \\phi \\nu_{i-1})^2}
 
     where
 
     .. math::
+        \\nu_i = \\epsilon_i - \\rho \\epsilon_{i-1}
+
+    and
+
+    .. math::
         \\epsilon_i = x_i - f_i(\\theta)
 
+    and
+
+    .. math::
+        \\sigma' = \\sigma\\sqrt{\\frac{1-\\rho^2}{1 + 2\\phi\\rho + \\phi^2}}
 
     Arguments:
 
@@ -109,13 +117,16 @@ def __init__(self, problem):
         self._logn = 0.5 * (self._nt) * np.log(2 * np.pi)
 
     def __call__(self, x):
-        rho = np.asarray(x[-3 * self._no:-2 * self._no])
-        phi = np.asarray(x[-2 * self._no:-self._no])
+        m = 3 * self._no
+        parameters = x[-m:]
+        rho = np.asarray(parameters[0::3])
+        phi = np.asarray(parameters[1::3])
+        sigma = np.asarray(parameters[2::3])
         sigma = (
-            np.asarray(x[-self._no:]) *
+            sigma *
             np.sqrt((1.0 - rho**2) / (1.0 + 2.0 * phi * rho + phi**2))
         )
-        error = self._values - self._problem.evaluate(x[:-3 * self._no])
+        error = self._values - self._problem.evaluate(x[:-m])
         v = error[1:] - rho * error[:-1]
         autocorr_error = v[1:] - phi * v[:-1]
         return np.sum(- self._logn - self._nt * np.log(sigma)
diff --git a/pints/tests/test_log_likelihoods.py b/pints/tests/test_log_likelihoods.py
index f75ece820..ad5664eef 100755
--- a/pints/tests/test_log_likelihoods.py
+++ b/pints/tests/test_log_likelihoods.py
@@ -465,7 +465,7 @@ def test_ar1(self):
         # multiple outputs
         model = pints.toy.ConstantModel(4)
         parameters = [0, 0, 0, 0]
-        times = np.arange(1, 4)
+        times = np.arange(1, 5)
         model.simulate(parameters, times)
         values = np.asarray([[3.5, 7.6, 8.5, 3.4],
                              [1.1, -10.3, 15.6, 5.5],
@@ -497,6 +497,30 @@ def test_arma11(self):
         self.assertAlmostEqual(
             log_likelihood([0, 0.9, -0.4, 1]), -171.53031588534171)
 
+        # multiple outputs
+        model = pints.toy.ConstantModel(4)
+        parameters = [0, 0, 0, 0]
+        times = np.arange(1, 5)
+        model.simulate(parameters, times)
+        values = np.asarray([[3.5, 7.6, 8.5, 3.4],
+                             [1.1, -10.3, 15.6, 5.5],
+                             [-10, -30.5, -5, 7.6],
+                             [-12, -10.1, -4, 2.3]])
+        problem = pints.MultiOutputProblem(model, times, values)
+        log_likelihood = pints.AR1LogLikelihood(problem)
+        # ARMA1Logpdf((3.5,1.1,-10, -12)|mean=0, rho=0.5, phi=0.34, sigma=1) +
+        # ARMA1Logpdf((7.6,-10.3,-30.5, -10.1)|
+        #             mean=0, rho=-0.25, phi=0.1, sigma=3) +
+        # ARMA1Logpdf((8.5,15.6,-5, -4)|mean=0, rho=0.9, phi=0.0, sigma=10) +
+        # ARMA1Logpdf((3.4,5.5,7.6, 2.3)|mean=0, rho=0.0, phi=0.9, sigma=2)
+        # = -116.009 -74.94 -14.32 -8.88
+        self.assertAlmostEqual(
+            log_likelihood(parameters + [0.5, 0.34, 1.0,
+                                         -0.25, 0.1, 3.0,
+                                         0.9, 0.0, 10.0,
+                                         0.0, 0.9, 2.0]),
+            -214.17034137601107)
+
 
 if __name__ == '__main__':
     unittest.main()
From 2ca2fd5dced3141244310014d5386f38864beab9 Mon Sep 17 00:00:00 2001
From: ben18785
Date: Tue, 19 Feb 2019 17:19:13 +0000
Subject: [PATCH 07/12] Updated ARMA(1,1) docstrings

---
 pints/_log_likelihoods.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pints/_log_likelihoods.py b/pints/_log_likelihoods.py
index 9a1aee111..b8cfd0d72 100644
--- a/pints/_log_likelihoods.py
+++ b/pints/_log_likelihoods.py
@@ -69,7 +69,7 @@ def __call__(self, x):
 
 class ARMA11LogLikelihood(pints.ProblemLogLikelihood):
     """
-    Calculates a log-likelihood assuming AR1 noise model
+    Calculates a log-likelihood assuming ARMA(1,1) noise model.
 
     .. math::
         \log{L(\\theta, \sigma|\\boldsymbol{x})} =

From 8f346a3d7ce8fa2b566cd6fe55782b7045440418 Mon Sep 17 00:00:00 2001
From: ben18785
Date: Tue, 19 Feb 2019 19:07:00 +0000
Subject: [PATCH 08/12] Corrected errors with Gaussian log-likelihood

- The sensitivities were previously not correct for multi-output problems
- Added multi-output value-based tests
- Corrected mistake with constant_model forward sensitivities (the
  Jacobian should be the identity matrix, not a diagonal matrix with
  increasing entries)
- Corrected errors with sensitivities for the Gaussian log-likelihood on
  multi-output problems: formerly we returned an array of theta
  derivatives followed by a single sigma derivative. This is incorrect,
  as there is one sigma parameter per output.
---
 pints/_log_likelihoods.py           | 12 ++++----
 pints/tests/test_log_likelihoods.py | 46 ++++++++++++++++++++++++++++-
 pints/toy/_constant_model.py        |  6 ++--
 3 files changed, 52 insertions(+), 12 deletions(-)

diff --git a/pints/_log_likelihoods.py b/pints/_log_likelihoods.py
index b8cfd0d72..7acd8181e 100644
--- a/pints/_log_likelihoods.py
+++ b/pints/_log_likelihoods.py
@@ -336,29 +336,27 @@ def __call__(self, x):
 
     def evaluateS1(self, x):
         """ See :meth:`LogPDF.evaluateS1()`. """
-        sigma = float(np.asarray(x[-self._no:]))
+        sigma = np.asarray(x[-self._no:])
 
         # Evaluate, and get residuals
         y, dy = self._problem.evaluateS1(x[:-self._no])
 
         # Reshape dy, in case we're working with a single-output problem
-        dy = dy.reshape(self._nt, self._no, self._n_parameters - 1)
+        dy = dy.reshape(self._nt, self._no, self._n_parameters - self._no)
 
         # Note: Must be (data - simulation), sign now matters!
         r = self._values - y
 
         # Calculate log-likelihood
-        L = np.sum(-self._logn - self._nt * np.log(sigma)
-                   - (1.0 / (2 * sigma**2)) * np.sum(r**2, axis=0))
+        L = self.__call__(x)
 
         # Calculate derivatives in the model parameters
         dL = np.sum(
             (sigma**(-2.0) * np.sum((r.T * dy.T).T, axis=0).T).T, axis=0)
 
         # Calculate derivative wrt sigma
-        dsigma = np.sum(-self._nt / sigma +
-                        sigma**(-3.0) * np.sum(r**2, axis=0))
-        dL = np.concatenate((dL, np.array([dsigma])))
+        dsigma = -self._nt / sigma + sigma**(-3.0) * np.sum(r**2, axis=0)
+        dL = np.concatenate((dL, np.array(list(dsigma))))
 
         # Return
         return L, dL
diff --git a/pints/tests/test_log_likelihoods.py b/pints/tests/test_log_likelihoods.py
index ad5664eef..b71897679 100755
--- a/pints/tests/test_log_likelihoods.py
+++ b/pints/tests/test_log_likelihoods.py
@@ -136,6 +136,50 @@ def test_gaussian_log_likelihoods_single_output(self):
         l2 = pints.UnknownNoiseLogLikelihood(problem)
         self.assertIsInstance(l2, pints.GaussianLogLikelihood)
 
+        # test multiple output unknown noise
+        model = pints.toy.ConstantModel(3)
+        parameters = [0, 0, 0]
+        times = [1, 2, 3, 4]
+        values = model.simulate([0, 0, 0], times)
+        org_values = [[10.7, 3.5, 3.8],
+                      [1.1, 3.2, -1.4],
+                      [9.3, 0.0, 4.5],
+                      [1.2, -3, -10]]
+        problem = pints.MultiOutputProblem(model, times, org_values)
+        log_likelihood = pints.GaussianLogLikelihood(problem)
+        # Test Gaussian_logpdf((10.7, 1.1, 9.3, 1.2)|mean=0, sigma=3.5) +
+        #      Gaussian_logpdf((3.5, 3.2, 0.0, -3)|mean=0, sigma=1) +
+        #      Gaussian_logpdf((3.8, -1.4, 4.5, -10)|mean=0, sigma=12)
+        # = -50.5088...
+        self.assertAlmostEqual(
+            log_likelihood(parameters + [3.5, 1, 12]),
+            -50.508848609684783
+        )
+        l, dl = log_likelihood.evaluateS1(parameters + [3.5, 1, 12])
+        self.assertAlmostEqual(l, -50.508848609684783)
+        self.assertAlmostEqual(dl[0], 1.820408163265306)
+        self.assertAlmostEqual(dl[1], 3.7000000000000002)
+        self.assertAlmostEqual(dl[2], -0.021527777777777774)
+        self.assertAlmostEqual(dl[3], 3.6065306122448981)
+        self.assertAlmostEqual(dl[4], 27.490000000000002)
+        self.assertAlmostEqual(dl[5], -0.25425347222222222)
+
+        # test multiple output model dimensions of sensitivities
+        d = 20
+        model = pints.toy.ConstantModel(d)
+        parameters = [0 for i in range(d)]
+        times = [1, 2, 3, 4]
+        values = model.simulate(parameters, times)
+        org_values = np.ones((len(times), d))
+        extra_params = np.ones(d).tolist()
+        problem = pints.MultiOutputProblem(model, times, org_values)
+        log_likelihood = pints.GaussianLogLikelihood(problem)
+        l = log_likelihood(parameters + extra_params)
+        l1, dl = log_likelihood.evaluateS1(parameters + extra_params)
+        self.assertTrue(np.array_equal(len(dl),
+                                       len(parameters + extra_params)))
+        self.assertEqual(l, l1)
+
     def test_known_noise_gaussian_single_S1(self):
         """
         Simple tests for single known noise Gaussian log-likelihood with
@@ -507,7 +551,7 @@ def test_arma11(self):
                              [-10, -30.5, -5, 7.6],
                              [-12, -10.1, -4, 2.3]])
         problem = pints.MultiOutputProblem(model, times, values)
-        log_likelihood = pints.AR1LogLikelihood(problem)
+        log_likelihood = pints.ARMA11LogLikelihood(problem)
         # ARMA1Logpdf((3.5,1.1,-10, -12)|mean=0, rho=0.5, phi=0.34, sigma=1) +
         # ARMA1Logpdf((7.6,-10.3,-30.5, -10.1)|
         #             mean=0, rho=-0.25, phi=0.1, sigma=3) +
diff --git a/pints/toy/_constant_model.py b/pints/toy/_constant_model.py
index 15cb6698a..a6ef87881 100644
--- a/pints/toy/_constant_model.py
+++ b/pints/toy/_constant_model.py
@@ -106,8 +106,6 @@ def simulateS1(self, parameters, times):
         # [df2/dp1, df2/dp2]]
         # i.e.
         # [[1, 0],
-        #  [0, 2]]
-        # i.e. np.diag(self._r)
-        dy = np.tile(np.diag(self._r), (len(times), 1, 1))
+        #  [0, 1]]
+        dy = np.tile(np.diag(np.ones(len(self._r))), (len(times), 1, 1))
         return (y, dy)
-

From 00511e8c1bc8fc830edcff604d1f991f52e237b9 Mon Sep 17 00:00:00 2001
From: ben18785
Date: Tue, 19 Feb 2019 19:15:10 +0000
Subject: [PATCH 09/12] GaussianKnownSigmaLogLikelihood testing

- Added value-based tests for Gaussian with known sigma
---
 pints/tests/test_log_likelihoods.py | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/pints/tests/test_log_likelihoods.py b/pints/tests/test_log_likelihoods.py
index b71897679..cdcbf70d7 100755
--- a/pints/tests/test_log_likelihoods.py
+++ b/pints/tests/test_log_likelihoods.py
@@ -112,6 +112,7 @@ def test_gaussian_log_likelihoods_single_output(self):
         l, dl = log_likelihood.evaluateS1([3])
         self.assertAlmostEqual(l, -23.777369746461702)
         self.assertAlmostEqual(dl[0], -9.3333333333333321)
+        self.assertEqual(len(dl), 1)
 
         # unknown noise value checks
         log_likelihood = pints.GaussianLogLikelihood(problem)
@@ -276,6 +277,33 @@ def test_known_noise_gaussian_single_S1(self):
 
             plt.show()
 
+        # value-based tests (single output tests are above)
+        # multiple outputs
+        model = pints.toy.ConstantModel(3)
+        parameters = [0, 0, 0]
+        times = [1, 2, 3, 4]
+        values = model.simulate(parameters, times)
+        org_values = [[10.7, 3.5, 3.8],
+                      [1.1, 3.2, -1.4],
+                      [9.3, 0.0, 4.5],
+                      [1.2, -3, -10]]
+        problem = pints.MultiOutputProblem(model, times, org_values)
+        sigma = [3.5, 1, 12]
+        log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
+        # Test Gaussian_logpdf((10.7, 1.1, 9.3, 1.2)|mean=0, sigma=3.5) +
+        #      Gaussian_logpdf((3.5, 3.2, 0.0, -3)|mean=0, sigma=1) +
+        #      Gaussian_logpdf((3.8, -1.4, 4.5, -10)|mean=0, sigma=12)
+        # = -50.5088...
+        self.assertAlmostEqual(
+            log_likelihood(parameters),
+            -50.508848609684783
+        )
+        l, dl = log_likelihood.evaluateS1(parameters)
+        self.assertAlmostEqual(l, -50.508848609684783)
+        self.assertAlmostEqual(dl[0], 1.820408163265306)
+        self.assertAlmostEqual(dl[1], 3.7000000000000002)
+        self.assertAlmostEqual(dl[2], -0.021527777777777774)
+
     def test_student_t_log_likelihood_single(self):
         """
         Single-output test for Student-t noise log-likelihood methods
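Note: the value -50.508848609684783 asserted in patches 08 and 09 can be
reproduced independently, since the multi-output Gaussian log-likelihood is a
sum of independent univariate terms, one output (column) at a time. A reviewer
sketch, not part of the series, assuming SciPy is available::

    import numpy as np
    from scipy import stats

    # Data and per-output sigmas exactly as in the tests above.
    data = np.array([[10.7, 3.5, 3.8],
                     [1.1, 3.2, -1.4],
                     [9.3, 0.0, 4.5],
                     [1.2, -3, -10]])
    sigma = [3.5, 1, 12]
    total = sum(stats.norm.logpdf(data[:, j], loc=0, scale=s).sum()
                for j, s in enumerate(sigma))
    print(total)  # -50.508848609684783, matching the tested value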
From 9dc3ebc727e3e0ba05de16a09339d75e445c6c96 Mon Sep 17 00:00:00 2001
From: ben18785
Date: Tue, 19 Feb 2019 19:26:33 +0000
Subject: [PATCH 10/12] Added value-based tests for ScaledLogLikelihood

---
 pints/tests/test_log_likelihoods.py | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/pints/tests/test_log_likelihoods.py b/pints/tests/test_log_likelihoods.py
index cdcbf70d7..d91340be1 100755
--- a/pints/tests/test_log_likelihoods.py
+++ b/pints/tests/test_log_likelihoods.py
@@ -78,6 +78,31 @@ def test_scaled_log_likelihood(self):
         self.assertAlmostEqual(dy1[0] / dy3[0], 1)
         self.assertAlmostEqual(dy1[1] / dy3[1], 1)
 
+        # test values of log-likelihood and derivatives
+        model = pints.toy.ConstantModel(3)
+        times = [1, 2, 3, 4]
+        parameters = [0, 0, 0]
+        org_values = [[10.7, 3.5, 3.8],
+                      [1.1, 3.2, -1.4],
+                      [9.3, 0.0, 4.5],
+                      [1.2, -3, -10]]
+        problem = pints.MultiOutputProblem(model, times, org_values)
+        f2 = pints.GaussianKnownSigmaLogLikelihood(problem, [3.5, 1, 12])
+        log_likelihood = pints.ScaledLogLikelihood(f2)
+        # Test Gaussian_logpdf((10.7, 1.1, 9.3, 1.2)|mean=0, sigma=3.5) +
+        #      Gaussian_logpdf((3.5, 3.2, 0.0, -3)|mean=0, sigma=1) +
+        #      Gaussian_logpdf((3.8, -1.4, 4.5, -10)|mean=0, sigma=12)
+        # = -50.5088...
+        self.assertAlmostEqual(
+            log_likelihood(parameters),
+            -50.508848609684783 / 12.0
+        )
+        l, dl = log_likelihood.evaluateS1(parameters)
+        self.assertAlmostEqual(l, -50.508848609684783 / 12.0)
+        self.assertAlmostEqual(dl[0], 1.820408163265306 / 12.0)
+        self.assertAlmostEqual(dl[1], 3.7000000000000002 / 12.0)
+        self.assertAlmostEqual(dl[2], -0.021527777777777774 / 12.0)
+
     def test_gaussian_log_likelihoods_single_output(self):
         """
         Single-output test for known/unknown noise log-likelihood methods

From 27464e153d33393380001d38c5d906898e3071c4 Mon Sep 17 00:00:00 2001
From: ben18785
Date: Tue, 19 Feb 2019 19:43:08 +0000
Subject: [PATCH 11/12] Corrected tests

- constant model test was wrong previously
- corrected incorrect derivative-based tests for the constant model
---
 pints/tests/test_error_measures.py     | 52 +++++++++++++-------------
 pints/tests/test_toy_constant_model.py |  6 +--
 2 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/pints/tests/test_error_measures.py b/pints/tests/test_error_measures.py
index 77a9abc35..8c74ee3b9 100755
--- a/pints/tests/test_error_measures.py
+++ b/pints/tests/test_error_measures.py
@@ -160,9 +160,9 @@ def test_mean_squared_error_multi(self):
         self.assertTrue(np.all(y[0, :] == [1, 4]))
         self.assertTrue(np.all(y[1, :] == [1, 4]))
         self.assertTrue(np.all(y[2, :] == [1, 4]))
-        self.assertTrue(np.all(dy[0, :] == [[1, 0], [0, 2]]))
-        self.assertTrue(np.all(dy[1, :] == [[1, 0], [0, 2]]))
-        self.assertTrue(np.all(dy[2, :] == [[1, 0], [0, 2]]))
+        self.assertTrue(np.all(dy[0, :] == [[1, 0], [0, 1]]))
+        self.assertTrue(np.all(dy[1, :] == [[1, 0], [0, 1]]))
+        self.assertTrue(np.all(dy[2, :] == [[1, 0], [0, 1]]))
 
         # Check residuals
         rx = y - np.array(values)
@@ -185,9 +185,9 @@ def test_mean_squared_error_multi(self):
         # Residuals are: [[0, 0], [-1, -3], [-2, -6]]
         # Derivatives are: [[1, 0], [0, 2]]
         # dex1 is: (2 / nt / no) * (0 - 1 - 2) * 1 = (1 / 3) * -3 * 1 = -1
-        # dex2 is: (2 / nt / no) * (0 - 3 - 6) * 2 = (1 / 3) * -9 * 2 = -6
+        # dex2 is: (2 / nt / no) * (0 - 3 - 6) * 1 = (1 / 3) * -9 * 1 = -3
         self.assertEqual(dex[0], -1)
-        self.assertEqual(dex[1], -6)
+        self.assertEqual(dex[1], -3)
 
     def test_mean_squared_error_weighted(self):
         """ Tests :class:`pints.MeanSquaredError` with weighted outputs. """
@@ -224,9 +224,9 @@ def test_mean_squared_error_weighted(self):
         self.assertTrue(np.all(y[0, :] == [1, 4]))
         self.assertTrue(np.all(y[1, :] == [1, 4]))
         self.assertTrue(np.all(y[2, :] == [1, 4]))
-        self.assertTrue(np.all(dy[0, :] == [[1, 0], [0, 2]]))
-        self.assertTrue(np.all(dy[1, :] == [[1, 0], [0, 2]]))
-        self.assertTrue(np.all(dy[2, :] == [[1, 0], [0, 2]]))
+        self.assertTrue(np.all(dy[0, :] == [[1, 0], [0, 1]]))
+        self.assertTrue(np.all(dy[1, :] == [[1, 0], [0, 1]]))
+        self.assertTrue(np.all(dy[2, :] == [[1, 0], [0, 1]]))
 
         # Check residuals
         rx = y - np.array(values)
@@ -251,11 +251,11 @@ def test_mean_squared_error_weighted(self):
         # dex1 is: (2 / nt / no) * (0 - 1 - 2) * 1 * 1
         #        = (1 / 3) * -3 * 1 * 1
         #        = -1
-        # dex2 is: (2 / nt / no) * (0 - 3 - 6) * 2 * 2
-        #        = (1 / 3) * -9 * 2 * 2
-        #        = -12
+        # dex2 is: (2 / nt / no) * (0 - 3 - 6) * 1 * 2
+        #        = (1 / 3) * -9 * 1 * 2
+        #        = -6
         self.assertEqual(dex[0], -1)
-        self.assertEqual(dex[1], -12)
+        self.assertEqual(dex[1], -6)
 
     def test_probability_based_error(self):
         """ Tests :class:`pints.ProbabilityBasedError`. """
@@ -350,9 +350,9 @@ def test_sum_of_squares_error_multi(self):
         self.assertTrue(np.all(y[0, :] == [1, 4]))
         self.assertTrue(np.all(y[1, :] == [1, 4]))
         self.assertTrue(np.all(y[2, :] == [1, 4]))
-        self.assertTrue(np.all(dy[0, :] == [[1, 0], [0, 2]]))
-        self.assertTrue(np.all(dy[1, :] == [[1, 0], [0, 2]]))
-        self.assertTrue(np.all(dy[2, :] == [[1, 0], [0, 2]]))
+        self.assertTrue(np.all(dy[0, :] == [[1, 0], [0, 1]]))
+        self.assertTrue(np.all(dy[1, :] == [[1, 0], [0, 1]]))
+        self.assertTrue(np.all(dy[2, :] == [[1, 0], [0, 1]]))
 
         # Check residuals
         rx = y - np.array(values)
@@ -375,9 +375,9 @@ def test_sum_of_squares_error_multi(self):
         # Residuals are: [[0, 0], [-1, -3], [-2, -6]]
         # Derivatives are: [[1, 0], [0, 2]]
         # dex1 is: 2 * (0 - 1 - 2) * 1 = 2 * -3 * 1 = -6
-        # dex2 is: 2 * (0 - 3 - 6) * 2 = 2 * -9 * 2 = -36
+        # dex2 is: 2 * (0 - 3 - 6) * 2 = 2 * -9 * 1 = -18
         self.assertEqual(dex[0], -6)
-        self.assertEqual(dex[1], -36)
+        self.assertEqual(dex[1], -18)
 
     def test_sum_of_squares_error_weighted(self):
         """ Tests :class:`pints.MeanSquaredError` with weighted outputs. """
@@ -408,15 +408,15 @@ def test_sum_of_squares_error_weighted(self):
         x = [1, 2]
 
         # Model outputs are 3 times [1, 4]
-        # Model derivatives are 3 times [[1, 0], [0, 2]]
+        # Model derivatives are 3 times [[1, 0], [0, 1]]
         y, dy = p.evaluateS1(x)
         self.assertTrue(np.all(y == p.evaluate(x)))
         self.assertTrue(np.all(y[0, :] == [1, 4]))
         self.assertTrue(np.all(y[1, :] == [1, 4]))
         self.assertTrue(np.all(y[2, :] == [1, 4]))
-        self.assertTrue(np.all(dy[0, :] == [[1, 0], [0, 2]]))
-        self.assertTrue(np.all(dy[1, :] == [[1, 0], [0, 2]]))
-        self.assertTrue(np.all(dy[2, :] == [[1, 0], [0, 2]]))
+        self.assertTrue(np.all(dy[0, :] == [[1, 0], [0, 1]]))
+        self.assertTrue(np.all(dy[1, :] == [[1, 0], [0, 1]]))
+        self.assertTrue(np.all(dy[2, :] == [[1, 0], [0, 1]]))
 
         # Check residuals
         rx = y - np.array(values)
@@ -437,15 +437,15 @@ def test_sum_of_squares_error_weighted(self):
         self.assertEqual(dex.shape, (2, ))
 
         # Residuals are: [[0, 0], [-1, -3], [-2, -6]]
-        # Derivatives are: [[1, 0], [0, 2]]
+        # Derivatives are: [[1, 0], [0, 1]]
         # dex1 is: 2 * (0 - 1 - 2) * 1 * 1
         #        = 2 * -3 * 1 * 1
         #        = -6
-        # dex2 is: 2 * (0 - 3 - 6) * 2 * 2
-        #        = 2 * -9 * 2 * 2
-        #        = -72
+        # dex2 is: 2 * (0 - 3 - 6) * 2 * 1
+        #        = 2 * -9 * 2 * 1
+        #        = -36
         self.assertEqual(dex[0], -6)
-        self.assertEqual(dex[1], -72)
+        self.assertEqual(dex[1], -36)
 
     def test_sum_of_errors(self):
         """ Tests :class:`pints.SumOfErrors`. """
diff --git a/pints/tests/test_toy_constant_model.py b/pints/tests/test_toy_constant_model.py
index bb80c5298..7fe24a0e4 100755
--- a/pints/tests/test_toy_constant_model.py
+++ b/pints/tests/test_toy_constant_model.py
@@ -174,11 +174,11 @@ def test_derivatives(self):
         self.assertEqual(dy.shape, (3, 2, 2))
         dmx = np.array(
             [[[1, 0],  # dx1/dp1 = 1, dx1/dp2 = 0
-              [0, 2]],  # dx2/dp2 = 0, dx2/dp2 = 2
+              [0, 1]],  # dx2/dp1 = 0, dx2/dp2 = 1
              [[1, 0],
-              [0, 2]],
+              [0, 1]],
              [[1, 0],
-              [0, 2]]]
+              [0, 1]]]
         )
         self.assertTrue(np.all(dy == dmx))
 
From 0206af7492c9a195f68d4ce0e55acad1aecbc41e Mon Sep 17 00:00:00 2001
From: Ben Lambert
Date: Tue, 19 Feb 2019 23:24:19 +0000
Subject: [PATCH 12/12] Tweaks to docstring and derivative function

- tweak to docstring for constant model
- tweak to derivative function to simplify for constant model
- tweak to test comments for error measures to reflect changes to the
  derivative function for the constant model
---
 pints/tests/test_error_measures.py | 10 +++++-----
 pints/toy/_constant_model.py       |  4 ++--
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/pints/tests/test_error_measures.py b/pints/tests/test_error_measures.py
index 8c74ee3b9..a77af9232 100755
--- a/pints/tests/test_error_measures.py
+++ b/pints/tests/test_error_measures.py
@@ -154,7 +154,7 @@ def test_mean_squared_error_multi(self):
         x = [1, 2]
 
         # Model outputs are 3 times [1, 4]
-        # Model derivatives are 3 times [[1, 0], [0, 2]]
+        # Model derivatives are 3 times [[1, 0], [0, 1]]
         y, dy = p.evaluateS1(x)
         self.assertTrue(np.all(y == p.evaluate(x)))
         self.assertTrue(np.all(y[0, :] == [1, 4]))
@@ -183,7 +183,7 @@ def test_mean_squared_error_multi(self):
         self.assertEqual(dex.shape, (2, ))
 
         # Residuals are: [[0, 0], [-1, -3], [-2, -6]]
-        # Derivatives are: [[1, 0], [0, 2]]
+        # Derivatives are: [[1, 0], [0, 1]]
         # dex1 is: (2 / nt / no) * (0 - 1 - 2) * 1 = (1 / 3) * -3 * 1 = -1
         # dex2 is: (2 / nt / no) * (0 - 3 - 6) * 1 = (1 / 3) * -9 * 1 = -3
         self.assertEqual(dex[0], -1)
@@ -218,7 +218,7 @@ def test_mean_squared_error_weighted(self):
         x = [1, 2]
 
         # Model outputs are 3 times [1, 4]
-        # Model derivatives are 3 times [[1, 0], [0, 2]]
+        # Model derivatives are 3 times [[1, 0], [0, 1]]
         y, dy = p.evaluateS1(x)
         self.assertTrue(np.all(y == p.evaluate(x)))
         self.assertTrue(np.all(y[0, :] == [1, 4]))
@@ -344,7 +344,7 @@ def test_sum_of_squares_error_multi(self):
         x = [1, 2]
 
         # Model outputs are 3 times [1,4]
-        # Model derivatives are 3 times [[1, 0], [0, 2]]
+        # Model derivatives are 3 times [[1, 0], [0, 1]]
         y, dy = p.evaluateS1(x)
         self.assertTrue(np.all(y == p.evaluate(x)))
         self.assertTrue(np.all(y[0, :] == [1, 4]))
@@ -373,7 +373,7 @@ def test_sum_of_squares_error_multi(self):
         self.assertEqual(dex.shape, (2, ))
 
         # Residuals are: [[0, 0], [-1, -3], [-2, -6]]
-        # Derivatives are: [[1, 0], [0, 2]]
+        # Derivatives are: [[1, 0], [0, 1]]
         # dex1 is: 2 * (0 - 1 - 2) * 1 = 2 * -3 * 1 = -6
         # dex2 is: 2 * (0 - 3 - 6) * 2 = 2 * -9 * 1 = -18
         self.assertEqual(dex[0], -6)
diff --git a/pints/toy/_constant_model.py b/pints/toy/_constant_model.py
index a6ef87881..8cd056a83 100644
--- a/pints/toy/_constant_model.py
+++ b/pints/toy/_constant_model.py
@@ -30,7 +30,7 @@ class ConstantModel(pints.ForwardModelS1):
 
     .. math::
         \\frac{\partial{f_i(t)}}{dp_j} =
-            \\begin{cases} i, i = j\\\\0, i \\neq j \end{cases}
+            \\begin{cases} 1, i = j\\\\0, i \\neq j \end{cases}
 
     Arguments:
 
@@ -107,5 +107,5 @@ def simulateS1(self, parameters, times):
         # i.e.
         # [[1, 0],
         #  [0, 1]]
-        dy = np.tile(np.diag(np.ones(len(self._r))), (len(times), 1, 1))
+        dy = np.tile(np.diag(np.ones(self._n)), (len(times), 1, 1))
         return (y, dy)
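Note: an end-to-end check of the corrected multi-output behaviour, as a
reviewer sketch rather than part of the series, assuming a checkout with all
twelve patches applied. After patch 08, evaluateS1 on a 3-output Gaussian
problem must return one derivative per model parameter plus one per
per-output sigma::

    import numpy as np
    import pints
    import pints.toy

    model = pints.toy.ConstantModel(3)
    times = [1, 2, 3, 4]
    values = np.ones((len(times), 3))
    problem = pints.MultiOutputProblem(model, times, values)
    log_likelihood = pints.GaussianLogLikelihood(problem)
    x = [0, 0, 0] + [1.0, 1.0, 1.0]   # 3 model parameters + 3 sigmas
    L, dL = log_likelihood.evaluateS1(x)
    print(len(dL))  # 6: 3 theta derivatives + 3 sigma derivatives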