From ffae44cd3a861fc9c0da31af28cfac059c742589 Mon Sep 17 00:00:00 2001 From: Aaron Date: Tue, 15 Nov 2022 08:05:49 -0800 Subject: [PATCH 01/11] Increment minor version --- HISTORY.rst | 3 +++ enterprise_extensions/__init__.py | 2 +- setup.cfg | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/HISTORY.rst b/HISTORY.rst index a9b42375..0db683e8 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -1,6 +1,9 @@ ======= History ======= +2.4.1 (2022-02-10) +Add support for new noise definitions in WidebandTimingModel. + 2.4.0 (2022-02-10) Use Timing Package (Tempo,Tempo2,Pint) definition of EQUAD. Enterprise has broken backwards compatibility, and here we use the `tnequad` flag to switch on diff --git a/enterprise_extensions/__init__.py b/enterprise_extensions/__init__.py index 3d67cd6b..54499df3 100644 --- a/enterprise_extensions/__init__.py +++ b/enterprise_extensions/__init__.py @@ -1 +1 @@ -__version__ = "2.4.0" +__version__ = "2.4.1" diff --git a/setup.cfg b/setup.cfg index f9a47209..e59ea7a0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 2.4.0 +current_version = 2.4.1 commit = True tag = True From 9c986defc83c8b440b2f5e1def6f26b861365933 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 21:44:11 +0000 Subject: [PATCH 02/11] Bump actions/download-artifact from 2 to 4.1.7 in /.github/workflows Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 2 to 4.1.7. - [Release notes](https://github.com/actions/download-artifact/releases) - [Commits](https://github.com/actions/download-artifact/compare/v2...v4.1.7) --- updated-dependencies: - dependency-name: actions/download-artifact dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- .github/workflows/ci_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_test.yml b/.github/workflows/ci_test.yml index 9197fe92..a08338cb 100644 --- a/.github/workflows/ci_test.yml +++ b/.github/workflows/ci_test.yml @@ -121,7 +121,7 @@ jobs: python -m pip install --upgrade pip pip install setuptools wheel twine - name: Download wheel/dist from build - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v4.1.7 with: name: dist path: dist From e1e51ad5a3eca31e661962a2a8ee51afced3887d Mon Sep 17 00:00:00 2001 From: Aaron Date: Fri, 6 Dec 2024 14:28:20 -0800 Subject: [PATCH 03/11] Move CompareTimingModels to timing.py --- enterprise_extensions/model_utils.py | 92 --------------------------- enterprise_extensions/timing.py | 94 ++++++++++++++++++++++++++++ 2 files changed, 94 insertions(+), 92 deletions(-) diff --git a/enterprise_extensions/model_utils.py b/enterprise_extensions/model_utils.py index d7a307fd..df7fbe86 100644 --- a/enterprise_extensions/model_utils.py +++ b/enterprise_extensions/model_utils.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- -import time import matplotlib.pyplot as plt import numpy as np @@ -11,7 +10,6 @@ except ImportError: from emcee.autocorr import integrated_time as acor -from enterprise_extensions import models # Log-spaced frequncies @@ -294,93 +292,3 @@ def mask_filter(psr, mask): psr._planetssb = psr.planetssb[mask, :, :] psr.sort_data() - - -class CompareTimingModels(): - """ - Compare difference between the usual and marginalized timing models. - - After instantiating, the __call__() method can be used for sampling for any number of points. - To see the results, use the results() method. 
- - :param psrs: Pulsar object containing pulsars from model - :param model_name: String name of model to test. Model must be defined in enterprise_extensions.models. - :param abs_tol: absolute tolerance for error between timing models (default 1e-3), set to None to bypass errors - :param rel_tol: relative tolerance for error between timing models (default 1e-6), set to None to bypass errors - :param dense: use the dense cholesky algorithm over sparse - """ - - def __init__(self, psrs, model_name='model_1', abs_tol=1e-3, rel_tol=1e-6, dense=True, **kwargs): - model = getattr(models, model_name) - self.abs_tol = abs_tol - self.rel_tol = rel_tol - if dense: - self.pta_marg = model(psrs, tm_marg=True, dense_like=True, **kwargs) # marginalized model - else: - self.pta_marg = model(psrs, tm_marg=True, **kwargs) # marginalized model - self.pta_norm = model(psrs, **kwargs) # normal model - self.tm_correction = 0 - for psr in psrs: - self.tm_correction -= 0.5 * np.log(1e40) * psr.Mmat.shape[1] - self.abs_err = [] - self.rel_err = [] - self.count = 0 - - def check_timing(self, number=10_000): - print('Timing sample creation...') - start = time.time() - for __ in range(number): - x0 = np.hstack([p.sample() for p in self.pta_marg.params]) - end = time.time() - sample_time = end - start - print('Sampling {0} points took {1} seconds.'.format(number, sample_time)) - - print('Timing MarginalizedTimingModel...') - start = time.time() - for __ in range(number): - x0 = np.hstack([p.sample() for p in self.pta_marg.params]) - self.pta_marg.get_lnlikelihood(x0) - end = time.time() - time_marg = end - start - sample_time # remove sampling time from total time taken - print('Sampling {0} points took {1} seconds.'.format(number, time_marg)) - - print('Timing TimingModel...') - start = time.time() - for __ in range(number): - x0 = np.hstack([p.sample() for p in self.pta_marg.params]) - self.pta_norm.get_lnlikelihood(x0) - end = time.time() - time_norm = end - start - sample_time # remove sampling time from total time taken - print('Sampling {0} points took {1} seconds.'.format(number, time_norm)) - - res = time_norm / time_marg - print('MarginalizedTimingModel is {0} times faster than TimingModel after {1} points.'.format(res, number)) - return res - - def get_sample_point(self): - x0 = np.hstack([p.sample() for p in self.pta_marg.params]) - return x0 - - def __call__(self, x0): - res_norm = self.pta_norm.get_lnlikelihood(x0) - res_marg = self.pta_marg.get_lnlikelihood(x0) - abs_err = np.abs(res_marg - res_norm) - rel_err = abs_err / res_norm - self.abs_err.append(abs_err) - self.rel_err.append(rel_err) - self.count += 1 - if self.abs_tol is not None and abs_err > self.abs_tol: - abs_raise = 'Absolute error is {0} at {1} which is larger than abs_tol of {2}.'.format( - abs_err, x0, self.abs_tol) - raise ValueError(abs_raise) - elif self.rel_tol is not None and rel_err > self.rel_tol: - rel_raise = 'Relative error is {0} at {1} which is larger than rel_tol of {2}.'.format( - rel_err, x0, self.rel_tol) - raise ValueError(rel_raise) - return res_norm - - def results(self): - print('Number of points evaluated:', self.count) - print('Maximum absolute error:', np.max(self.abs_err)) - print('Maximum relative error:', np.max(self.rel_err)) - return self.abs_err, self.rel_err diff --git a/enterprise_extensions/timing.py b/enterprise_extensions/timing.py index a1294971..e2506575 100644 --- a/enterprise_extensions/timing.py +++ b/enterprise_extensions/timing.py @@ -1,10 +1,14 @@ # -*- coding: utf-8 -*- +import time + from 
collections import OrderedDict import numpy as np from enterprise.signals import deterministic_signals, parameter, signal_base +from enterprise_extensions import models + # timing model delay @@ -65,3 +69,93 @@ def timing_block(tmparam_list=['RAJ', 'DECJ', 'F0', 'F1', tm = deterministic_signals.Deterministic(tm_func, name='timing model') return tm + + +class CompareTimingModels(): + """ + Compare difference between the usual and marginalized timing models. + + After instantiating, the __call__() method can be used for sampling for any number of points. + To see the results, use the results() method. + + :param psrs: Pulsar object containing pulsars from model + :param model_name: String name of model to test. Model must be defined in enterprise_extensions.models. + :param abs_tol: absolute tolerance for error between timing models (default 1e-3), set to None to bypass errors + :param rel_tol: relative tolerance for error between timing models (default 1e-6), set to None to bypass errors + :param dense: use the dense cholesky algorithm over sparse + """ + + def __init__(self, psrs, model_name='model_1', abs_tol=1e-3, rel_tol=1e-6, dense=True, **kwargs): + model = getattr(models, model_name) + self.abs_tol = abs_tol + self.rel_tol = rel_tol + if dense: + self.pta_marg = model(psrs, tm_marg=True, dense_like=True, **kwargs) # marginalized model + else: + self.pta_marg = model(psrs, tm_marg=True, **kwargs) # marginalized model + self.pta_norm = model(psrs, **kwargs) # normal model + self.tm_correction = 0 + for psr in psrs: + self.tm_correction -= 0.5 * np.log(1e40) * psr.Mmat.shape[1] + self.abs_err = [] + self.rel_err = [] + self.count = 0 + + def check_timing(self, number=10_000): + print('Timing sample creation...') + start = time.time() + for __ in range(number): + x0 = np.hstack([p.sample() for p in self.pta_marg.params]) + end = time.time() + sample_time = end - start + print('Sampling {0} points took {1} seconds.'.format(number, sample_time)) + + print('Timing MarginalizedTimingModel...') + start = time.time() + for __ in range(number): + x0 = np.hstack([p.sample() for p in self.pta_marg.params]) + self.pta_marg.get_lnlikelihood(x0) + end = time.time() + time_marg = end - start - sample_time # remove sampling time from total time taken + print('Sampling {0} points took {1} seconds.'.format(number, time_marg)) + + print('Timing TimingModel...') + start = time.time() + for __ in range(number): + x0 = np.hstack([p.sample() for p in self.pta_marg.params]) + self.pta_norm.get_lnlikelihood(x0) + end = time.time() + time_norm = end - start - sample_time # remove sampling time from total time taken + print('Sampling {0} points took {1} seconds.'.format(number, time_norm)) + + res = time_norm / time_marg + print('MarginalizedTimingModel is {0} times faster than TimingModel after {1} points.'.format(res, number)) + return res + + def get_sample_point(self): + x0 = np.hstack([p.sample() for p in self.pta_marg.params]) + return x0 + + def __call__(self, x0): + res_norm = self.pta_norm.get_lnlikelihood(x0) + res_marg = self.pta_marg.get_lnlikelihood(x0) + abs_err = np.abs(res_marg - res_norm) + rel_err = abs_err / res_norm + self.abs_err.append(abs_err) + self.rel_err.append(rel_err) + self.count += 1 + if self.abs_tol is not None and abs_err > self.abs_tol: + abs_raise = 'Absolute error is {0} at {1} which is larger than abs_tol of {2}.'.format( + abs_err, x0, self.abs_tol) + raise ValueError(abs_raise) + elif self.rel_tol is not None and rel_err > self.rel_tol: + rel_raise = 'Relative error is {0} 
at {1} which is larger than rel_tol of {2}.'.format( + rel_err, x0, self.rel_tol) + raise ValueError(rel_raise) + return res_norm + + def results(self): + print('Number of points evaluated:', self.count) + print('Maximum absolute error:', np.max(self.abs_err)) + print('Maximum relative error:', np.max(self.rel_err)) + return self.abs_err, self.rel_err From 19534329bab74dbbb85b4e12cda46f6645ee71f8 Mon Sep 17 00:00:00 2001 From: Aaron Date: Sun, 8 Dec 2024 10:39:57 -0800 Subject: [PATCH 04/11] update ci python versions --- .github/workflows/ci_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_test.yml b/.github/workflows/ci_test.yml index 9197fe92..a8d4ada9 100644 --- a/.github/workflows/ci_test.yml +++ b/.github/workflows/ci_test.yml @@ -17,7 +17,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-latest] - python-version: [3.7, 3.8, 3.9, '3.10'] + python-version: ['3.9', '3.10', '3.11', '3.12'] steps: - name: Checkout repository From 270215723923356c35a068d38ae0a687496cfe3d Mon Sep 17 00:00:00 2001 From: Aaron Date: Sun, 8 Dec 2024 10:40:36 -0800 Subject: [PATCH 05/11] change to x86 runners for Mac OS --- .github/workflows/ci_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_test.yml b/.github/workflows/ci_test.yml index a8d4ada9..51257989 100644 --- a/.github/workflows/ci_test.yml +++ b/.github/workflows/ci_test.yml @@ -16,7 +16,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-latest] + os: [ubuntu-latest, macos-13] python-version: ['3.9', '3.10', '3.11', '3.12'] steps: From a69c8d0f5e2d8d3dab9eeec061eb2bbe5cbfbadc Mon Sep 17 00:00:00 2001 From: Aaron Date: Sun, 8 Dec 2024 10:59:39 -0800 Subject: [PATCH 06/11] replace interp2d with RegularGridInterpolator --- enterprise_extensions/empirical_distr.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/enterprise_extensions/empirical_distr.py b/enterprise_extensions/empirical_distr.py index d8d14030..b208e143 100644 --- a/enterprise_extensions/empirical_distr.py +++ b/enterprise_extensions/empirical_distr.py @@ -10,7 +10,7 @@ sklearn_available=True except ModuleNotFoundError: sklearn_available=False -from scipy.interpolate import interp1d, interp2d +from scipy.interpolate import interp1d, interp2d, RegularGridInterpolator logger = logging.getLogger(__name__) @@ -175,7 +175,8 @@ def __init__(self, param_names, samples, minvals=None, maxvals=None, bandwidth=0 self._Nbins = [yvals.size for ii in range(xvals.size)] scores = np.array([self.kde.score(np.array([xvals[ii], yvals[jj]]).reshape((1, 2))) for ii in range(xvals.size) for jj in range(yvals.size)]) # interpolate within prior - self._logpdf = interp2d(xvals, yvals, scores, kind='linear', fill_value=-1000) + self._logpdf = RegularGridInterpolator((xvals, yvals), scores, method='linear', bounds_error=False, fill_value=-1000) + def draw(self): params = self.kde.sample(1).T From 97c4b7db586f4713a154e1f54d6d7a2247043aeb Mon Sep 17 00:00:00 2001 From: Aaron Date: Sun, 8 Dec 2024 11:16:43 -0800 Subject: [PATCH 07/11] fix linting issues --- enterprise_extensions/empirical_distr.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/enterprise_extensions/empirical_distr.py b/enterprise_extensions/empirical_distr.py index b208e143..b0c83472 100644 --- a/enterprise_extensions/empirical_distr.py +++ b/enterprise_extensions/empirical_distr.py @@ -10,7 +10,7 @@ sklearn_available=True except ModuleNotFoundError: sklearn_available=False -from 
scipy.interpolate import interp1d, interp2d, RegularGridInterpolator +from scipy.interpolate import interp1d, RegularGridInterpolator logger = logging.getLogger(__name__) @@ -177,7 +177,6 @@ def __init__(self, param_names, samples, minvals=None, maxvals=None, bandwidth=0 # interpolate within prior self._logpdf = RegularGridInterpolator((xvals, yvals), scores, method='linear', bounds_error=False, fill_value=-1000) - def draw(self): params = self.kde.sample(1).T return params.squeeze() From 4efde0a3beba38b9cc17f5e6a3d459527285485a Mon Sep 17 00:00:00 2001 From: Aaron Date: Sun, 8 Dec 2024 17:14:48 -0800 Subject: [PATCH 08/11] fix shape of scores for RegularGridInterpolator --- enterprise_extensions/empirical_distr.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/enterprise_extensions/empirical_distr.py b/enterprise_extensions/empirical_distr.py index b0c83472..d3d4f5e3 100644 --- a/enterprise_extensions/empirical_distr.py +++ b/enterprise_extensions/empirical_distr.py @@ -173,7 +173,7 @@ def __init__(self, param_names, samples, minvals=None, maxvals=None, bandwidth=0 xvals = np.linspace(minvals[0], maxvals[0], num=nbins) yvals = np.linspace(minvals[1], maxvals[1], num=nbins) self._Nbins = [yvals.size for ii in range(xvals.size)] - scores = np.array([self.kde.score(np.array([xvals[ii], yvals[jj]]).reshape((1, 2))) for ii in range(xvals.size) for jj in range(yvals.size)]) + scores = np.array([self.kde.score(np.array([xvals[ii], yvals[jj]]).reshape((1, 2))) for ii in range(xvals.size) for jj in range(yvals.size)]).reshape(len(xvals), len(yvals)) # interpolate within prior self._logpdf = RegularGridInterpolator((xvals, yvals), scores, method='linear', bounds_error=False, fill_value=-1000) From d4033f5eef83962fba5f65496c422fce092a2c7e Mon Sep 17 00:00:00 2001 From: Aaron Date: Sun, 8 Dec 2024 22:12:54 -0800 Subject: [PATCH 09/11] downgrade codecov to try to get it to work on mac --- .github/workflows/ci_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_test.yml b/.github/workflows/ci_test.yml index 51257989..07da3119 100644 --- a/.github/workflows/ci_test.yml +++ b/.github/workflows/ci_test.yml @@ -53,7 +53,7 @@ jobs: - name: Test with pytest run: make test - name: Codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v2 #with: # fail_ci_if_error: true From bc3d9181a3f643f1386888d6d5971ff72e42c124 Mon Sep 17 00:00:00 2001 From: Aaron Date: Sun, 8 Dec 2024 22:27:09 -0800 Subject: [PATCH 10/11] remove codecov from macos --- .github/workflows/ci_test.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci_test.yml b/.github/workflows/ci_test.yml index 07da3119..aee0f6cf 100644 --- a/.github/workflows/ci_test.yml +++ b/.github/workflows/ci_test.yml @@ -53,7 +53,8 @@ jobs: - name: Test with pytest run: make test - name: Codecov - uses: codecov/codecov-action@v2 + if: runner.os != 'macOS' + uses: codecov/codecov-action@v3 #with: # fail_ci_if_error: true From 95fe53bbee81026f51ec3416eee086bc6b8d8306 Mon Sep 17 00:00:00 2001 From: Aaron Date: Mon, 9 Dec 2024 21:29:32 -0800 Subject: [PATCH 11/11] move CompareTimingModels to a new file --- enterprise_extensions/checks.py | 94 +++++++++++++++++++++++++++++++++ enterprise_extensions/timing.py | 94 --------------------------------- 2 files changed, 94 insertions(+), 94 deletions(-) create mode 100644 enterprise_extensions/checks.py diff --git a/enterprise_extensions/checks.py b/enterprise_extensions/checks.py new file mode 100644 index 
00000000..f5bedbba --- /dev/null +++ b/enterprise_extensions/checks.py @@ -0,0 +1,94 @@ +import time +import numpy as np + +from enterprise_extensions import models + + +class CompareTimingModels(): + """ + Compare difference between the usual and marginalized timing models. + + After instantiating, the __call__() method can be used for sampling for any number of points. + To see the results, use the results() method. + + :param psrs: Pulsar object containing pulsars from model + :param model_name: String name of model to test. Model must be defined in enterprise_extensions.models. + :param abs_tol: absolute tolerance for error between timing models (default 1e-3), set to None to bypass errors + :param rel_tol: relative tolerance for error between timing models (default 1e-6), set to None to bypass errors + :param dense: use the dense cholesky algorithm over sparse + """ + + def __init__(self, psrs, model_name='model_1', abs_tol=1e-3, rel_tol=1e-6, dense=True, **kwargs): + model = getattr(models, model_name) + self.abs_tol = abs_tol + self.rel_tol = rel_tol + if dense: + self.pta_marg = model(psrs, tm_marg=True, dense_like=True, **kwargs) # marginalized model + else: + self.pta_marg = model(psrs, tm_marg=True, **kwargs) # marginalized model + self.pta_norm = model(psrs, **kwargs) # normal model + self.tm_correction = 0 + for psr in psrs: + self.tm_correction -= 0.5 * np.log(1e40) * psr.Mmat.shape[1] + self.abs_err = [] + self.rel_err = [] + self.count = 0 + + def check_timing(self, number=10_000): + print('Timing sample creation...') + start = time.time() + for __ in range(number): + x0 = np.hstack([p.sample() for p in self.pta_marg.params]) + end = time.time() + sample_time = end - start + print('Sampling {0} points took {1} seconds.'.format(number, sample_time)) + + print('Timing MarginalizedTimingModel...') + start = time.time() + for __ in range(number): + x0 = np.hstack([p.sample() for p in self.pta_marg.params]) + self.pta_marg.get_lnlikelihood(x0) + end = time.time() + time_marg = end - start - sample_time # remove sampling time from total time taken + print('Sampling {0} points took {1} seconds.'.format(number, time_marg)) + + print('Timing TimingModel...') + start = time.time() + for __ in range(number): + x0 = np.hstack([p.sample() for p in self.pta_marg.params]) + self.pta_norm.get_lnlikelihood(x0) + end = time.time() + time_norm = end - start - sample_time # remove sampling time from total time taken + print('Sampling {0} points took {1} seconds.'.format(number, time_norm)) + + res = time_norm / time_marg + print('MarginalizedTimingModel is {0} times faster than TimingModel after {1} points.'.format(res, number)) + return res + + def get_sample_point(self): + x0 = np.hstack([p.sample() for p in self.pta_marg.params]) + return x0 + + def __call__(self, x0): + res_norm = self.pta_norm.get_lnlikelihood(x0) + res_marg = self.pta_marg.get_lnlikelihood(x0) + abs_err = np.abs(res_marg - res_norm) + rel_err = abs_err / res_norm + self.abs_err.append(abs_err) + self.rel_err.append(rel_err) + self.count += 1 + if self.abs_tol is not None and abs_err > self.abs_tol: + abs_raise = 'Absolute error is {0} at {1} which is larger than abs_tol of {2}.'.format( + abs_err, x0, self.abs_tol) + raise ValueError(abs_raise) + elif self.rel_tol is not None and rel_err > self.rel_tol: + rel_raise = 'Relative error is {0} at {1} which is larger than rel_tol of {2}.'.format( + rel_err, x0, self.rel_tol) + raise ValueError(rel_raise) + return res_norm + + def results(self): + print('Number of points 
evaluated:', self.count) + print('Maximum absolute error:', np.max(self.abs_err)) + print('Maximum relative error:', np.max(self.rel_err)) + return self.abs_err, self.rel_err diff --git a/enterprise_extensions/timing.py b/enterprise_extensions/timing.py index e2506575..a1294971 100644 --- a/enterprise_extensions/timing.py +++ b/enterprise_extensions/timing.py @@ -1,14 +1,10 @@ # -*- coding: utf-8 -*- -import time - from collections import OrderedDict import numpy as np from enterprise.signals import deterministic_signals, parameter, signal_base -from enterprise_extensions import models - # timing model delay @@ -69,93 +65,3 @@ def timing_block(tmparam_list=['RAJ', 'DECJ', 'F0', 'F1', tm = deterministic_signals.Deterministic(tm_func, name='timing model') return tm - - -class CompareTimingModels(): - """ - Compare difference between the usual and marginalized timing models. - - After instantiating, the __call__() method can be used for sampling for any number of points. - To see the results, use the results() method. - - :param psrs: Pulsar object containing pulsars from model - :param model_name: String name of model to test. Model must be defined in enterprise_extensions.models. - :param abs_tol: absolute tolerance for error between timing models (default 1e-3), set to None to bypass errors - :param rel_tol: relative tolerance for error between timing models (default 1e-6), set to None to bypass errors - :param dense: use the dense cholesky algorithm over sparse - """ - - def __init__(self, psrs, model_name='model_1', abs_tol=1e-3, rel_tol=1e-6, dense=True, **kwargs): - model = getattr(models, model_name) - self.abs_tol = abs_tol - self.rel_tol = rel_tol - if dense: - self.pta_marg = model(psrs, tm_marg=True, dense_like=True, **kwargs) # marginalized model - else: - self.pta_marg = model(psrs, tm_marg=True, **kwargs) # marginalized model - self.pta_norm = model(psrs, **kwargs) # normal model - self.tm_correction = 0 - for psr in psrs: - self.tm_correction -= 0.5 * np.log(1e40) * psr.Mmat.shape[1] - self.abs_err = [] - self.rel_err = [] - self.count = 0 - - def check_timing(self, number=10_000): - print('Timing sample creation...') - start = time.time() - for __ in range(number): - x0 = np.hstack([p.sample() for p in self.pta_marg.params]) - end = time.time() - sample_time = end - start - print('Sampling {0} points took {1} seconds.'.format(number, sample_time)) - - print('Timing MarginalizedTimingModel...') - start = time.time() - for __ in range(number): - x0 = np.hstack([p.sample() for p in self.pta_marg.params]) - self.pta_marg.get_lnlikelihood(x0) - end = time.time() - time_marg = end - start - sample_time # remove sampling time from total time taken - print('Sampling {0} points took {1} seconds.'.format(number, time_marg)) - - print('Timing TimingModel...') - start = time.time() - for __ in range(number): - x0 = np.hstack([p.sample() for p in self.pta_marg.params]) - self.pta_norm.get_lnlikelihood(x0) - end = time.time() - time_norm = end - start - sample_time # remove sampling time from total time taken - print('Sampling {0} points took {1} seconds.'.format(number, time_norm)) - - res = time_norm / time_marg - print('MarginalizedTimingModel is {0} times faster than TimingModel after {1} points.'.format(res, number)) - return res - - def get_sample_point(self): - x0 = np.hstack([p.sample() for p in self.pta_marg.params]) - return x0 - - def __call__(self, x0): - res_norm = self.pta_norm.get_lnlikelihood(x0) - res_marg = self.pta_marg.get_lnlikelihood(x0) - abs_err = np.abs(res_marg - 
res_norm) - rel_err = abs_err / res_norm - self.abs_err.append(abs_err) - self.rel_err.append(rel_err) - self.count += 1 - if self.abs_tol is not None and abs_err > self.abs_tol: - abs_raise = 'Absolute error is {0} at {1} which is larger than abs_tol of {2}.'.format( - abs_err, x0, self.abs_tol) - raise ValueError(abs_raise) - elif self.rel_tol is not None and rel_err > self.rel_tol: - rel_raise = 'Relative error is {0} at {1} which is larger than rel_tol of {2}.'.format( - rel_err, x0, self.rel_tol) - raise ValueError(rel_raise) - return res_norm - - def results(self): - print('Number of points evaluated:', self.count) - print('Maximum absolute error:', np.max(self.abs_err)) - print('Maximum relative error:', np.max(self.rel_err)) - return self.abs_err, self.rel_err
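
Notes on the interpolation change (patches 06-08): the move away from scipy.interpolate.interp2d (deprecated and removed in recent SciPy releases) to RegularGridInterpolator changes both the expected shape of the score grid and the call convention. The following is a minimal standalone sketch of that pattern; the names xvals, yvals and score_fn are illustrative placeholders, not repository code.

    import numpy as np
    from scipy.interpolate import RegularGridInterpolator

    xvals = np.linspace(0.0, 1.0, num=41)
    yvals = np.linspace(-2.0, 2.0, num=41)

    def score_fn(x, y):
        # stand-in for KernelDensity.score() evaluated on a single 2-D point
        return -0.5 * (x**2 + y**2)

    # RegularGridInterpolator expects values shaped (len(xvals), len(yvals)),
    # which is why patch 08 adds the reshape to the scores array.
    scores = np.array([score_fn(x, y) for x in xvals
                       for y in yvals]).reshape(len(xvals), len(yvals))

    logpdf = RegularGridInterpolator((xvals, yvals), scores, method='linear',
                                     bounds_error=False, fill_value=-1000)

    # Unlike interp2d, which was called as logpdf(x, y), the new object takes
    # the point coordinates together and returns an array:
    value = float(logpdf((0.3, 0.5)))

For reference, a usage sketch of checks.CompareTimingModels as relocated by patch 11; here psrs is assumed to be a list of already-loaded enterprise Pulsar objects, and the tolerance values simply echo the class defaults.

    from enterprise_extensions.checks import CompareTimingModels

    compare = CompareTimingModels(psrs, model_name='model_1',
                                  abs_tol=1e-3, rel_tol=1e-6, dense=True)
    compare.check_timing(number=1000)   # benchmark marginalized vs. normal likelihood
    x0 = compare.get_sample_point()
    lnlike = compare(x0)                # raises ValueError if the two models disagree
    abs_err, rel_err = compare.results()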