Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

WIP: Performance improvements #1642

Closed
wants to merge 12 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 10 additions & 14 deletions src/pint/models/astrometry.py
Original file line number Diff line number Diff line change
Expand Up @@ -203,10 +203,10 @@ def d_delay_astrometry_d_PX(self, toas, param="", acc_delay=None):
rd = self.get_d_delay_quantities(toas)

px_r = np.sqrt(rd["ssb_obs_r"] ** 2 - rd["in_psr_obs"] ** 2)
dd_dpx = 0.5 * (px_r**2 / (u.AU * const.c)) * (u.mas / u.radian)
dd_dpx = 0.5 * px_r**2 * ((u.mas / u.radian) / (u.AU * const.c))

# We want to return sec / mas
return dd_dpx.decompose(u.si.bases) / u.mas
return dd_dpx.to(u.s) / u.mas

def d_delay_astrometry_d_POSEPOCH(self, toas, param="", acc_delay=None):
"""Calculate the derivative wrt POSEPOCH"""
Expand Down Expand Up @@ -415,7 +415,7 @@ def d_delay_astrometry_d_RAJ(self, toas, param="", acc_delay=None):
)
dd_draj = rd["ssb_obs_r"] * geom / (const.c * u.radian)

return dd_draj.decompose(u.si.bases)
return dd_draj.to(u.s / u.rad)

def d_delay_astrometry_d_DECJ(self, toas, param="", acc_delay=None):
"""Calculate the derivative wrt DECJ
Expand All @@ -432,7 +432,7 @@ def d_delay_astrometry_d_DECJ(self, toas, param="", acc_delay=None):
) - np.sin(rd["earth_dec"]) * np.cos(psr_dec)
dd_ddecj = rd["ssb_obs_r"] * geom / (const.c * u.radian)

return dd_ddecj.decompose(u.si.bases)
return dd_ddecj.to(u.s / u.rad)

def d_delay_astrometry_d_PMRA(self, toas, param="", acc_delay=None):
"""Calculate the derivative wrt PMRA
Expand All @@ -448,10 +448,9 @@ def d_delay_astrometry_d_PMRA(self, toas, param="", acc_delay=None):
geom = np.cos(rd["earth_dec"]) * np.sin(psr_ra - rd["earth_ra"])

deriv = rd["ssb_obs_r"] * geom * te / (const.c * u.radian)
dd_dpmra = deriv * u.mas / u.year

# We want to return sec / (mas / yr)
return dd_dpmra.decompose(u.si.bases) / (u.mas / u.year)
return deriv.to(u.s * u.year / u.mas)

def d_delay_astrometry_d_PMDEC(self, toas, param="", acc_delay=None):
"""Calculate the derivative wrt PMDEC
Expand All @@ -470,10 +469,9 @@ def d_delay_astrometry_d_PMDEC(self, toas, param="", acc_delay=None):
) - np.cos(psr_dec) * np.sin(rd["earth_dec"])

deriv = rd["ssb_obs_r"] * geom * te / (const.c * u.radian)
dd_dpmdec = deriv * u.mas / u.year

# We want to return sec / (mas / yr)
return dd_dpmdec.decompose(u.si.bases) / (u.mas / u.year)
return deriv.to(u.s * u.year / u.mas)

def change_posepoch(self, new_epoch):
"""Change POSEPOCH to a new value and update the position accordingly.
Expand Down Expand Up @@ -823,7 +821,7 @@ def d_delay_astrometry_d_ELONG(self, toas, param="", acc_delay=None):
)
dd_delong = rd["ssb_obs_r"] * geom / (const.c * u.radian)

return dd_delong.decompose(u.si.bases)
return dd_delong.to(u.s / u.rad)

def d_delay_astrometry_d_ELAT(self, toas, param="", acc_delay=None):
"""Calculate the derivative wrt DECJ
Expand All @@ -840,7 +838,7 @@ def d_delay_astrometry_d_ELAT(self, toas, param="", acc_delay=None):
) - np.sin(rd["earth_elat"]) * np.cos(psr_elat)
dd_delat = rd["ssb_obs_r"] * geom / (const.c * u.radian)

return dd_delat.decompose(u.si.bases)
return dd_delat.to(u.s / u.rad)

def d_delay_astrometry_d_PMELONG(self, toas, param="", acc_delay=None):
"""Calculate the derivative wrt PMRA
Expand All @@ -857,10 +855,9 @@ def d_delay_astrometry_d_PMELONG(self, toas, param="", acc_delay=None):
geom = np.cos(rd["earth_elat"]) * np.sin(psr_elong - rd["earth_elong"])

deriv = rd["ssb_obs_r"] * geom * te / (const.c * u.radian)
dd_dpmelong = deriv * u.mas / u.year

# We want to return sec / (mas / yr)
return dd_dpmelong.decompose(u.si.bases) / (u.mas / u.year)
return deriv.to(u.s * u.year / u.mas)

def d_delay_astrometry_d_PMELAT(self, toas, param="", acc_delay=None):
"""Calculate the derivative wrt PMDEC
Expand All @@ -879,10 +876,9 @@ def d_delay_astrometry_d_PMELAT(self, toas, param="", acc_delay=None):
) - np.cos(psr_elat) * np.sin(rd["earth_elat"])

deriv = rd["ssb_obs_r"] * geom * te / (const.c * u.radian)
dd_dpmelat = deriv * u.mas / u.year

# We want to return sec / (mas / yr)
return dd_dpmelat.decompose(u.si.bases) / (u.mas / u.year)
return deriv.to(u.s * u.year / u.mas)

def print_par(self, format="pint"):
result = ""
Expand Down
22 changes: 16 additions & 6 deletions src/pint/models/noise_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -140,24 +140,34 @@ def validate(self):
raise ValueError(f"'{el}' have duplicated keys and key values.")

def scale_toa_sigma(self, toas):
sigma_scaled = toas.table["error"].quantity.copy()
# @abhisrkckl : This function turned out to be a performance bottleneck.
# So I have changed this code so as to minimize Quantity operations.

sigma_scaled = (
self._parent._toa_errors_cache
if self._parent.locked
else toas.table["error"].quantity.to_value(u.s)
)

for equad_name in self.EQUADs:
equad = getattr(self, equad_name)
if equad.quantity is None:
continue
mask = equad.select_toa_mask(toas)
if np.any(mask):
sigma_scaled[mask] = np.hypot(sigma_scaled[mask], equad.quantity)
if self._parent.locked or np.any(mask):
sigma_scaled[mask] = np.hypot(
sigma_scaled[mask], equad.quantity.to_value(u.s)
)
else:
warnings.warn(f"EQUAD {equad} has no TOAs")
for efac_name in self.EFACs:
efac = getattr(self, efac_name)
mask = efac.select_toa_mask(toas)
if np.any(mask):
sigma_scaled[mask] *= efac.quantity
if self._parent.locked or np.any(mask):
sigma_scaled[mask] *= efac.quantity.to_value(u.dimensionless_unscaled)
else:
warnings.warn(f"EFAC {efac} has no TOAs")
return sigma_scaled
return sigma_scaled << u.s

def sigma_scaled_cov_matrix(self, toas):
scaled_sigma = self.scale_toa_sigma(toas).to(u.s).value ** 2
Expand Down
22 changes: 22 additions & 0 deletions src/pint/models/parameter.py
Original file line number Diff line number Diff line change
Expand Up @@ -1809,6 +1809,9 @@ def __init__(
self.is_prefix = True
self._parfile_name = self.origin_name

self.__cache_mask = None
self.__cache_toas = None

def __repr__(self):
out = f"{self.__class__.__name__}({self.name}"
if self.key is not None:
Expand Down Expand Up @@ -2020,6 +2023,9 @@ def select_toa_mask(self, toas):
array
An array of TOA indices selected by the mask.
"""
if self.__cache_mask is not None and self.__cache_toas is toas:
return self.__cache_mask

if len(self.key_value) == 1:
if not hasattr(self, "toa_selector"):
self.toa_selector = TOASelect(is_range=False, use_hash=True)
Expand Down Expand Up @@ -2055,6 +2061,22 @@ def select_toa_mask(self, toas):
select_idx = self.toa_selector.get_select_index(condition, col)
return select_idx[self.name]

def cache_toa_mask(self, toas):
    """Cache the TOA selection mask for this parameter against `toas`.

    Subsequent `select_toa_mask()` calls for the same TOAs object (compared
    by identity) return the cached mask, skipping the selection machinery.
    This will lead to exceptions and/or wrong results if the TOAs object is
    mutated after this. This can be undone using the
    `clear_toa_mask_cache()` method.

    Parameters
    ----------
    toas : pint.toa.TOAs
        The TOAs object to cache the selection mask for.

    Raises
    ------
    ValueError
        If the selection mask is empty; an empty mask is almost certainly a
        configuration error and is never cached.
    """
    # Compute the mask once (the original computed it twice: once for the
    # emptiness check and once for the cache).
    mask = self.select_toa_mask(toas)
    # `select_toa_mask` returns an array of TOA *indices*, so emptiness is a
    # length check. The previous `.any()` test was wrong for the edge case
    # of a mask containing only index 0 (`np.array([0]).any()` is False).
    # Raise instead of `assert`: asserts are stripped under `python -O`.
    if len(mask) == 0:
        raise ValueError(f"Empty TOA mask found for {self.name}. Will not cache this.")
    self.__cache_mask = mask
    self.__cache_toas = toas

def clear_toa_mask_cache(self):
    """Forget any TOA mask stored by `cache_toa_mask()`, restoring the
    normal (uncached) `select_toa_mask()` behavior."""
    self.__cache_mask = self.__cache_toas = None

def compare_key_value(self, other_param):
"""Compare if the key and value are the same with the other parameter.

Expand Down
6 changes: 3 additions & 3 deletions src/pint/models/stand_alone_psr_binaries/DD_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -370,7 +370,7 @@ def d_beta_d_ECC(self):
de/dECC = 1
"""
eTheta = self.eTheta()
a1 = (self.a1()).decompose()
a1 = (self.a1()).to(u.m)
sinOmg = np.sin(self.omega())
cosOmg = np.cos(self.omega())
with u.set_enabled_equivalencies(u.dimensionless_angles()):
Expand All @@ -392,7 +392,7 @@ def d_beta_d_EDOT(self):
de/dEDOT = tt0
"""
eTheta = self.eTheta()
a1 = (self.a1()).decompose()
a1 = (self.a1()).to(u.m)
sinOmg = np.sin(self.omega())
cosOmg = np.cos(self.omega())
with u.set_enabled_equivalencies(u.dimensionless_angles()):
Expand Down Expand Up @@ -642,7 +642,7 @@ def delayInverse(self):
+ 1.0 / 2 * nHat**2 * Dre * Drepp
- 1.0 / 2 * e * sinE / (1 - e * cosE) * nHat**2 * Dre * Drep
)
).decompose()
).to(u.s)

def d_delayI_d_par(self, par):
"""Derivative on delay inverse."""
Expand Down
8 changes: 5 additions & 3 deletions src/pint/models/stand_alone_psr_binaries/ELL1_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,9 @@ def orbits_ELL1(self):
PB = (self.pb()).to("second")
PBDOT = self.pbdot()
ttasc = self.ttasc()
return (ttasc / PB - 0.5 * PBDOT * (ttasc / PB) ** 2).decompose()
return (ttasc / PB - 0.5 * PBDOT * (ttasc / PB) ** 2).to(
u.dimensionless_unscaled
)

def d_Phi_d_TASC(self):
"""dPhi/dTASC"""
Expand Down Expand Up @@ -162,7 +164,7 @@ def delayI(self):
return (
Dre
* (1 - nhat * Drep + (nhat * Drep) ** 2 + 1.0 / 2 * nhat**2 * Dre * Drepp)
).decompose()
).to(u.s)

def nhat(self):
return 2 * np.pi / self.pb()
Expand Down Expand Up @@ -319,7 +321,7 @@ def delayR(self):
Zhu et al. (2019), Eqn. 1
Fiore et al. (2023), Eqn. 4
"""
return ((self.a1() / c.c) * self.d_delayR_da1()).decompose()
return ((self.a1() / c.c) * self.d_delayR_da1()).to(u.s)

def d_Dre_d_par(self, par):
"""Derivative computation.
Expand Down
2 changes: 1 addition & 1 deletion src/pint/models/stand_alone_psr_binaries/ELL1k_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,7 @@ def delayR(self):
+ 0.5
* (self.eps2() * np.sin(2 * Phi) - self.eps1() * (np.cos(2 * Phi) + 3))
)
).decompose()
).to(u.s)

def d_Dre_d_par(self, par):
"""Derivative computation.
Expand Down
2 changes: 1 addition & 1 deletion src/pint/models/stand_alone_psr_binaries/binary_generic.py
Original file line number Diff line number Diff line change
Expand Up @@ -381,7 +381,7 @@ def get_tt0(self, barycentricTOA):
def ecc(self):
"""Calculate eccentricity with EDOT"""
if hasattr(self, "_tt0"):
return self.ECC + (self.tt0 * self.EDOT).decompose()
return self.ECC + (self.tt0 * self.EDOT).to(u.dimensionless_unscaled)
return self.ECC

def d_ecc_d_T0(self):
Expand Down
11 changes: 5 additions & 6 deletions src/pint/models/stand_alone_psr_binaries/binary_orbits.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,9 +100,9 @@ def orbits(self):
PB = self.PB.to("second")
PBDOT = self.PBDOT
XPBDOT = self.XPBDOT
return (
self.tt0 / PB - 0.5 * (PBDOT + XPBDOT) * (self.tt0 / PB) ** 2
).decompose()
return (self.tt0 / PB - 0.5 * (PBDOT + XPBDOT) * (self.tt0 / PB) ** 2).to(
u.dimensionless_unscaled
)

def pbprime(self):
"""Instantaneous binary period as a function of time."""
Expand Down Expand Up @@ -183,7 +183,7 @@ def _FBXs(self):
def orbits(self):
"""Orbital phase (number of orbits since T0)."""
orbits = taylor_horner(self.tt0, self._FBXs())
return orbits.decompose()
return orbits.to(u.dimensionless_unscaled)

def pbprime(self):
"""Instantaneous binary period as a function of time."""
Expand Down Expand Up @@ -213,8 +213,7 @@ def d_orbits_d_FBX(self, FBX):
FBXs.append(1.0 * getattr(self, f"FB{ii}").unit)
break
ii += 1
d_orbits = taylor_horner(self.tt0, FBXs) / par.unit
return d_orbits.decompose() * 2 * np.pi * u.rad
return taylor_horner(self.tt0, FBXs) * 2 * np.pi * (u.rad / par.unit)

def d_pbprime_d_FBX(self, FBX):
par = getattr(self, FBX)
Expand Down
63 changes: 56 additions & 7 deletions src/pint/models/timing_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -349,6 +349,10 @@ def __init__(self, name="", components=[]):
for cp in components:
self.add_component(cp, setup=False, validate=False)

self.__locked = False
self.__lock_toas = None
self.__toa_errors_cache = None

def __repr__(self):
return "{}(\n {}\n)".format(
self.__class__.__name__,
Expand Down Expand Up @@ -485,6 +489,47 @@ def num_components_of_type(type):
# result += str(getattr(cp, pp)) + "\n"
# return result

def lock(self, toas):
    """Lock a timing model object for a given TOAs object by caching parameter masks.

    This also allows us to do other optimizations such as skipping TOA error
    checks. This can provide significant performance gains when mask
    parameters are present. However, mutating the TOAs object after calling
    this function can lead to exceptions or wrong results down the line.
    Undo with `unlock()`.

    Parameters
    ----------
    toas : pint.toa.TOAs
        The TOAs object to lock this model against.

    Raises
    ------
    ValueError
        If any TOA has a zero uncertainty (the cached errors are used without
        further checks while the model is locked).
    """

    # Grammar fixed here: the message previously read "This is will lead to".
    log.warning(
        "Locking the TimingModel object for the given TOAs object. This will lead to "
        "undefined behavior if the TOAs object is mutated after this."
    )

    if (toas.get_errors() == 0).any():
        raise ValueError(
            "Model cannot be locked since some TOAs have zero uncertainties."
        )

    self.__locked = True
    self.__lock_toas = toas
    # NOTE(review): this single-underscore attribute is the one read by the
    # noise component (`self._parent._toa_errors_cache`), while __init__
    # initializes the name-mangled `self.__toa_errors_cache` — the two are
    # different attributes; confirm which spelling is intended.
    self._toa_errors_cache = toas.get_errors().to_value(u.s)

    for mp in self.get_params_of_type_top("maskParameter"):
        mpar = getattr(self, mp)
        if mpar.key is not None:
            mpar.cache_toa_mask(toas)

@property
def locked(self):
    # True when `lock()` has been called and not undone by `unlock()`;
    # consulted by noise components (e.g. scale_toa_sigma) to decide whether
    # the cached TOA errors and parameter masks may be used.
    return self.__locked

def unlock(self):
    """Undo the effect of `lock()`: drop the cached TOA errors and clear the
    cached TOA selection mask of every active mask parameter."""
    self.__locked = False
    self.__lock_toas = self._toa_errors_cache = None

    mask_param_names = self.get_params_of_type_top("maskParameter")
    for param in (getattr(self, name) for name in mask_param_names):
        if param.key is not None:
            param.clear_toa_mask_cache()

def __getattr__(self, name):
if name in ["components", "component_types", "search_cmp_attr"]:
raise AttributeError
Expand Down Expand Up @@ -1509,17 +1554,21 @@ def scaled_toa_uncertainty(self, toas):
toas: pint.toa.TOAs
The input data object for TOAs uncertainty.
"""
ntoa = toas.ntoas
tbl = toas.table
result = np.zeros(ntoa) * u.us

# When there is no noise model.
if len(self.scaled_toa_uncertainty_funcs) == 0:
result += tbl["error"].quantity
return result
return tbl["error"].quantity.to(u.us)

for nf in self.scaled_toa_uncertainty_funcs:
result += nf(toas)
return result
return (
sum(
map(
lambda nf: nf(toas).to_value(u.us),
self.scaled_toa_uncertainty_funcs,
)
)
<< u.us
)

def scaled_dm_uncertainty(self, toas):
"""Get the scaled DM data uncertainties noise models.
Expand Down
Loading