diff --git a/.travis.yml b/.travis.yml index a19bd60de..25f81d672 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,6 +17,12 @@ addons: packages: - graphviz +# Only build master and tags, besides pull requests. +branches: + only: + - master + - /v[0-9]+\.[0-9]+\.[0-9]+(-.*)?$/ + stages: # Do the style check and a single test job, don't proceed if it fails @@ -76,7 +82,7 @@ matrix: - os: linux python: 3.8 name: Documentation build - stage: Comprehensive tests + stage: Initial tests env: TOXENV=build_docs # Now all dependencies with the latest Python @@ -156,7 +162,7 @@ matrix: name: Python 3.8 with developer version of astropy stage: Comprehensive tests env: TOXENV=py38-test-devdeps - + # Add a job that runs from cron only and tests against astropy dev and # numpy dev to give a change for early discovery of issues and feedback # for both developer teams. diff --git a/stingray/crossspectrum.py b/stingray/crossspectrum.py index c11971ed9..c8060b06a 100644 --- a/stingray/crossspectrum.py +++ b/stingray/crossspectrum.py @@ -1,11 +1,12 @@ import warnings - +from collections.abc import Iterable, Iterator import numpy as np import scipy import scipy.stats import scipy.fftpack import scipy.optimize +import copy # location of factorial moved between scipy versions try: @@ -25,7 +26,9 @@ from stingray.utils import rebin_data, simon, rebin_data_log from stingray.exceptions import StingrayError from stingray.gti import cross_two_gtis, bin_intervals_from_gtis, check_gtis -import copy +from .events import EventList +from .utils import show_progress + try: from tqdm import tqdm as show_progress @@ -233,6 +236,7 @@ def cospectra_pvalue(power, nspec): return pval + def coherence(lc1, lc2): """ Estimate coherence function of two light curves. @@ -311,10 +315,10 @@ class Crossspectrum(object): Parameters ---------- - lc1: :class:`stingray.Lightcurve` object, optional, default ``None`` + data1: :class:`stingray.Lightcurve` or :class:`stingray.events.EventList`, optional, default ``None`` The first light curve data for the channel/band of interest. - lc2: :class:`stingray.Lightcurve` object, optional, default ``None`` + data2: :class:`stingray.Lightcurve` or :class:`stingray.events.EventList`, optional, default ``None`` The light curve data for the reference band. norm: {``frac``, ``abs``, ``leahy``, ``none``}, default ``none`` @@ -330,6 +334,20 @@ class Crossspectrum(object): This choice overrides the GTIs in the single light curves. Use with care! + lc1: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects + For backwards compatibility only. Like ``data1``, but no + :class:`stingray.events.EventList` objects allowed + + lc2: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects + For backwards compatibility only. Like ``data2``, but no + :class:`stingray.events.EventList` objects allowed + + dt: float + The time resolution of the light curve. 
Only needed when constructing + light curves in the case where ``data1``, ``data2`` are + :class:`EventList` objects + + Attributes ---------- freq: numpy.ndarray @@ -361,8 +379,10 @@ class Crossspectrum(object): nphots2: float The total number of photons in light curve 2 """ - def __init__(self, lc1=None, lc2=None, norm='none', gti=None, - power_type="real"): + + def __init__(self, data1=None, data2=None, norm='none', gti=None, + lc1=None, lc2=None, power_type="real", dt=None): + if isinstance(norm, str) is False: raise TypeError("norm must be a string") @@ -373,8 +393,18 @@ def __init__(self, lc1=None, lc2=None, norm='none', gti=None, # check if input data is a Lightcurve object, if not make one or # make an empty Crossspectrum object if lc1 == ``None`` or lc2 == ``None`` - if lc1 is None or lc2 is None: - if lc1 is not None or lc2 is not None: + + if lc1 is not None or lc2 is not None: + warnings.warn("The lcN keywords are now deprecated. Use dataN " + "instead", DeprecationWarning) + # for backwards compatibility + if data1 is None: + data1 = lc1 + if data2 is None: + data2 = lc2 + + if data1 is None or data2 is None: + if data1 is not None or data2 is not None: raise TypeError("You can't do a cross spectrum with just one " "light curve!") else: @@ -387,13 +417,29 @@ def __init__(self, lc1=None, lc2=None, norm='none', gti=None, self.m = 1 self.n = None return + + if (isinstance(data1, EventList) or isinstance(data2, EventList)) and \ + dt is None: + raise ValueError("If using event lists, please specify the bin " + "time to generate lightcurves.") + + if not isinstance(data1, EventList): + lc1 = data1 + else: + lc1 = data1.to_lc(dt) + + if not isinstance(data2, EventList): + lc2 = data2 + elif isinstance(data2, EventList) and data2 is not data1: + lc2 = data2.to_lc(dt) + elif data2 is data1: + lc2 = lc1 + self.gti = gti self.lc1 = lc1 self.lc2 = lc2 self.power_type = power_type - self._make_crossspectrum(lc1, lc2) - # These are needed to calculate coherence self._make_auxil_pds(lc1, lc2) @@ -640,7 +686,10 @@ def _normalize_crossspectrum(self, unnorm_power, tseg): 'none' normalization, imaginary part is returned as well. """ - return normalize_crossspectrum(unnorm_power, tseg, self.n, self.nphots1, self.nphots2, self.norm, self.power_type) + return normalize_crossspectrum( + unnorm_power, tseg, self.n, self.nphots1, self.nphots2, self.norm, + self.power_type) + def rebin_log(self, f=0.01): """ @@ -790,11 +839,11 @@ def plot(self, labels=None, axis=None, title=None, marker='-', save=False, plt.ylabel(labels[1]) except TypeError: simon("``labels`` must be either a list or tuple with " - "x and y labels.") + "x and y labels.") raise except IndexError: simon("``labels`` must have two labels for x and y " - "axes.") + "axes.") # Not raising here because in case of len(labels)==1, only # x-axis will be labelled. plt.legend(loc='best') @@ -896,11 +945,11 @@ class AveragedCrossspectrum(Crossspectrum): Parameters ---------- - lc1: :class:`stingray.Lightcurve` object OR iterable of :class:`stingray.Lightcurve` objects + data1: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects OR :class:`stingray.EventList` object A light curve from which to compute the cross spectrum. In some cases, this would be the light curve of the wavelength/energy/frequency band of interest. 
- lc2: :class:`stingray.Lightcurve` object OR iterable of :class:`stingray.Lightcurve` objects + data2: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects OR :class:`stingray.EventList` object A second light curve to use in the cross spectrum. In some cases, this would be the wavelength/energy/frequency reference band to compare the band of interest with. @@ -921,6 +970,10 @@ class AveragedCrossspectrum(Crossspectrum): This choice overrides the GTIs in the single light curves. Use with care! + dt : float + The time resolution of the light curve. Only needed when constructing + light curves in the case where data1 or data2 are of :class:EventList + power_type: string, optional, default ``real`` Parameter to choose among complete, real part and magnitude of the cross spectrum. @@ -929,6 +982,14 @@ class AveragedCrossspectrum(Crossspectrum): Do not show a progress bar when generating an averaged cross spectrum. Useful for the batch execution of many spectra + lc1: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects + For backwards compatibility only. Like ``data1``, but no + :class:`stingray.events.EventList` objects allowed + + lc2: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects + For backwards compatibility only. Like ``data2``, but no + :class:`stingray.events.EventList` objects allowed + Attributes ---------- freq: numpy.ndarray @@ -966,44 +1027,76 @@ class AveragedCrossspectrum(Crossspectrum): """ - def __init__(self, lc1=None, lc2=None, segment_size=None, - norm='none', gti=None, power_type="real", silent=False): + def __init__(self, data1=None, data2=None, segment_size=None, norm='none', + gti=None, power_type="real", silent=False, lc1=None, lc2=None, + dt=None): + + if lc1 is not None or lc2 is not None: + warnings.warn("The lcN keywords are now deprecated. Use dataN " + "instead", DeprecationWarning) + # for backwards compatibility + if data1 is None: + data1 = lc1 + if data2 is None: + data2 = lc2 self.type = "crossspectrum" - if segment_size is None and lc1 is not None: + if segment_size is None and data1 is not None: raise ValueError("segment_size must be specified") if segment_size is not None and not np.isfinite(segment_size): raise ValueError("segment_size must be finite!") self.segment_size = segment_size self.power_type = power_type + self.show_progress = not silent + self.dt = dt + + if isinstance(data1, EventList): + lengths = data1.gti[:, 1] - data1.gti[:, 0] + good = lengths >= segment_size + data1.gti = data1.gti[good] + data1 = list(data1.to_lc_list(dt)) + + if isinstance(data2, EventList): + lengths = data2.gti[:, 1] - data2.gti[:, 0] + good = lengths >= segment_size + data2.gti = data2.gti[good] + data2 = list(data2.to_lc_list(dt)) - Crossspectrum.__init__(self, lc1, lc2, norm, gti=gti, - power_type=power_type) + Crossspectrum.__init__(self, data1, data2, norm, gti=gti, + power_type=power_type, dt=dt) return def _make_auxil_pds(self, lc1, lc2): """ - Helper method to create the power spectrum of both light curves independently. + Helper method to create the power spectrum of both light curves + independently. Parameters ---------- lc1, lc2 : :class:`stingray.Lightcurve` objects Two light curves used for computing the cross spectrum. 
""" + is_event = isinstance(lc1, EventList) + is_lc = isinstance(lc1, Lightcurve) + is_lc_iter = isinstance(lc1, Iterator) + is_lc_list = isinstance(lc1, Iterable) and not is_lc_iter # A way to say that this is actually not a power spectrum - if lc1 is not lc2 and isinstance(lc1, Lightcurve): + if self.type != "powerspectrum" and \ + (lc1 is not lc2) and (is_event or is_lc or is_lc_list): self.pds1 = AveragedCrossspectrum(lc1, lc1, segment_size=self.segment_size, - norm='none', gti=lc1.gti, - power_type=self.power_type) + norm='none', gti=self.gti, + power_type=self.power_type, + dt=self.dt) self.pds2 = AveragedCrossspectrum(lc2, lc2, segment_size=self.segment_size, - norm='none', gti=lc2.gti, - power_type=self.power_type) + norm='none', gti=self.gti, + power_type=self.power_type, + dt=self.dt) def _make_segment_spectrum(self, lc1, lc2, segment_size): """ @@ -1045,16 +1138,18 @@ def _make_segment_spectrum(self, lc1, lc2, segment_size): # In case a small difference exists, ignore it lc1.dt = lc2.dt - gti = cross_two_gtis(lc1.gti, lc2.gti) + current_gtis = cross_two_gtis(lc1.gti, lc2.gti) + lc1.gti = lc2.gti = current_gtis lc1.apply_gtis() lc2.apply_gtis() + if self.gti is None: - self.gti = gti + self.gti = current_gtis else: - if not np.all(self.gti == gti): - self.gti = np.vstack([self.gti, gti]) + if not np.all(self.gti == current_gtis): + self.gti = np.vstack([self.gti, current_gtis]) - check_gtis(self.gti) + check_gtis(current_gtis) cs_all = [] nphots1_all = [] @@ -1062,7 +1157,7 @@ def _make_segment_spectrum(self, lc1, lc2, segment_size): start_inds, end_inds = \ - bin_intervals_from_gtis(gti, segment_size, lc1.time, + bin_intervals_from_gtis(current_gtis, segment_size, lc1.time, dt=lc1.dt) simon("Errorbars on cross spectra are not thoroughly tested. " "Please report any inconsistencies.") @@ -1073,13 +1168,12 @@ def _make_segment_spectrum(self, lc1, lc2, segment_size): for start_ind, end_ind in \ local_show_progress(zip(start_inds, end_inds)): - time_1 = lc1.time[start_ind:end_ind] - counts_1 = lc1.counts[start_ind:end_ind] - counts_1_err = lc1.counts_err[start_ind:end_ind] - time_2 = lc2.time[start_ind:end_ind] - counts_2 = lc2.counts[start_ind:end_ind] - counts_2_err = lc2.counts_err[start_ind:end_ind] - + time_1 = copy.deepcopy(lc1.time[start_ind:end_ind]) + counts_1 = copy.deepcopy(lc1.counts[start_ind:end_ind]) + counts_1_err = copy.deepcopy(lc1.counts_err[start_ind:end_ind]) + time_2 = copy.deepcopy(lc2.time[start_ind:end_ind]) + counts_2 = copy.deepcopy(lc2.counts[start_ind:end_ind]) + counts_2_err = copy.deepcopy(lc2.counts_err[start_ind:end_ind]) if np.sum(counts_1) == 0 or np.sum(counts_2) == 0: warnings.warn( "No counts in interval {}--{}s".format(time_1[0], @@ -1098,9 +1192,9 @@ def _make_segment_spectrum(self, lc1, lc2, segment_size): err_dist=lc2.err_dist, gti=gti2, dt=lc2.dt, skip_checks=True) - with warnings.catch_warnings(record=True) as w: - cs_seg = Crossspectrum(lc1_seg, lc2_seg, norm=self.norm, - power_type=self.power_type) + # with warnings.catch_warnings(record=True) as w: + cs_seg = Crossspectrum(lc1_seg, lc2_seg, norm=self.norm, + power_type=self.power_type) cs_all.append(cs_seg) nphots1_all.append(np.sum(lc1_seg.counts)) @@ -1120,6 +1214,9 @@ def _make_crossspectrum(self, lc1, lc2): lc1, lc2 : :class:`stingray.Lightcurve` objects Two light curves used for computing the cross spectrum. 
""" + local_show_progress = show_progress + if not self.show_progress: + local_show_progress = lambda a: a # chop light curves into segments if isinstance(lc1, Lightcurve) and \ @@ -1139,8 +1236,7 @@ def _make_crossspectrum(self, lc1, lc2): else: self.cs_all, nphots1_all, nphots2_all = [], [], [] - for lc1_seg, lc2_seg in zip(lc1, lc2): - + for lc1_seg, lc2_seg in local_show_progress(zip(lc1, lc2)): if self.type == "crossspectrum": cs_sep, nphots1_sep, nphots2_sep = \ self._make_segment_spectrum(lc1_seg, lc2_seg, @@ -1152,7 +1248,6 @@ def _make_crossspectrum(self, lc1, lc2): else: raise ValueError("Type of spectrum not recognized!") - self.cs_all.append(cs_sep) nphots1_all.append(nphots1_sep) diff --git a/stingray/events.py b/stingray/events.py index 977fc562b..8c998d4a5 100644 --- a/stingray/events.py +++ b/stingray/events.py @@ -133,6 +133,34 @@ def to_lc(self, dt, tstart=None, tseg=None): gti=self.gti, tseg=tseg, mjdref=self.mjdref) + + def to_lc_list(self, dt): + """Convert event list to a generator of Lightcurves. + + Parameters + ---------- + dt: float + Binning time of the light curves + + Returns + ------- + lc_gen: generator + Generates one :class:`stingray.Lightcurve` object for each GTI + """ + start_times = self.gti[:, 0] + end_times = self.gti[:, 1] + tsegs = end_times - start_times + + for st, end, tseg in zip(start_times, end_times, tsegs): + idx_st = np.searchsorted(self.time, st, side='right') + idx_end = np.searchsorted(self.time, end, side='left') + lc = Lightcurve.make_lightcurve(self.time[idx_st:idx_end], dt, + tstart=st, + gti=np.array([[st, end]]), + tseg=tseg, + mjdref=self.mjdref) + yield lc + @staticmethod def from_lc(lc): """ diff --git a/stingray/powerspectrum.py b/stingray/powerspectrum.py index c598ad9ac..359596cd5 100755 --- a/stingray/powerspectrum.py +++ b/stingray/powerspectrum.py @@ -1,15 +1,18 @@ import warnings + import numpy as np import scipy -import scipy.stats import scipy.fftpack import scipy.optimize +import scipy.stats import stingray.lightcurve as lightcurve import stingray.utils as utils -from stingray.gti import bin_intervals_from_gtis, check_gtis from stingray.crossspectrum import Crossspectrum, AveragedCrossspectrum +from stingray.gti import bin_intervals_from_gtis, check_gtis from stingray.stats import pds_probability +from .events import EventList +from .gti import cross_two_gtis try: from tqdm import tqdm as show_progress @@ -32,7 +35,7 @@ class Powerspectrum(Crossspectrum): Parameters ---------- - lc: :class:`stingray.Lightcurve` object, optional, default ``None`` + data: :class:`stingray.Lightcurve` object, optional, default ``None`` The light curve data to be Fourier-transformed. norm: {``leahy`` | ``frac`` | ``abs`` | ``none`` }, optional, default ``frac`` @@ -78,9 +81,18 @@ class Powerspectrum(Crossspectrum): The total number of photons in the light curve """ - def __init__(self, lc=None, norm='frac', gti=None): - Crossspectrum.__init__(self, lc1=lc, lc2=lc, norm=norm, gti=gti) + def __init__(self, data=None, norm="frac", gti=None, + dt=None, lc=None): + if lc is not None: + warnings.warn("The lc keyword is now deprecated. 
Use data " + "instead", DeprecationWarning) + if data is None: + data = lc + + Crossspectrum.__init__(self, data1=data, data2=data, norm=norm, gti=gti, + dt=dt) self.nphots = self.nphots1 + self.dt = dt def rebin(self, df=None, f=None, method="mean"): """ @@ -278,7 +290,7 @@ class AveragedPowerspectrum(AveragedCrossspectrum, Powerspectrum): Parameters ---------- - lc: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects + data: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects OR :class:`stingray.EventList` object The light curve data to be Fourier-transformed. segment_size: float @@ -302,6 +314,11 @@ class AveragedPowerspectrum(AveragedCrossspectrum, Powerspectrum): Do not show a progress bar when generating an averaged cross spectrum. Useful for the batch execution of many spectra + dt: float + The time resolution of the light curve. Only needed when constructing + light curves in the case where data is of :class:EventList + + Attributes ---------- norm: {``leahy`` | ``frac`` | ``abs`` | ``none`` } @@ -334,26 +351,39 @@ class AveragedPowerspectrum(AveragedCrossspectrum, Powerspectrum): The total number of photons in the light curve """ - def __init__(self, lc=None, segment_size=None, norm="frac", gti=None, - silent=False): + def __init__(self, data=None, segment_size=None, norm="frac", gti=None, + silent=False, dt=None, lc=None): self.type = "powerspectrum" - - if segment_size is None and lc is not None: + if lc is not None: + warnings.warn("The lc keyword is now deprecated. Use data " + "instead", DeprecationWarning) + # Backwards compatibility: user might have supplied lc instead + if data is None: + data = lc + + if segment_size is None and data is not None: raise ValueError("segment_size must be specified") if segment_size is not None and not np.isfinite(segment_size): raise ValueError("segment_size must be finite!") + self.dt = dt + + if isinstance(data, EventList): + lengths = data.gti[:, 1] - data.gti[:, 0] + good = lengths >= segment_size + data.gti = data.gti[good] + self.segment_size = segment_size self.show_progress = not silent - Powerspectrum.__init__(self, lc, norm, gti=gti) + Powerspectrum.__init__(self, data, norm, gti=gti, dt=dt) return def _make_segment_spectrum(self, lc, segment_size): """ - Split the light curves into segments of size ``segment_size``, and calculate a power spectrum for - each. + Split the light curves into segments of size ``segment_size``, and + calculate a power spectrum for each. 
Parameters ---------- @@ -374,6 +404,8 @@ def _make_segment_spectrum(self, lc, segment_size): if not isinstance(lc, lightcurve.Lightcurve): raise TypeError("lc must be a lightcurve.Lightcurve object") + current_gtis = lc.gti + if self.gti is None: self.gti = lc.gti else: @@ -383,7 +415,7 @@ def _make_segment_spectrum(self, lc, segment_size): check_gtis(self.gti) start_inds, end_inds = \ - bin_intervals_from_gtis(lc.gti, segment_size, lc.time, dt=lc.dt) + bin_intervals_from_gtis(current_gtis, segment_size, lc.time, dt=lc.dt) power_all = [] nphots_all = [] @@ -472,7 +504,7 @@ class DynamicalPowerspectrum(AveragedPowerspectrum): The time resolution """ - def __init__(self, lc, segment_size, norm="frac", gti=None): + def __init__(self, lc, segment_size, norm="frac", gti=None, dt=None): if segment_size < 2 * lc.dt: raise ValueError("Length of the segment is too short to form a " "light curve!") @@ -481,7 +513,7 @@ def __init__(self, lc, segment_size, norm="frac", gti=None): "any segments of the light curve!") AveragedPowerspectrum.__init__(self, lc=lc, segment_size=segment_size, norm=norm, - gti=gti) + gti=gti, dt=dt) self._make_matrix(lc) def _make_matrix(self, lc): @@ -498,9 +530,13 @@ def _make_matrix(self, lc): self.dyn_ps = np.array([ps.power for ps in ps_all]).T self.freq = ps_all[0].freq + current_gti = lc.gti + if self.gti is not None: + current_gti = cross_two_gtis(self.gti, current_gti) start_inds, end_inds = \ - bin_intervals_from_gtis(self.gti, self.segment_size, lc.time, dt=lc.dt) + bin_intervals_from_gtis(current_gti, self.segment_size, lc.time, + dt=lc.dt) tstart = lc.time[start_inds] diff --git a/stingray/tests/test_crossspectrum.py b/stingray/tests/test_crossspectrum.py index 60a01f83f..3c0bacc55 100644 --- a/stingray/tests/test_crossspectrum.py +++ b/stingray/tests/test_crossspectrum.py @@ -4,13 +4,13 @@ import warnings import matplotlib.pyplot as plt import scipy.special -from stingray import Lightcurve, AveragedPowerspectrum +from stingray import Lightcurve from stingray import Crossspectrum, AveragedCrossspectrum, coherence, time_lag from stingray.crossspectrum import cospectra_pvalue, normalize_crossspectrum -from ..simulator.simulator import Simulator from stingray import StingrayError from ..simulator.simulator import Simulator +from stingray.events import EventList import copy np.random.seed(20160528) @@ -120,6 +120,100 @@ def test_sixty_spectra(self): assert np.isclose(cospectra_pvalue(power, nspec), pval_theory) +class TestAveragedCrossspectrumEvents(object): + + def setup_class(self): + tstart = 0.0 + tend = 1.0 + self.dt = np.longdouble(0.0001) + + times1 = np.sort(np.random.uniform(tstart, tend, 1000)) + times2 = np.sort(np.random.uniform(tstart, tend, 1000)) + gti = np.array([[tstart, tend]]) + + self.events1 = EventList(times1, gti=gti) + self.events2 = EventList(times2, gti=gti) + + self.cs = Crossspectrum(self.events1, self.events2, dt=self.dt) + + self.acs = AveragedCrossspectrum(self.events1, self.events2, + segment_size=1, dt=self.dt) + self.lc1, self.lc2 = self.events1, self.events2 + + def test_it_works_with_events(self): + lc1 = self.events1.to_lc(self.dt) + lc2 = self.events2.to_lc(self.dt) + lccs = Crossspectrum(lc1, lc2) + assert np.allclose(lccs.power, self.cs.power) + + def test_no_segment_size(self): + with pytest.raises(ValueError): + cs = AveragedCrossspectrum(self.lc1, self.lc2, dt=self.dt) + + def test_init_with_norm_not_str(self): + with pytest.raises(TypeError): + cs = AveragedCrossspectrum(self.lc1, self.lc2, segment_size=1, + norm=1, 
dt=self.dt) + + def test_init_with_invalid_norm(self): + with pytest.raises(ValueError): + cs = AveragedCrossspectrum(self.lc1, self.lc2, segment_size=1, + norm='frabs', dt=self.dt) + + def test_init_with_inifite_segment_size(self): + with pytest.raises(ValueError): + cs = AveragedCrossspectrum(self.lc1, self.lc2, + segment_size=np.inf, dt=self.dt) + + def test_coherence(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + + coh = self.acs.coherence() + + assert len(coh[0]) == 4999 + assert len(coh[1]) == 4999 + assert issubclass(w[-1].category, UserWarning) + + def test_failure_when_normalization_not_recognized(self): + with pytest.raises(ValueError): + cs = AveragedCrossspectrum(self.lc1, self.lc2, + segment_size=1, + norm="wrong", dt=self.dt) + + def test_failure_when_power_type_not_recognized(self): + with pytest.raises(ValueError): + cs = AveragedCrossspectrum(self.lc1, self.lc2, + segment_size=1, + power_type="wrong", dt=self.dt) + + def test_rebin(self): + new_cs = self.acs.rebin(df=1.5) + assert new_cs.df == 1.5 + new_cs.time_lag() + + def test_rebin_factor(self): + new_cs = self.acs.rebin(f=1.5) + assert new_cs.df == self.acs.df * 1.5 + new_cs.time_lag() + + def test_rebin_log(self): + # For now, just verify that it doesn't crash + new_cs = self.acs.rebin_log(f=0.1) + assert type(new_cs) == type(self.acs) + new_cs.time_lag() + + def test_rebin_log_returns_complex_values(self): + # For now, just verify that it doesn't crash + new_cs = self.acs.rebin_log(f=0.1) + assert isinstance(new_cs.power[0], np.complex) + + def test_rebin_log_returns_complex_errors(self): + # For now, just verify that it doesn't crash + new_cs = self.acs.rebin_log(f=0.1) + assert isinstance(new_cs.power_err[0], np.complex) + + class TestCoherenceFunction(object): def setup_class(self): @@ -275,6 +369,15 @@ def setup_class(self): with pytest.warns(UserWarning) as record: self.cs = Crossspectrum(self.lc1, self.lc2) + def test_lc_keyword_deprecation(self): + cs1 = Crossspectrum(self.lc1, self.lc2) + with pytest.warns(DeprecationWarning) as record: + cs2 = Crossspectrum(lc1=self.lc1, lc2=self.lc2) + assert np.any(['lcN keywords' in r.message.args[0] + for r in record]) + assert np.allclose(cs1.power, cs2.power) + assert np.allclose(cs1.freq, cs2.freq) + def test_make_empty_crossspectrum(self): cs = Crossspectrum() assert cs.freq is None @@ -370,7 +473,7 @@ def test_rebin_log(self): def test_norm_abs(self): # Testing for a power spectrum of lc1 - cs = Crossspectrum(lc1=self.lc1, lc2=self.lc1, norm='abs') + cs = Crossspectrum(self.lc1, self.lc1, norm='abs') assert len(cs.power) == 4999 assert cs.norm == 'abs' abs_noise = 2. 
* self.rate1 # expected Poisson noise level @@ -378,7 +481,8 @@ def test_norm_abs(self): def test_norm_leahy(self): with pytest.warns(UserWarning) as record: - cs = Crossspectrum(lc1=self.lc1, lc2=self.lc1, norm='leahy') + cs = Crossspectrum(self.lc1, self.lc1, + norm='leahy') assert len(cs.power) == 4999 assert cs.norm == 'leahy' leahy_noise = 2.0 # expected Poisson noise level @@ -518,6 +622,17 @@ def setup_class(self): with pytest.warns(UserWarning) as record: self.cs = AveragedCrossspectrum(self.lc1, self.lc2, segment_size=1) + def test_lc_keyword_deprecation(self): + cs1 = AveragedCrossspectrum(data1=self.lc1, data2=self.lc2, + segment_size=1) + with pytest.warns(DeprecationWarning) as record: + cs2 = AveragedCrossspectrum(lc1=self.lc1, lc2=self.lc2, + segment_size=1) + assert np.any(['lcN keywords' in r.message.args[0] + for r in record]) + assert np.allclose(cs1.power, cs2.power) + assert np.allclose(cs1.freq, cs2.freq) + def test_make_empty_crossspectrum(self): cs = AveragedCrossspectrum() assert cs.freq is None @@ -557,12 +672,13 @@ def test_invalid_type_attribute_with_multiple_lcs(self): [self.lc2, self.lc1], segment_size=1) acs_test.type = 'invalid_type' - with pytest.raises(ValueError): + with pytest.raises(ValueError) as excinfo: assert AveragedCrossspectrum._make_crossspectrum(acs_test, - lc1=[self.lc1, - self.lc2], - lc2=[self.lc2, + [self.lc1, + self.lc2], + [self.lc2, self.lc1]) + assert "Type of spectrum not recognized" in str(excinfo.value) def test_different_dt(self): time1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] @@ -622,13 +738,14 @@ def test_rebin_with_invalid_type_attribute(self): aps = AveragedCrossspectrum(lc1=self.lc1, lc2=self.lc2, segment_size=1, norm='leahy') aps.type = 'invalid_type' - with pytest.raises(ValueError): + with pytest.raises(ValueError) as excinfo: assert aps.rebin(df=new_df, method=aps.type) + assert "Method for summing or averaging not recognized. 
" in str(excinfo.value) def test_rebin_with_valid_type_attribute(self): new_df = 2 with pytest.warns(UserWarning) as record: - aps = AveragedCrossspectrum(lc1=self.lc1, lc2=self.lc2, + aps = AveragedCrossspectrum(self.lc1, self.lc2, segment_size=1, norm='leahy') assert aps.rebin(df=new_df) @@ -745,6 +862,12 @@ def test_timelag(self): dt = 0.1 simulator = Simulator(dt, 10000, rms=0.2, mean=1000) test_lc1 = simulator.simulate(2) + test_lc1.counts -= np.min(test_lc1.counts) + + test_lc1 = Lightcurve(test_lc1.time, + test_lc1.counts, + err_dist=test_lc1.err_dist, + dt=dt) test_lc2 = Lightcurve(test_lc1.time, np.array(np.roll(test_lc1.counts, 2)), err_dist=test_lc1.err_dist, diff --git a/stingray/tests/test_powerspectrum.py b/stingray/tests/test_powerspectrum.py index f9a97ce1e..8275d3bf1 100644 --- a/stingray/tests/test_powerspectrum.py +++ b/stingray/tests/test_powerspectrum.py @@ -4,12 +4,117 @@ from astropy.tests.helper import pytest from stingray import Lightcurve +from stingray.events import EventList from stingray import Powerspectrum, AveragedPowerspectrum, \ DynamicalPowerspectrum np.random.seed(20150907) +class TestAveragedPowerspectrumEvents(object): + @classmethod + def setup_class(cls): + tstart = 0.0 + tend = 10.0 + cls.dt = 0.0001 + cls.segment_size = tend - tstart + + times = np.sort(np.random.uniform(tstart, tend, 1000)) + gti = np.array([[tstart, tend]]) + + cls.events = EventList(times, gti=gti) + + cls.lc = cls.events + + def test_init(self): + AveragedPowerspectrum(self.lc, self.segment_size, dt=self.dt) + + def test_init_without_segment(self): + with pytest.raises(ValueError): + assert AveragedPowerspectrum(self.lc, dt=self.dt) + + def test_init_with_nonsense_segment(self): + segment_size = "foo" + with pytest.raises(TypeError): + assert AveragedPowerspectrum(self.lc, segment_size, dt=self.dt) + + def test_init_with_none_segment(self): + segment_size = None + with pytest.raises(ValueError): + assert AveragedPowerspectrum(self.lc, segment_size, dt=self.dt) + + def test_init_with_inf_segment(self): + segment_size = np.inf + with pytest.raises(ValueError): + assert AveragedPowerspectrum(self.lc, segment_size, dt=self.dt) + + def test_init_with_nan_segment(self): + segment_size = np.nan + with pytest.raises(ValueError): + assert AveragedPowerspectrum(self.lc, segment_size, dt=self.dt) + + @pytest.mark.parametrize('df', [2, 3, 5, 1.5, 1, 85]) + def test_rebin(self, df): + """ + TODO: Not sure how to write tests for the rebin method! + """ + + aps = AveragedPowerspectrum(self.lc, segment_size=self.segment_size, + norm="Leahy", dt=self.dt) + bin_aps = aps.rebin(df) + assert np.isclose(bin_aps.freq[1]-bin_aps.freq[0], bin_aps.df, + atol=1e-4, rtol=1e-4) + assert np.isclose(bin_aps.freq[0], + (aps.freq[0]-aps.df*0.5+bin_aps.df*0.5), + atol=1e-4, rtol=1e-4) + + @pytest.mark.parametrize('f', [20, 30, 50, 15, 1, 850]) + def test_rebin_factor(self, f): + """ + TODO: Not sure how to write tests for the rebin method! 
+ """ + + aps = AveragedPowerspectrum(self.lc, segment_size=1, + norm="Leahy", dt=self.dt) + bin_aps = aps.rebin(f=f) + assert np.isclose(bin_aps.freq[1]-bin_aps.freq[0], bin_aps.df, + atol=1e-4, rtol=1e-4) + assert np.isclose(bin_aps.freq[0], + (aps.freq[0]-aps.df*0.5+bin_aps.df*0.5), + atol=1e-4, rtol=1e-4) + + @pytest.mark.parametrize('df', [0.01, 0.1]) + def test_rebin_log(self, df): + # For now, just verify that it doesn't crash + aps = AveragedPowerspectrum(self.lc, segment_size=1, + norm="Leahy", dt=self.dt) + bin_aps = aps.rebin_log(df) + + def test_rebin_with_invalid_type_attribute(self): + new_df = 2 + aps = AveragedPowerspectrum(self.lc, segment_size=1, + norm='leahy', dt=self.dt) + aps.type = 'invalid_type' + with pytest.raises(AttributeError): + assert aps.rebin(df=new_df) + + def test_leahy_correct_for_multiple(self): + + n = 10 + lc_all = [] + for i in range(n): + time = np.arange(0.0, 10.0, 10. / 10000) + counts = np.random.poisson(1000, size=time.shape[0]) + lc = Lightcurve(time, counts) + lc_all.append(lc) + + ps = AveragedPowerspectrum(lc_all, 1.0, norm="leahy") + + assert np.isclose(np.mean(ps.power), 2.0, atol=1e-2, rtol=1e-2) + assert np.isclose(np.std(ps.power), 2.0 / np.sqrt(n*10), atol=0.1, + rtol=0.1) + + class TestPowerspectrum(object): @classmethod def setup_class(cls): @@ -39,7 +144,7 @@ def test_make_empty_periodogram(self): assert ps.n is None def test_make_periodogram_from_lightcurve(self): - ps = Powerspectrum(lc=self.lc) + ps = Powerspectrum(self.lc) assert ps.freq is not None assert ps.power is not None assert ps.power_err is not None @@ -50,7 +155,7 @@ def test_make_periodogram_from_lightcurve(self): assert ps.nphots == np.sum(self.lc.counts) def test_periodogram_types(self): - ps = Powerspectrum(lc=self.lc) + ps = Powerspectrum(self.lc) assert isinstance(ps.freq, np.ndarray) assert isinstance(ps.power, np.ndarray) assert isinstance(ps.power_err, np.ndarray) @@ -85,7 +190,7 @@ def test_total_variance(self): Note: make sure the factors of ncounts match! Also, make sure to *exclude* the zeroth power! """ - ps = Powerspectrum(lc=self.lc) + ps = Powerspectrum(self.lc) nn = ps.n pp = ps.unnorm_power / np.float(nn) ** 2 p_int = np.sum(pp[:-1] * ps.df) + (pp[-1] * ps.df) / 2 @@ -97,7 +202,7 @@ def test_frac_normalization_is_standard(self): Make sure the standard normalization of a periodogram is rms and it stays that way! """ - ps = Powerspectrum(lc=self.lc) + ps = Powerspectrum(self.lc) assert ps.norm == "frac" def test_frac_normalization_correct(self): @@ -106,7 +211,7 @@ def test_frac_normalization_correct(self): equal to the variance of the light curve divided by the mean of the light curve squared. 
""" - ps = Powerspectrum(lc=self.lc, norm="frac") + ps = Powerspectrum(self.lc, norm="frac") ps_int = np.sum(ps.power[:-1] * ps.df) + ps.power[-1] * ps.df / 2 std_lc = np.var(self.lc.counts) / np.mean(self.lc.counts) ** 2 assert np.isclose(ps_int, std_lc, atol=0.01, rtol=0.01) @@ -119,11 +224,11 @@ def test_fractional_rms_in_frac_norm_is_consistent(self): lc = Lightcurve(time, counts=poisson_counts, dt=1, gti=[[0, 100]]) - ps = Powerspectrum(lc=lc, norm="leahy") + ps = Powerspectrum(lc, norm="leahy") rms_ps_l, rms_err_l = ps.compute_rms(min_freq=ps.freq[1], max_freq=ps.freq[-1], white_noise_offset=0) - ps = Powerspectrum(lc=lc, norm="frac") + ps = Powerspectrum(lc, norm="frac") rms_ps, rms_err = ps.compute_rms(min_freq=ps.freq[1], max_freq=ps.freq[-1], white_noise_offset=0) assert np.allclose(rms_ps, rms_ps_l, atol=0.01) @@ -137,12 +242,12 @@ def test_fractional_rms_in_frac_norm_is_consistent_averaged(self): lc = Lightcurve(time, counts=poisson_counts, dt=1, gti=[[0, 400]]) - ps = AveragedPowerspectrum(lc=lc, norm="leahy", segment_size=100, + ps = AveragedPowerspectrum(lc, norm="leahy", segment_size=100, silent=True) rms_ps_l, rms_err_l = ps.compute_rms(min_freq=ps.freq[1], max_freq=ps.freq[-1], white_noise_offset=0) - ps = AveragedPowerspectrum(lc=lc, norm="frac", segment_size=100) + ps = AveragedPowerspectrum(lc, norm="frac", segment_size=100) rms_ps, rms_err = ps.compute_rms(min_freq=ps.freq[1], max_freq=ps.freq[-1], white_noise_offset=0) assert np.allclose(rms_ps, rms_ps_l, atol=0.01) @@ -156,7 +261,7 @@ def test_fractional_rms_in_frac_norm(self): lc = Lightcurve(time, counts=poisson_counts, dt=1, gti=[[0, 400]]) - ps = AveragedPowerspectrum(lc=lc, norm="frac", segment_size=100) + ps = AveragedPowerspectrum(lc, norm="frac", segment_size=100) rms_ps, rms_err = ps.compute_rms(min_freq=ps.freq[1], max_freq=ps.freq[-1], white_noise_offset=0) @@ -182,7 +287,7 @@ def test_leahy_norm_total_variance(self): powers multiplied by the number of counts and divided by the square of the number of data points in the light curve """ - ps = Powerspectrum(lc=self.lc, norm="Leahy") + ps = Powerspectrum(self.lc, norm="Leahy") ps_var = (np.sum(self.lc.counts) / ps.n ** 2.) * \ (np.sum(ps.power[:-1]) + ps.power[-1] / 2.) @@ -194,7 +299,7 @@ def test_fractional_rms_in_leahy_norm(self): deviation divided by the mean of the light curve. 
Therefore, we allow for a larger tolerance in np.isclose() """ - ps = Powerspectrum(lc=self.lc, norm="Leahy") + ps = Powerspectrum(self.lc, norm="Leahy") rms_ps, rms_err = ps.compute_rms(min_freq=ps.freq[0], max_freq=ps.freq[-1]) @@ -203,7 +308,7 @@ def test_fractional_rms_in_leahy_norm(self): def test_fractional_rms_fails_when_rms_not_leahy(self): with pytest.raises(Exception): - ps = Powerspectrum(lc=self.lc, norm="rms") + ps = Powerspectrum(self.lc, norm="rms") rms_ps, rms_err = ps.compute_rms(min_freq=ps.freq[0], max_freq=ps.freq[-1]) @@ -232,7 +337,7 @@ def test_fractional_rms_error(self): pass def test_rebin_makes_right_attributes(self): - ps = Powerspectrum(lc=self.lc, norm="Leahy") + ps = Powerspectrum(self.lc, norm="Leahy") # replace powers ps.power = np.ones_like(ps.power) * 2.0 @@ -255,7 +360,7 @@ def test_rebin_uses_mean(self): Note: function defaults come as a tuple, so the first keyword argument had better be 'method' """ - ps = Powerspectrum(lc=self.lc, norm="Leahy") + ps = Powerspectrum(self.lc, norm="Leahy") assert ps.rebin.__defaults__[2] == "mean" @pytest.mark.parametrize('df', [2, 3, 5, 1.5, 1, 85]) @@ -263,7 +368,7 @@ def test_rebin(self, df): """ TODO: Not sure how to write tests for the rebin method! """ - ps = Powerspectrum(lc=self.lc, norm="Leahy") + ps = Powerspectrum(self.lc, norm="Leahy") bin_ps = ps.rebin(df) assert np.isclose(bin_ps.freq[1] - bin_ps.freq[0], bin_ps.df, atol=1e-4, rtol=1e-4) @@ -271,17 +376,27 @@ def test_rebin(self, df): (ps.freq[0] - ps.df * 0.5 + bin_ps.df * 0.5), atol=1e-4, rtol=1e-4) + def test_lc_keyword_deprecation(self): + cs1 = Powerspectrum(self.lc, norm="Leahy") + with pytest.warns(DeprecationWarning) as record: + cs2 = Powerspectrum(lc=self.lc, norm="Leahy") + assert np.any(['lc keyword' in r.message.args[0] + for r in record]) + assert np.allclose(cs1.power, cs2.power) + assert np.allclose(cs1.freq, cs2.freq) + + def test_classical_significances_runs(self): - ps = Powerspectrum(lc=self.lc, norm="Leahy") + ps = Powerspectrum(self.lc, norm="Leahy") ps.classical_significances() def test_classical_significances_fails_in_rms(self): - ps = Powerspectrum(lc=self.lc, norm="frac") + ps = Powerspectrum(self.lc, norm="frac") with pytest.raises(ValueError): ps.classical_significances() def test_classical_significances_threshold(self): - ps = Powerspectrum(lc=self.lc, norm="leahy") + ps = Powerspectrum(self.lc, norm="leahy") # change the powers so that just one exceeds the threshold ps.power = np.zeros_like(ps.power) + 2.0 @@ -297,7 +412,7 @@ def test_classical_significances_threshold(self): assert pval[1, 0] == index def test_classical_significances_trial_correction(self): - ps = Powerspectrum(lc=self.lc, norm="leahy") + ps = Powerspectrum(self.lc, norm="leahy") # change the powers so that just one exceeds the threshold ps.power = np.zeros_like(ps.power) + 2.0 index = 1 @@ -309,7 +424,7 @@ def test_classical_significances_trial_correction(self): def test_classical_significances_with_logbinned_psd(self): - ps = Powerspectrum(lc=self.lc, norm="leahy") + ps = Powerspectrum(self.lc, norm="leahy") ps_log = ps.rebin_log() pval = ps_log.classical_significances(threshold=1.1, trial_correction=False) @@ -317,7 +432,7 @@ def test_classical_significances_with_logbinned_psd(self): assert len(pval[0]) == len(ps_log.power) def test_pvals_is_numpy_array(self): - ps = Powerspectrum(lc=self.lc, norm="leahy") + ps = Powerspectrum(self.lc, norm="leahy") # change the powers so that just one exceeds the threshold ps.power = np.zeros_like(ps.power) + 2.0 @@ -357,6 
+472,15 @@ def test_one_segment(self): ps = AveragedPowerspectrum(self.lc, segment_size) assert np.isclose(ps.segment_size, segment_size) + def test_lc_keyword_deprecation(self): + cs1 = AveragedPowerspectrum(self.lc, segment_size=self.lc.tseg) + with pytest.warns(DeprecationWarning) as record: + cs2 = AveragedPowerspectrum(lc=self.lc, segment_size=self.lc.tseg) + assert np.any(['lc keyword' in r.message.args[0] + for r in record]) + assert np.allclose(cs1.power, cs2.power) + assert np.allclose(cs1.freq, cs2.freq) + def test_no_counts_warns(self): newlc = copy.deepcopy(self.lc) newlc.counts[:newlc.counts.size // 2] = \ @@ -463,7 +587,7 @@ def test_rebin(self, df): TODO: Not sure how to write tests for the rebin method! """ - aps = AveragedPowerspectrum(lc=self.lc, segment_size=1, + aps = AveragedPowerspectrum(self.lc, segment_size=1, norm="Leahy") bin_aps = aps.rebin(df) assert np.isclose(bin_aps.freq[1]-bin_aps.freq[0], bin_aps.df, @@ -478,7 +602,7 @@ def test_rebin_factor(self, f): TODO: Not sure how to write tests for the rebin method! """ - aps = AveragedPowerspectrum(lc=self.lc, segment_size=1, + aps = AveragedPowerspectrum(self.lc, segment_size=1, norm="Leahy") bin_aps = aps.rebin(f=f) assert np.isclose(bin_aps.freq[1]-bin_aps.freq[0], bin_aps.df, @@ -490,13 +614,13 @@ def test_rebin_factor(self, f): @pytest.mark.parametrize('df', [0.01, 0.1]) def test_rebin_log(self, df): # For now, just verify that it doesn't crash - aps = AveragedPowerspectrum(lc=self.lc, segment_size=1, + aps = AveragedPowerspectrum(self.lc, segment_size=1, norm="Leahy") bin_aps = aps.rebin_log(df) def test_rebin_with_invalid_type_attribute(self): new_df = 2 - aps = AveragedPowerspectrum(lc=self.lc, segment_size=1, + aps = AveragedPowerspectrum(self.lc, segment_size=1, norm='leahy') aps.type = 'invalid_type' with pytest.raises(AttributeError): diff --git a/stingray/utils.py b/stingray/utils.py index 7071e2fc1..4f9b42d69 100644 --- a/stingray/utils.py +++ b/stingray/utils.py @@ -53,6 +53,12 @@ def __call__(self, func): def prange(x): return range(x) +try: + from tqdm import tqdm as show_progress +except ImportError: + def show_progress(a): + return a + try: from statsmodels.robust import mad as mad # pylint: disable=unused-import except ImportError:
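
For reference, the EventList-aware constructors introduced by this patch can be exercised as sketched below. This is a minimal illustration modelled on the new test cases (TestAveragedCrossspectrumEvents and TestAveragedPowerspectrumEvents); the event times, GTI, bin time and segment size are made-up example values, not part of the patch itself.

import numpy as np

from stingray import AveragedCrossspectrum, AveragedPowerspectrum
from stingray.events import EventList

# Illustrative synthetic event data: photon arrival times within one GTI.
tstart, tend, dt = 0.0, 10.0, 0.0001
gti = np.array([[tstart, tend]])
times1 = np.sort(np.random.uniform(tstart, tend, 1000))
times2 = np.sort(np.random.uniform(tstart, tend, 1000))
ev1 = EventList(times1, gti=gti)
ev2 = EventList(times2, gti=gti)

# Event lists can now be passed directly as data1/data2 (or data); dt is
# needed so the constructors can bin the events into light curves internally.
acs = AveragedCrossspectrum(ev1, ev2, segment_size=1, dt=dt)
aps = AveragedPowerspectrum(ev1, segment_size=1, dt=dt)

# The new EventList.to_lc_list generator yields one Lightcurve per GTI.
lcs = list(ev1.to_lc_list(dt))

Passing dt is required whenever the inputs are event lists, since it sets the binning of the internally generated light curves (Crossspectrum raises a ValueError without it); the old lc/lc1/lc2 keywords keep working for now but emit a DeprecationWarning.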