diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 7c1a92c10..ecf83e1e5 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -32,7 +32,7 @@ Bug Fixes - Various bug fixes in DynamicalPowerspectrum, on event loading and time rebinning (`#779 `__) - Fix issue with the Poisson noise calculation in lag spectra, that produced NaN errors under some conditions (`#789 `__) - Fix rms computation and error bars (`#792 `__) - +- Fix issue with ``Powerspectrum`` of a single light curve (`#663 `__) Breaking Changes ^^^^^^^^^^^^^^^^ diff --git a/docs/notebooks b/docs/notebooks index 448287755..ccc491197 160000 --- a/docs/notebooks +++ b/docs/notebooks @@ -1 +1 @@ -Subproject commit 448287755a373496cd5f81ae6ee904d3de0f4561 +Subproject commit ccc491197b11b018304ca4397af7968a6111a2bb diff --git a/setup.cfg b/setup.cfg index e351c44c1..82852c01f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -115,6 +115,7 @@ filterwarnings = ignore:.*is_categorical_dtype is deprecated.*:DeprecationWarning ignore:.*datetime.datetime.utcfromtimestamp.*:DeprecationWarning ignore:.*__array_wrap__ must accept context and return_scalar arguments:DeprecationWarning + ignore:.*Pyarrow: ;addopts = --disable-warnings diff --git a/stingray/crossspectrum.py b/stingray/crossspectrum.py index 75d40950a..5a6d84867 100644 --- a/stingray/crossspectrum.py +++ b/stingray/crossspectrum.py @@ -16,7 +16,7 @@ from .gti import cross_two_gtis, time_intervals_from_gtis from .lightcurve import Lightcurve from .fourier import avg_cs_from_iterables, error_on_averaged_cross_spectrum -from .fourier import avg_cs_from_events, poisson_level +from .fourier import avg_cs_from_timeseries, poisson_level from .fourier import normalize_periodograms, raw_coherence from .fourier import get_flux_iterable_from_segments, power_color from .fourier import get_rms_from_unnorm_periodogram @@ -1566,6 +1566,27 @@ def _initialize_from_any_input( ``crossspectrum_from_XXXX`` function, and initialize ``self`` with the correct attributes. 
""" + if segment_size is None and isinstance(data1, StingrayObject): + common_gti = cross_two_gtis(data1.gti, data2.gti) + data1.gti = common_gti + data2.gti = common_gti + data1 = data1.apply_gtis(inplace=False) + data2 = data2.apply_gtis(inplace=False) + + data1_is_binned = ( + "counts" in data1.array_attrs() or "_counts" in data1.internal_array_attrs() + ) + data2_is_binned = ( + "counts" in data2.array_attrs() or "_counts" in data2.internal_array_attrs() + ) + + if data1_is_binned and data2_is_binned: + assert data2.time.size == data1.time.size and np.allclose( + data2.time - data1.time, 0 + ), "Time arrays are not the same" + elif data1_is_binned or data2_is_binned: + raise ValueError("Please use input data of the same kind") + if isinstance(data1, EventList): spec = crossspectrum_from_events( data1, @@ -2444,7 +2465,7 @@ def crossspectrum_from_time_array( force_averaged = segment_size is not None # Suppress progress bar for single periodogram silent = silent or (segment_size is None) - results = avg_cs_from_events( + results = avg_cs_from_timeseries( times1, times2, gti, @@ -2684,7 +2705,7 @@ def crossspectrum_from_timeseries( err1 = getattr(ts1, error_flux_attr) err2 = getattr(ts2, error_flux_attr) - results = avg_cs_from_events( + results = avg_cs_from_timeseries( ts1.time, ts2.time, gti, diff --git a/stingray/fourier.py b/stingray/fourier.py index 52d3e0463..ce0b29438 100644 --- a/stingray/fourier.py +++ b/stingray/fourier.py @@ -12,7 +12,14 @@ generate_indices_of_segment_boundaries_binned, generate_indices_of_segment_boundaries_unbinned, ) -from .utils import fft, fftfreq, histogram, show_progress, sum_if_not_none_or_initialize +from .utils import ( + fft, + fftfreq, + histogram, + show_progress, + sum_if_not_none_or_initialize, + fix_segment_size_to_integer_samples, +) def integrate_power_in_frequency_range( @@ -1497,7 +1504,7 @@ def get_flux_iterable_from_segments( gti : [[gti00, gti01], [gti10, gti11], ...] good time intervals segment_size : float - length of segments + length of segments. If ``None``, the full light curve is used. Other parameters ---------------- @@ -1527,12 +1534,20 @@ def get_flux_iterable_from_segments( cast_kind = float if dt is None and binned: dt = np.median(np.diff(times[:100])) + if binned: fluxes = np.asarray(fluxes) if np.iscomplexobj(fluxes): cast_kind = complex - fun = _which_segment_idx_fun(binned, dt) + if segment_size is None: + segment_size = gti[-1, 1] - gti[0, 0] + + def fun(times, gti, segment_size): + return [[gti[0, 0], gti[-1, 1], 0, times.size]] + + else: + fun = _which_segment_idx_fun(binned, dt) for s, e, idx0, idx1 in fun(times, gti, segment_size): if idx1 - idx0 < 2: @@ -2228,7 +2243,15 @@ def local_show_progress(a): return results -def avg_pds_from_events( +def avg_pds_from_events(*args, **kwargs): + warnings.warn( + "avg_pds_from_events is deprecated, use avg_cs_from_timeseries instead", DeprecationWarning + ) + + return avg_pds_from_timeseries(*args, **kwargs) + + +def avg_pds_from_timeseries( times, gti, segment_size, @@ -2256,7 +2279,7 @@ def avg_pds_from_events( gti : [[gti00, gti01], [gti10, gti11], ...] Good time intervals. segment_size : float - Length of segments. + Length of segments. If ``None``, the full light curve is used. dt : float Time resolution of the light curves used to produce periodograms. 
@@ -2294,13 +2317,14 @@ def avg_pds_from_events( mean : float the mean flux """ - if segment_size is None: - segment_size = gti.max() - gti.min() - n_bin = int(segment_size / dt) - if fluxes is None: - dt = segment_size / n_bin + binned = fluxes is not None + if segment_size is not None: + segment_size, n_bin = fix_segment_size_to_integer_samples(segment_size, dt) + elif binned and segment_size is None: + n_bin = fluxes.size else: - segment_size = n_bin * dt + _, n_bin = fix_segment_size_to_integer_samples(gti.max() - gti.min(), dt) + flux_iterable = get_flux_iterable_from_segments( times, gti, segment_size, n_bin, dt=dt, fluxes=fluxes, errors=errors ) @@ -2317,7 +2341,14 @@ def avg_pds_from_events( return cross -def avg_cs_from_events( +def avg_cs_from_events(*args, **kwargs): + warnings.warn( + "avg_cs_from_events is deprecated, use avg_cs_from_timeseries instead", DeprecationWarning + ) + return avg_cs_from_timeseries(*args, **kwargs) + + +def avg_cs_from_timeseries( times1, times2, gti, @@ -2353,7 +2384,7 @@ def avg_cs_from_events( gti : [[gti00, gti01], [gti10, gti11], ...] common good time intervals segment_size : float - length of segments + length of segments. If ``None``, the full light curve is used. dt : float Time resolution of the light curves used to produce periodograms @@ -2398,20 +2429,21 @@ def avg_cs_from_events( n_ave : int the number of averaged periodograms """ - if segment_size is None: - segment_size = gti.max() - gti.min() - n_bin = int(segment_size / dt) - # adjust dt - # dt = segment_size / n_bin - if fluxes1 is None and fluxes2 is None: - dt = segment_size / n_bin + + binned = fluxes1 is not None and fluxes2 is not None + + if segment_size is not None: + segment_size, n_bin = fix_segment_size_to_integer_samples(segment_size, dt) + elif binned and segment_size is None: + n_bin = fluxes1.size else: - segment_size = n_bin * dt + _, n_bin = fix_segment_size_to_integer_samples(gti.max() - gti.min(), dt) + flux_iterable1 = get_flux_iterable_from_segments( - times1, gti, segment_size, n_bin, dt=dt, fluxes=fluxes1, errors=errors1 + times1, gti, segment_size, n_bin=n_bin, dt=dt, fluxes=fluxes1, errors=errors1 ) flux_iterable2 = get_flux_iterable_from_segments( - times2, gti, segment_size, n_bin, dt=dt, fluxes=fluxes2, errors=errors2 + times2, gti, segment_size, n_bin=n_bin, dt=dt, fluxes=fluxes2, errors=errors2 ) is_events = np.all([val is None for val in (fluxes1, fluxes2, errors1, errors2)]) diff --git a/stingray/powerspectrum.py b/stingray/powerspectrum.py index fe2fb2b2c..4fd262437 100755 --- a/stingray/powerspectrum.py +++ b/stingray/powerspectrum.py @@ -14,7 +14,7 @@ from .lightcurve import Lightcurve from .fourier import avg_pds_from_iterable, unnormalize_periodograms -from .fourier import avg_pds_from_events +from .fourier import avg_pds_from_timeseries from .fourier import get_flux_iterable_from_segments from .fourier import poisson_level from .fourier import get_rms_from_unnorm_periodogram @@ -1134,7 +1134,7 @@ def powerspectrum_from_time_array( force_averaged = segment_size is not None # Suppress progress bar for single periodogram silent = silent or (segment_size is None) - table = avg_pds_from_events( + table = avg_pds_from_timeseries( times, gti, segment_size, @@ -1267,7 +1267,7 @@ def powerspectrum_from_lightcurve( if gti is None: gti = lc.gti - table = avg_pds_from_events( + table = avg_pds_from_timeseries( lc.time, gti, segment_size, @@ -1347,7 +1347,7 @@ def powerspectrum_from_timeseries( if error_flux_attr is not None: err = getattr(ts, 
error_flux_attr) - results = avg_pds_from_events( + results = avg_pds_from_timeseries( ts.time, gti, segment_size, diff --git a/stingray/tests/test_crossspectrum.py b/stingray/tests/test_crossspectrum.py index ea5022faa..90f6741e6 100644 --- a/stingray/tests/test_crossspectrum.py +++ b/stingray/tests/test_crossspectrum.py @@ -178,6 +178,19 @@ def setup_class(self): ) self.lc1, self.lc2 = self.events1, self.events2 + def test_single_cs_of_lc_with_tight_gtis_does_not_crash(self): + tstart = 1.0 + tend = 10.0 + gti = [[1.0, 9.0]] + + time = np.linspace(tstart, tend, 10001) + + counts1 = np.random.poisson(10, size=time.shape[0]) + counts2 = np.random.poisson(10, size=time.shape[0]) + lc1 = Lightcurve(time, counts1, gti=gti) + lc2 = Lightcurve(time, counts2, gti=gti) + Crossspectrum(lc1, lc2, norm="leahy") + @pytest.mark.parametrize("norm", ["leahy", "frac", "abs", "none"]) def test_common_mean_gives_comparable_scatter(self, norm): acs = AveragedCrossspectrum( @@ -704,14 +717,22 @@ def test_init_with_wrong_lc2_instance(self): def test_make_crossspectrum_diff_lc_counts_shape(self): counts = np.array([1] * 10001) - time = np.linspace(0.0, 1.0001, 10001) - lc_ = Lightcurve(time, counts) + dt = 0.0001 + time = np.arange(0.0, 1.0001, dt) + lc_ = Lightcurve(time, counts, gti=[[time[0] - dt / 2, time[-1] + dt / 2]]) with pytest.warns(UserWarning, match="Lightcurves do not have same tseg"): - with pytest.raises( - AssertionError, match="No GTIs are equal to or longer than segment_size" - ): + with pytest.raises(AssertionError, match="Time arrays are not the same"): Crossspectrum(self.lc1, lc_) + def test_make_crossspectrum_lc_and_evts(self): + counts = np.array([1] * 10001) + dt = 0.0001 + time = np.arange(0.0, 1.0001, dt) + lc_ = Lightcurve(time, counts, gti=[[time[0] - dt / 2, time[-1] + dt / 2]]) + ev_ = EventList(time) + with pytest.raises(ValueError, match="Please use input data of the same kind"): + Crossspectrum(ev_, lc_, skip_checks=True) + def test_make_crossspectrum_diff_lc_stat(self): lc_ = copy.deepcopy(self.lc1) lc_.err_dist = "gauss" diff --git a/stingray/tests/test_fourier.py b/stingray/tests/test_fourier.py index 51b9b560e..860a41528 100644 --- a/stingray/tests/test_fourier.py +++ b/stingray/tests/test_fourier.py @@ -8,6 +8,8 @@ from stingray.fourier import fft, fftfreq, normalize_abs, normalize_frac, poisson_level from stingray.fourier import ( get_flux_iterable_from_segments, + avg_pds_from_timeseries, + avg_cs_from_timeseries, avg_pds_from_events, avg_cs_from_events, ) @@ -91,8 +93,20 @@ def test_avg_pds_imperfect_lc_size(): gti = np.asarray([[-0.5, 99.5]]) segment_size = 5.99 dt = 1 - res = avg_pds_from_events(times, gti, segment_size, dt, fluxes=fluxes) - assert res.meta["segment_size"] == 5 + res = avg_pds_from_timeseries(times, gti, segment_size, dt, fluxes=fluxes) + assert res.meta["segment_size"] == 6 + assert res.meta["dt"] == 1 + + +def test_avg_pds_from_events_warns(): + times = np.arange(100) + fluxes = np.ones(100).astype(float) + gti = np.asarray([[-0.5, 99.5]]) + segment_size = 5.99 + dt = 1 + with pytest.warns(DeprecationWarning, match="avg_pds_from_events is deprecated"): + res = avg_pds_from_events(times, gti, segment_size, dt, fluxes=fluxes) + assert res.meta["segment_size"] == 6 assert res.meta["dt"] == 1 @@ -103,10 +117,25 @@ def test_avg_cs_imperfect_lc_size(): gti = np.asarray([[-0.5, 99.5]]) segment_size = 5.99 dt = 1 - res = avg_cs_from_events( + res = avg_cs_from_timeseries( times1, times2, gti, segment_size, dt, fluxes1=fluxes1, fluxes2=fluxes2 ) - assert 
res.meta["segment_size"] == 5 + assert res.meta["segment_size"] == 6 + assert res.meta["dt"] == 1 + + +def test_avg_cs_from_events_warns(): + times1 = times2 = np.arange(100) + fluxes1 = np.ones(100).astype(float) + fluxes2 = np.ones(100).astype(float) + gti = np.asarray([[-0.5, 99.5]]) + segment_size = 5.99 + dt = 1 + with pytest.warns(DeprecationWarning, match="avg_cs_from_events is deprecated"): + res = avg_cs_from_events( + times1, times2, gti, segment_size, dt, fluxes1=fluxes1, fluxes2=fluxes2 + ) + assert res.meta["segment_size"] == 6 assert res.meta["dt"] == 1 @@ -244,7 +273,7 @@ def test_fts_from_segments_cts_and_events_are_equal(self): def test_avg_pds_bad_input(self): times = np.sort(rng.uniform(0, 1000, 1)) - out_ev = avg_pds_from_events(times, self.gti, self.segment_size, self.dt) + out_ev = avg_pds_from_timeseries(times, self.gti, self.segment_size, self.dt) assert out_ev is None @pytest.mark.parametrize("return_subcs", [True, False]) @@ -252,7 +281,7 @@ def test_avg_pds_bad_input(self): def test_avg_cs_bad_input(self, return_auxil, return_subcs): times1 = np.sort(rng.uniform(0, 1000, 1)) times2 = np.sort(rng.uniform(0, 1000, 1)) - out_ev = avg_cs_from_events( + out_ev = avg_cs_from_timeseries( times1, times2, self.gti, @@ -265,7 +294,7 @@ def test_avg_cs_bad_input(self, return_auxil, return_subcs): @pytest.mark.parametrize("norm", ["frac", "abs", "none", "leahy"]) def test_avg_pds_use_common_mean_similar_stats(self, norm): - out_comm = avg_pds_from_events( + out_comm = avg_pds_from_timeseries( self.times, self.gti, self.segment_size, @@ -275,7 +304,7 @@ def test_avg_pds_use_common_mean_similar_stats(self, norm): silent=True, fluxes=None, ) - out = avg_pds_from_events( + out = avg_pds_from_timeseries( self.times, self.gti, self.segment_size, @@ -289,7 +318,7 @@ def test_avg_pds_use_common_mean_similar_stats(self, norm): @pytest.mark.parametrize("norm", ["frac", "abs", "none", "leahy"]) def test_avg_cs_use_common_mean_similar_stats(self, norm): - out_comm = avg_cs_from_events( + out_comm = avg_cs_from_timeseries( self.times, self.times2, self.gti, @@ -300,7 +329,7 @@ def test_avg_cs_use_common_mean_similar_stats(self, norm): silent=True, return_subcs=True, ) - out = avg_cs_from_events( + out = avg_cs_from_timeseries( self.times, self.times2, self.gti, @@ -328,7 +357,7 @@ def test_avg_cs_use_common_mean_similar_stats(self, norm): @pytest.mark.parametrize("use_common_mean", [True, False]) @pytest.mark.parametrize("norm", ["frac", "abs", "none", "leahy"]) def test_avg_pds_cts_and_events_are_equal(self, norm, use_common_mean): - out_ev = avg_pds_from_events( + out_ev = avg_pds_from_timeseries( self.times, self.gti, self.segment_size, @@ -339,7 +368,7 @@ def test_avg_pds_cts_and_events_are_equal(self, norm, use_common_mean): fluxes=None, return_subcs=True, ) - out_ct = avg_pds_from_events( + out_ct = avg_pds_from_timeseries( self.bin_times, self.gti, self.segment_size, @@ -355,7 +384,7 @@ def test_avg_pds_cts_and_events_are_equal(self, norm, use_common_mean): @pytest.mark.parametrize("use_common_mean", [True, False]) @pytest.mark.parametrize("norm", ["frac", "abs", "none", "leahy"]) def test_avg_pds_cts_and_err_and_events_are_equal(self, norm, use_common_mean): - out_ev = avg_pds_from_events( + out_ev = avg_pds_from_timeseries( self.times, self.gti, self.segment_size, @@ -366,7 +395,7 @@ def test_avg_pds_cts_and_err_and_events_are_equal(self, norm, use_common_mean): fluxes=None, return_subcs=True, ) - out_ct = avg_pds_from_events( + out_ct = avg_pds_from_timeseries( self.bin_times, 
self.gti, self.segment_size, @@ -389,7 +418,7 @@ def test_avg_pds_cts_and_err_and_events_are_equal(self, norm, use_common_mean): @pytest.mark.parametrize("use_common_mean", [True, False]) @pytest.mark.parametrize("norm", ["frac", "abs", "none", "leahy"]) def test_avg_cs_cts_and_events_are_equal(self, norm, use_common_mean): - out_ev = avg_cs_from_events( + out_ev = avg_cs_from_timeseries( self.times, self.times2, self.gti, @@ -399,7 +428,7 @@ def test_avg_cs_cts_and_events_are_equal(self, norm, use_common_mean): use_common_mean=use_common_mean, silent=False, ) - out_ct = avg_cs_from_events( + out_ct = avg_cs_from_timeseries( self.bin_times, self.bin_times, self.gti, @@ -419,7 +448,7 @@ def test_avg_cs_cts_and_events_are_equal(self, norm, use_common_mean): @pytest.mark.parametrize("use_common_mean", [True, False]) @pytest.mark.parametrize("norm", ["frac", "abs", "none", "leahy"]) def test_avg_cs_cts_and_err_and_events_are_equal(self, norm, use_common_mean): - out_ev = avg_cs_from_events( + out_ev = avg_cs_from_timeseries( self.times, self.times2, self.gti, @@ -429,7 +458,7 @@ def test_avg_cs_cts_and_err_and_events_are_equal(self, norm, use_common_mean): use_common_mean=use_common_mean, silent=False, ) - out_ct = avg_cs_from_events( + out_ct = avg_cs_from_timeseries( self.bin_times, self.bin_times, self.gti, diff --git a/stingray/tests/test_powerspectrum.py b/stingray/tests/test_powerspectrum.py index cd1f6c045..13ddca437 100644 --- a/stingray/tests/test_powerspectrum.py +++ b/stingray/tests/test_powerspectrum.py @@ -357,6 +357,17 @@ def setup_class(cls): cls.lc = Lightcurve(time, counts=poisson_counts, dt=dt, gti=[[tstart, tend]]) + def test_single_ps_of_lc_with_tight_gtis_does_not_crash(self): + tstart = 1.0 + tend = 10.0 + gti = [[1.0, 9.0]] + + time = np.linspace(tstart, tend, 10000) + counts = np.random.poisson(10, size=time.shape[0]) + + lc = Lightcurve(time, counts, gti=gti) + Powerspectrum(lc, norm="leahy") + @pytest.mark.parametrize("skip_checks", [True, False]) def test_initialize_empty(self, skip_checks): cs = Powerspectrum(skip_checks=skip_checks) diff --git a/stingray/utils.py b/stingray/utils.py index 7f0b16312..d065d6611 100644 --- a/stingray/utils.py +++ b/stingray/utils.py @@ -970,6 +970,51 @@ def _als(y, lam, p, niter=10): return z +def fix_segment_size_to_integer_samples(segment_size, dt, tolerance=0.01): + """Fix segment size to an integer number of bins. + + In most cases, the segment size is rounded down to an integer number of + bins. However, when it is within ``tolerance`` of the next integer number + of bins, it is rounded up.
+ + Parameters + ---------- + segment_size : float + The segment size in seconds + dt : float + The sample time in seconds + + Other Parameters + ---------------- + tolerance : float + The largest distance from the next integer number of bins (as a fraction of a bin) at which the segment size is rounded up rather than down + + Returns + ------- + segment_size : float + The segment size in seconds, fixed to an integer number of bins + n_bin : int + The number of bins in the segment + + Examples + -------- + >>> seg, n = fix_segment_size_to_integer_samples(1.0, 0.1) + >>> assert seg == 1.0 and n == 10 + >>> seg, n = fix_segment_size_to_integer_samples(0.999, 0.1) + >>> assert seg == 1.0 and n == 10 + """ + n_bin_float = segment_size / dt + n_bin_down = np.floor(segment_size / dt) + n_bin_up = np.ceil(segment_size / dt) + n_bin = n_bin_down + + if n_bin_up - n_bin_float < tolerance: + n_bin = n_bin_up + + segment_size = n_bin * dt + return segment_size, int(n_bin) + + def baseline_als(x, y, lam=None, p=None, niter=10, return_baseline=False, offset_correction=False): """Baseline Correction with Asymmetric Least Squares Smoothing. diff --git a/stingray/varenergyspectrum.py b/stingray/varenergyspectrum.py index b1a401574..9b1d7a355 100644 --- a/stingray/varenergyspectrum.py +++ b/stingray/varenergyspectrum.py @@ -6,7 +6,12 @@ from stingray.lightcurve import Lightcurve from stingray.utils import assign_value_if_none, simon, excess_variance, show_progress -from stingray.fourier import avg_cs_from_events, avg_pds_from_events, fftfreq, get_average_ctrate +from stingray.fourier import ( + avg_cs_from_timeseries, + avg_pds_from_timeseries, + fftfreq, + get_average_ctrate, +) from stingray.fourier import poisson_level, error_on_averaged_cross_spectrum, cross_to_covariance from abc import ABCMeta, abstractmethod @@ -530,7 +535,7 @@ def _spectrum_function(self): sub2_power_noise = poisson_level(norm="abs", meanrate=countrate_sub2) # Calculate the cross spectrum - results = avg_cs_from_events( + results = avg_cs_from_timeseries( sub_events, sub_events2, self.gti, @@ -550,7 +555,7 @@ def _spectrum_function(self): delta_nu_after_mean * np.sqrt(sub_power_noise * sub2_power_noise) ) else: - results = avg_pds_from_events( + results = avg_pds_from_timeseries( sub_events, self.gti, self.segment_size, self.bin_time, silent=True, norm="abs" ) if results is None: @@ -804,7 +809,7 @@ def _spectrum_function(self): ref_events = self._get_times_from_energy_range(self.events2, self.ref_band[0]) # Calculate the PDS in the reference band. Needed to calculate errors.
- results = avg_pds_from_events( + results = avg_pds_from_timeseries( ref_events, self.gti, self.segment_size, self.bin_time, silent=True, norm="none" ) @@ -827,7 +832,7 @@ def _spectrum_function(self): # Extract the photon arrival times from the subject band sub_events = self._get_times_from_energy_range(self.events1, eint) - results_cross = avg_cs_from_events( + results_cross = avg_cs_from_timeseries( sub_events, ref_events, self.gti, @@ -837,7 +842,7 @@ def _spectrum_function(self): norm="none", ) - results_ps = avg_pds_from_events( + results_ps = avg_pds_from_timeseries( sub_events, self.gti, self.segment_size, self.bin_time, silent=True, norm="none" ) @@ -982,7 +987,7 @@ def _spectrum_function(self): countrate_ref = get_average_ctrate(ref_events, self.gti, self.segment_size) ref_power_noise = poisson_level(norm="abs", meanrate=countrate_ref) - results = avg_pds_from_events( + results = avg_pds_from_timeseries( ref_events, self.gti, self.segment_size, self.bin_time, silent=True, norm="abs" ) freq = results["freq"] @@ -1004,7 +1009,7 @@ def _spectrum_function(self): countrate_sub = get_average_ctrate(sub_events, self.gti, self.segment_size) sub_power_noise = poisson_level(norm="abs", meanrate=countrate_sub) - results_cross = avg_cs_from_events( + results_cross = avg_cs_from_timeseries( sub_events, ref_events, self.gti, @@ -1014,7 +1019,7 @@ def _spectrum_function(self): norm="abs", ) - results_ps = avg_pds_from_events( + results_ps = avg_pds_from_timeseries( sub_events, self.gti, self.segment_size, self.bin_time, silent=True, norm="abs" )
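
Usage sketch (reviewer note, not part of the patch): the snippet below mirrors the new test_single_ps_of_lc_with_tight_gtis_does_not_crash and shows the case these changes address, a single (non-averaged) periodogram from a light curve whose GTIs lie strictly inside the time array. It only assumes the public stingray API already used in the diff (Lightcurve, Powerspectrum).

    import numpy as np
    from stingray import Lightcurve, Powerspectrum

    # Evenly sampled light curve whose GTIs are narrower than the time array.
    time = np.linspace(1.0, 10.0, 10000)
    counts = np.random.poisson(10, size=time.size)
    lc = Lightcurve(time, counts, gti=[[1.0, 9.0]])

    # With segment_size=None (the non-averaged case), the GTI-filtered light
    # curve is now used as a single segment instead of crashing (the
    # ``Powerspectrum`` fix referenced as #663 in the changelog above).
    ps = Powerspectrum(lc, norm="leahy")
    print(ps.freq.size, ps.power.size)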
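
A second sketch, based on the docstring above and the updated expectations in test_avg_pds_imperfect_lc_size / test_avg_cs_imperfect_lc_size, illustrates the rounding rule of the fix_segment_size_to_integer_samples helper added in this patch; the 5.3 s input is just an illustrative value.

    from stingray.utils import fix_segment_size_to_integer_samples

    # Far from the next integer number of bins: round down.
    seg, n_bin = fix_segment_size_to_integer_samples(5.3, 1.0)
    print(seg, n_bin)  # 5.0 5

    # Within ``tolerance`` (default 0.01 bins) of the next integer: round up.
    # This is why the updated tests expect segment_size == 6 for 5.99 s
    # segments sampled at dt = 1 s.
    seg, n_bin = fix_segment_size_to_integer_samples(5.99, 1.0)
    print(seg, n_bin)  # 6.0 6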
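
Finally, a migration sketch for downstream code: avg_pds_from_events and avg_cs_from_events are kept as thin wrappers that forward to the new *_from_timeseries functions and emit a DeprecationWarning, so existing callers keep working while they switch names. The call below follows the signature used in the updated tests.

    import numpy as np
    from stingray.fourier import avg_pds_from_events, avg_pds_from_timeseries

    times = np.arange(100)
    fluxes = np.ones(100, dtype=float)
    gti = np.asarray([[-0.5, 99.5]])

    # Old name: still works, but emits a DeprecationWarning.
    old = avg_pds_from_events(times, gti, 5.99, 1, fluxes=fluxes)

    # New name: same signature, same result (the old name just forwards here).
    new = avg_pds_from_timeseries(times, gti, 5.99, 1, fluxes=fluxes)
    assert old.meta["segment_size"] == new.meta["segment_size"] == 6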