diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py index ead00cd778d7b..676757d8e095f 100644 --- a/pandas/_testing/__init__.py +++ b/pandas/_testing/__init__.py @@ -6,7 +6,6 @@ import operator import os import re -import string from sys import byteorder from typing import ( TYPE_CHECKING, @@ -109,7 +108,6 @@ from pandas.core.arrays import ArrowExtensionArray _N = 30 -_K = 4 UNSIGNED_INT_NUMPY_DTYPES: list[NpDtype] = ["uint8", "uint16", "uint32", "uint64"] UNSIGNED_INT_EA_DTYPES: list[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"] @@ -341,10 +339,6 @@ def to_array(obj): # Others -def getCols(k) -> str: - return string.ascii_uppercase[:k] - - def makeTimeSeries(nper=None, freq: Frequency = "B", name=None) -> Series: if nper is None: nper = _N @@ -355,16 +349,6 @@ def makeTimeSeries(nper=None, freq: Frequency = "B", name=None) -> Series: ) -def getTimeSeriesData(nper=None, freq: Frequency = "B") -> dict[str, Series]: - return {c: makeTimeSeries(nper, freq) for c in getCols(_K)} - - -# make frame -def makeTimeDataFrame(nper=None, freq: Frequency = "B") -> DataFrame: - data = getTimeSeriesData(nper, freq) - return DataFrame(data) - - def makeCustomIndex( nentries, nlevels, @@ -887,7 +871,6 @@ def shares_memory(left, right) -> bool: "external_error_raised", "FLOAT_EA_DTYPES", "FLOAT_NUMPY_DTYPES", - "getCols", "get_cython_table_params", "get_dtype", "getitem", @@ -895,13 +878,11 @@ def shares_memory(left, right) -> bool: "get_finest_unit", "get_obj", "get_op_from_name", - "getTimeSeriesData", "iat", "iloc", "loc", "makeCustomDataframe", "makeCustomIndex", - "makeTimeDataFrame", "makeTimeSeries", "maybe_produces_warning", "NARROW_NP_DTYPES", diff --git a/pandas/conftest.py b/pandas/conftest.py index 9ed6f8f43ae03..6401d4b5981e0 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -550,7 +550,11 @@ def multiindex_year_month_day_dataframe_random_data(): DataFrame with 3 level MultiIndex (year, month, day) covering first 100 business days from 2000-01-01 with random data """ - tdf = tm.makeTimeDataFrame(100) + tdf = DataFrame( + np.random.default_rng(2).standard_normal((100, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=100, freq="B"), + ) ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum() # use int64 Index, to make sure things work ymd.index = ymd.index.set_levels([lev.astype("i8") for lev in ymd.index.levels]) diff --git a/pandas/tests/frame/conftest.py b/pandas/tests/frame/conftest.py index 99ea565e5b60c..e07024b2e2a09 100644 --- a/pandas/tests/frame/conftest.py +++ b/pandas/tests/frame/conftest.py @@ -7,7 +7,6 @@ NaT, date_range, ) -import pandas._testing as tm @pytest.fixture @@ -16,27 +15,12 @@ def datetime_frame() -> DataFrame: Fixture for DataFrame of floats with DatetimeIndex Columns are ['A', 'B', 'C', 'D'] - - A B C D - 2000-01-03 -1.122153 0.468535 0.122226 1.693711 - 2000-01-04 0.189378 0.486100 0.007864 -1.216052 - 2000-01-05 0.041401 -0.835752 -0.035279 -0.414357 - 2000-01-06 0.430050 0.894352 0.090719 0.036939 - 2000-01-07 -0.620982 -0.668211 -0.706153 1.466335 - 2000-01-10 -0.752633 0.328434 -0.815325 0.699674 - 2000-01-11 -2.236969 0.615737 -0.829076 -1.196106 - ... ... ... ... ... 
- 2000-02-03 1.642618 -0.579288 0.046005 1.385249 - 2000-02-04 -0.544873 -1.160962 -0.284071 -1.418351 - 2000-02-07 -2.656149 -0.601387 1.410148 0.444150 - 2000-02-08 -1.201881 -1.289040 0.772992 -1.445300 - 2000-02-09 1.377373 0.398619 1.008453 -0.928207 - 2000-02-10 0.473194 -0.636677 0.984058 0.511519 - 2000-02-11 -0.965556 0.408313 -1.312844 -0.381948 - - [30 rows x 4 columns] """ - return DataFrame(tm.getTimeSeriesData()) + return DataFrame( + np.random.default_rng(2).standard_normal((100, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=100, freq="B"), + ) @pytest.fixture diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index 135a86cad1395..dfb4a3092789a 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -584,7 +584,7 @@ def test_fancy_getitem_slice_mixed( tm.assert_frame_equal(float_frame, original) def test_getitem_setitem_non_ix_labels(self): - df = tm.makeTimeDataFrame() + df = DataFrame(range(20), index=date_range("2020-01-01", periods=20)) start, end = df.index[[5, 10]] diff --git a/pandas/tests/frame/methods/test_cov_corr.py b/pandas/tests/frame/methods/test_cov_corr.py index 359e9122b0c0b..108816697ef3e 100644 --- a/pandas/tests/frame/methods/test_cov_corr.py +++ b/pandas/tests/frame/methods/test_cov_corr.py @@ -6,7 +6,9 @@ import pandas as pd from pandas import ( DataFrame, + Index, Series, + date_range, isna, ) import pandas._testing as tm @@ -325,8 +327,12 @@ def test_corrwith(self, datetime_frame, dtype): tm.assert_almost_equal(correls[row], df1.loc[row].corr(df2.loc[row])) def test_corrwith_with_objects(self): - df1 = tm.makeTimeDataFrame() - df2 = tm.makeTimeDataFrame() + df1 = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df2 = df1.copy() cols = ["A", "B", "C", "D"] df1["obj"] = "foo" diff --git a/pandas/tests/frame/methods/test_first_and_last.py b/pandas/tests/frame/methods/test_first_and_last.py index 23355a5549a88..212e56442ee07 100644 --- a/pandas/tests/frame/methods/test_first_and_last.py +++ b/pandas/tests/frame/methods/test_first_and_last.py @@ -1,12 +1,15 @@ """ Note: includes tests for `last` """ +import numpy as np import pytest import pandas as pd from pandas import ( DataFrame, + Index, bdate_range, + date_range, ) import pandas._testing as tm @@ -16,13 +19,21 @@ class TestFirst: def test_first_subset(self, frame_or_series): - ts = tm.makeTimeDataFrame(freq="12h") + ts = DataFrame( + np.random.default_rng(2).standard_normal((100, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=100, freq="12h"), + ) ts = tm.get_obj(ts, frame_or_series) with tm.assert_produces_warning(FutureWarning, match=deprecated_msg): result = ts.first("10d") assert len(result) == 20 - ts = tm.makeTimeDataFrame(freq="D") + ts = DataFrame( + np.random.default_rng(2).standard_normal((100, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=100, freq="D"), + ) ts = tm.get_obj(ts, frame_or_series) with tm.assert_produces_warning(FutureWarning, match=deprecated_msg): result = ts.first("10d") @@ -64,13 +75,21 @@ def test_first_last_raises(self, frame_or_series): obj.last("1D") def test_last_subset(self, frame_or_series): - ts = tm.makeTimeDataFrame(freq="12h") + ts = DataFrame( + np.random.default_rng(2).standard_normal((100, 4)), + 
columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=100, freq="12h"), + ) ts = tm.get_obj(ts, frame_or_series) with tm.assert_produces_warning(FutureWarning, match=last_deprecated_msg): result = ts.last("10d") assert len(result) == 20 - ts = tm.makeTimeDataFrame(nper=30, freq="D") + ts = DataFrame( + np.random.default_rng(2).standard_normal((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=30, freq="D"), + ) ts = tm.get_obj(ts, frame_or_series) with tm.assert_produces_warning(FutureWarning, match=last_deprecated_msg): result = ts.last("10d") diff --git a/pandas/tests/frame/methods/test_truncate.py b/pandas/tests/frame/methods/test_truncate.py index 4c4b04076c8d5..12077952c2e03 100644 --- a/pandas/tests/frame/methods/test_truncate.py +++ b/pandas/tests/frame/methods/test_truncate.py @@ -60,7 +60,7 @@ def test_truncate(self, datetime_frame, frame_or_series): truncated = ts.truncate(before=ts.index[-1] + ts.index.freq) assert len(truncated) == 0 - msg = "Truncate: 2000-01-06 00:00:00 must be after 2000-02-04 00:00:00" + msg = "Truncate: 2000-01-06 00:00:00 must be after 2000-05-16 00:00:00" with pytest.raises(ValueError, match=msg): ts.truncate( before=ts.index[-1] - ts.index.freq, after=ts.index[0] + ts.index.freq diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 7fd795dc84cca..a4825c80ee815 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -1523,8 +1523,12 @@ def test_combineFunc(self, float_frame, mixed_float_frame): [operator.eq, operator.ne, operator.lt, operator.gt, operator.ge, operator.le], ) def test_comparisons(self, simple_frame, float_frame, func): - df1 = tm.makeTimeDataFrame() - df2 = tm.makeTimeDataFrame() + df1 = DataFrame( + np.random.default_rng(2).standard_normal((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=pd.date_range("2000-01-01", periods=30, freq="B"), + ) + df2 = df1.copy() row = simple_frame.xs("a") ndim_5 = np.ones(df1.shape + (1, 1, 1)) diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py index 1f08b9d5c35b8..6564e381af0ea 100644 --- a/pandas/tests/generic/test_generic.py +++ b/pandas/tests/generic/test_generic.py @@ -10,7 +10,9 @@ from pandas import ( DataFrame, + Index, Series, + date_range, ) import pandas._testing as tm @@ -328,12 +330,16 @@ def test_squeeze_series_noop(self, ser): def test_squeeze_frame_noop(self): # noop - df = tm.makeTimeDataFrame() + df = DataFrame(np.eye(2)) tm.assert_frame_equal(df.squeeze(), df) def test_squeeze_frame_reindex(self): # squeezing - df = tm.makeTimeDataFrame().reindex(columns=["A"]) + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ).reindex(columns=["A"]) tm.assert_series_equal(df.squeeze(), df["A"]) def test_squeeze_0_len_dim(self): @@ -345,7 +351,11 @@ def test_squeeze_0_len_dim(self): def test_squeeze_axis(self): # axis argument - df = tm.makeTimeDataFrame(nper=1).iloc[:, :1] + df = DataFrame( + np.random.default_rng(2).standard_normal((1, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=1, freq="B"), + ).iloc[:, :1] assert df.shape == (1, 1) tm.assert_series_equal(df.squeeze(axis=0), df.iloc[0]) tm.assert_series_equal(df.squeeze(axis="index"), df.iloc[0]) @@ -360,14 +370,22 @@ def test_squeeze_axis(self): df.squeeze(axis="x") def 
test_squeeze_axis_len_3(self): - df = tm.makeTimeDataFrame(3) + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=3, freq="B"), + ) tm.assert_frame_equal(df.squeeze(axis=0), df) def test_numpy_squeeze(self): s = Series(range(2), dtype=np.float64) tm.assert_series_equal(np.squeeze(s), s) - df = tm.makeTimeDataFrame().reindex(columns=["A"]) + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ).reindex(columns=["A"]) tm.assert_series_equal(np.squeeze(df), df["A"]) @pytest.mark.parametrize( @@ -382,11 +400,19 @@ def test_transpose_series(self, ser): tm.assert_series_equal(ser.transpose(), ser) def test_transpose_frame(self): - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) tm.assert_frame_equal(df.transpose().transpose(), df) def test_numpy_transpose(self, frame_or_series): - obj = tm.makeTimeDataFrame() + obj = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) obj = tm.get_obj(obj, frame_or_series) if frame_or_series is Series: @@ -419,7 +445,11 @@ def test_take_series(self, ser): def test_take_frame(self): indices = [1, 5, -2, 6, 3, -1] - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) out = df.take(indices) expected = DataFrame( data=df.values.take(indices, axis=0), @@ -431,7 +461,7 @@ def test_take_frame(self): def test_take_invalid_kwargs(self, frame_or_series): indices = [-3, 2, 0, 1] - obj = tm.makeTimeDataFrame() + obj = DataFrame(range(5)) obj = tm.get_obj(obj, frame_or_series) msg = r"take\(\) got an unexpected keyword argument 'foo'" diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index 3ba1510cc6b1d..c3bcd30796e63 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -161,7 +161,11 @@ def test_agg_apply_corner(ts, tsframe): def test_agg_grouping_is_list_tuple(ts): - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=pd.date_range("2000-01-01", periods=30, freq="B"), + ) grouped = df.groupby(lambda x: x.year) grouper = grouped.grouper.groupings[0].grouping_vector diff --git a/pandas/tests/groupby/conftest.py b/pandas/tests/groupby/conftest.py index b8fb3b7fff676..6d4a874f9d3ec 100644 --- a/pandas/tests/groupby/conftest.py +++ b/pandas/tests/groupby/conftest.py @@ -1,7 +1,11 @@ import numpy as np import pytest -from pandas import DataFrame +from pandas import ( + DataFrame, + Index, + date_range, +) import pandas._testing as tm from pandas.core.groupby.base import ( reduction_kernels, @@ -48,7 +52,11 @@ def ts(): @pytest.fixture def tsframe(): - return DataFrame(tm.getTimeSeriesData()) + return DataFrame( + np.random.default_rng(2).standard_normal((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=30, freq="B"), + ) @pytest.fixture diff --git 
a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 5b17484de9c93..254a12d9bdebb 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -319,7 +319,11 @@ def test_pass_args_kwargs_duplicate_columns(tsframe, as_index): def test_len(): - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]) assert len(grouped) == len(df) @@ -327,6 +331,8 @@ def test_len(): expected = len({(x.year, x.month) for x in df.index}) assert len(grouped) == expected + +def test_len_nan_group(): # issue 11016 df = DataFrame({"a": [np.nan] * 3, "b": [1, 2, 3]}) assert len(df.groupby("a")) == 0 @@ -940,7 +946,11 @@ def test_groupby_as_index_corner(df, ts): def test_groupby_multiple_key(): - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]) agged = grouped.sum() tm.assert_almost_equal(df.values, agged.values) @@ -1655,7 +1665,11 @@ def test_dont_clobber_name_column(): def test_skip_group_keys(): - tsf = tm.makeTimeDataFrame() + tsf = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) grouped = tsf.groupby(lambda x: x.month, group_keys=False) result = grouped.apply(lambda x: x.sort_values(by="A")[:3]) diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index a6f160d92fb66..35699fe9647d7 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -10,6 +10,7 @@ from pandas import ( Categorical, DataFrame, + Index, MultiIndex, Series, Timestamp, @@ -67,7 +68,11 @@ def demean(arr): tm.assert_frame_equal(result, expected) # GH 8430 - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((50, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=50, freq="B"), + ) g = df.groupby(pd.Grouper(freq="ME")) g.transform(lambda x: x - 1) @@ -115,7 +120,7 @@ def test_transform_fast2(): ) result = df.groupby("grouping").transform("first") - dates = pd.Index( + dates = Index( [ Timestamp("2014-1-1"), Timestamp("2014-1-2"), diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py index eae7e46c7ec35..d270741a0e0bc 100644 --- a/pandas/tests/indexes/multi/test_indexing.py +++ b/pandas/tests/indexes/multi/test_indexing.py @@ -13,6 +13,7 @@ import pandas as pd from pandas import ( Categorical, + DataFrame, Index, MultiIndex, date_range, @@ -37,7 +38,11 @@ def test_slice_locs_partial(self, idx): assert result == (2, 4) def test_slice_locs(self): - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((50, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=50, freq="B"), + ) stacked = df.stack(future_stack=True) idx = stacked.index @@ -57,7 +62,11 @@ def test_slice_locs(self): tm.assert_almost_equal(sliced.values, expected.values) def test_slice_locs_with_type_mismatch(self): - df = 
tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) stacked = df.stack(future_stack=True) idx = stacked.index with pytest.raises(TypeError, match="^Level type mismatch"): @@ -861,7 +870,7 @@ def test_timestamp_multiindex_indexer(): [3], ] ) - df = pd.DataFrame({"foo": np.arange(len(idx))}, idx) + df = DataFrame({"foo": np.arange(len(idx))}, idx) result = df.loc[pd.IndexSlice["2019-1-2":, "x", :], "foo"] qidx = MultiIndex.from_product( [ diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py index 2f9018112c03b..ca551024b4c1f 100644 --- a/pandas/tests/indexing/test_partial.py +++ b/pandas/tests/indexing/test_partial.py @@ -536,7 +536,11 @@ def test_series_partial_set_with_name(self): @pytest.mark.parametrize("key", [100, 100.0]) def test_setitem_with_expansion_numeric_into_datetimeindex(self, key): # GH#4940 inserting non-strings - orig = tm.makeTimeDataFrame() + orig = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) df = orig.copy() df.loc[key, :] = df.iloc[0] @@ -550,7 +554,11 @@ def test_partial_set_invalid(self): # GH 4940 # allow only setting of 'valid' values - orig = tm.makeTimeDataFrame() + orig = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) # allow object conversion here df = orig.copy() diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py index 4dfae753edf72..74286a3ddd8ed 100644 --- a/pandas/tests/io/excel/test_writers.py +++ b/pandas/tests/io/excel/test_writers.py @@ -20,6 +20,7 @@ DataFrame, Index, MultiIndex, + date_range, option_context, ) import pandas._testing as tm @@ -271,7 +272,7 @@ def test_excel_multindex_roundtrip( def test_read_excel_parse_dates(self, ext): # see gh-11544, gh-12051 df = DataFrame( - {"col": [1, 2, 3], "date_strings": pd.date_range("2012-01-01", periods=3)} + {"col": [1, 2, 3], "date_strings": date_range("2012-01-01", periods=3)} ) df2 = df.copy() df2["date_strings"] = df2["date_strings"].dt.strftime("%m/%d/%Y") @@ -460,7 +461,11 @@ def test_mixed(self, frame, path): tm.assert_frame_equal(mixed_frame, recons) def test_ts_frame(self, path): - df = tm.makeTimeDataFrame()[:5] + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=5, freq="B"), + ) # freq doesn't round-trip index = pd.DatetimeIndex(np.asarray(df.index), freq=None) @@ -533,7 +538,11 @@ def test_inf_roundtrip(self, path): def test_sheets(self, frame, path): # freq doesn't round-trip - tsframe = tm.makeTimeDataFrame()[:5] + tsframe = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=5, freq="B"), + ) index = pd.DatetimeIndex(np.asarray(tsframe.index), freq=None) tsframe.index = index @@ -653,7 +662,11 @@ def test_excel_roundtrip_datetime(self, merge_cells, path): # datetime.date, not sure what to test here exactly # freq does not round-trip - tsframe = tm.makeTimeDataFrame()[:5] + tsframe = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", 
periods=5, freq="B"), + ) index = pd.DatetimeIndex(np.asarray(tsframe.index), freq=None) tsframe.index = index @@ -772,7 +785,11 @@ def test_to_excel_timedelta(self, path): def test_to_excel_periodindex(self, path): # xp has a PeriodIndex - df = tm.makeTimeDataFrame()[:5] + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=5, freq="B"), + ) xp = df.resample("ME").mean().to_period("M") xp.to_excel(path, sheet_name="sht1") @@ -837,7 +854,11 @@ def test_to_excel_multiindex_cols(self, merge_cells, frame, path): def test_to_excel_multiindex_dates(self, merge_cells, path): # try multiindex with dates - tsframe = tm.makeTimeDataFrame()[:5] + tsframe = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=5, freq="B"), + ) new_index = [tsframe.index, np.arange(len(tsframe.index), dtype=np.int64)] tsframe.index = MultiIndex.from_arrays(new_index) diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 79be90cd00469..428c73c282426 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -23,8 +23,10 @@ NA, DataFrame, DatetimeIndex, + Index, Series, Timestamp, + date_range, read_json, ) import pandas._testing as tm @@ -115,7 +117,11 @@ def datetime_series(self): def datetime_frame(self): # Same as usual datetime_frame, but with index freq set to None, # since that doesn't round-trip, see GH#33711 - df = DataFrame(tm.getTimeSeriesData()) + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=30, freq="B"), + ) df.index = df.index._with_freq(None) return df @@ -266,7 +272,7 @@ def test_roundtrip_empty(self, orient, convert_axes): data = StringIO(empty_frame.to_json(orient=orient)) result = read_json(data, orient=orient, convert_axes=convert_axes) if orient == "split": - idx = pd.Index([], dtype=(float if convert_axes else object)) + idx = Index([], dtype=(float if convert_axes else object)) expected = DataFrame(index=idx, columns=idx) elif orient in ["index", "columns"]: expected = DataFrame() @@ -294,7 +300,7 @@ def test_roundtrip_timestamp(self, orient, convert_axes, datetime_frame): @pytest.mark.parametrize("convert_axes", [True, False]) def test_roundtrip_mixed(self, orient, convert_axes): - index = pd.Index(["a", "b", "c", "d", "e"]) + index = Index(["a", "b", "c", "d", "e"]) values = { "A": [0.0, 1.0, 2.0, 3.0, 4.0], "B": [0.0, 1.0, 0.0, 1.0, 0.0], @@ -495,7 +501,7 @@ def test_frame_mixedtype_orient(self): # GH10289 tm.assert_frame_equal(left, right) def test_v12_compat(self, datapath): - dti = pd.date_range("2000-01-03", "2000-01-07") + dti = date_range("2000-01-03", "2000-01-07") # freq doesn't roundtrip dti = DatetimeIndex(np.asarray(dti), freq=None) df = DataFrame( @@ -525,7 +531,7 @@ def test_v12_compat(self, datapath): tm.assert_frame_equal(df_iso, df_unser_iso, check_column_type=False) def test_blocks_compat_GH9037(self, using_infer_string): - index = pd.date_range("20000101", periods=10, freq="h") + index = date_range("20000101", periods=10, freq="h") # freq doesn't round-trip index = DatetimeIndex(list(index), freq=None) @@ -1034,7 +1040,7 @@ def test_doc_example(self): dfj2["date"] = Timestamp("20130101") dfj2["ints"] = range(5) dfj2["bools"] = True - dfj2.index = pd.date_range("20130101", periods=5) + dfj2.index = 
date_range("20130101", periods=5) json = StringIO(dfj2.to_json()) result = read_json(json, dtype={"ints": np.int64, "bools": np.bool_}) @@ -1078,7 +1084,7 @@ def test_timedelta(self): result = read_json(StringIO(ser.to_json()), typ="series").apply(converter) tm.assert_series_equal(result, ser) - ser = Series([timedelta(23), timedelta(seconds=5)], index=pd.Index([0, 1])) + ser = Series([timedelta(23), timedelta(seconds=5)], index=Index([0, 1])) assert ser.dtype == "timedelta64[ns]" result = read_json(StringIO(ser.to_json()), typ="series").apply(converter) tm.assert_series_equal(result, ser) @@ -1094,7 +1100,7 @@ def test_timedelta2(self): { "a": [timedelta(days=23), timedelta(seconds=5)], "b": [1, 2], - "c": pd.date_range(start="20130101", periods=2), + "c": date_range(start="20130101", periods=2), } ) data = StringIO(frame.to_json(date_unit="ns")) @@ -1209,10 +1215,10 @@ def test_categorical(self): def test_datetime_tz(self): # GH4377 df.to_json segfaults with non-ndarray blocks - tz_range = pd.date_range("20130101", periods=3, tz="US/Eastern") + tz_range = date_range("20130101", periods=3, tz="US/Eastern") tz_naive = tz_range.tz_convert("utc").tz_localize(None) - df = DataFrame({"A": tz_range, "B": pd.date_range("20130101", periods=3)}) + df = DataFrame({"A": tz_range, "B": date_range("20130101", periods=3)}) df_naive = df.copy() df_naive["A"] = tz_naive @@ -1265,9 +1271,9 @@ def test_tz_is_naive(self): @pytest.mark.parametrize( "tz_range", [ - pd.date_range("2013-01-01 05:00:00Z", periods=2), - pd.date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern"), - pd.date_range("2013-01-01 00:00:00-0500", periods=2), + date_range("2013-01-01 05:00:00Z", periods=2), + date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern"), + date_range("2013-01-01 00:00:00-0500", periods=2), ], ) def test_tz_range_is_utc(self, tz_range): @@ -1290,7 +1296,7 @@ def test_tz_range_is_utc(self, tz_range): assert ujson_dumps(df.astype({"DT": object}), iso_dates=True) def test_tz_range_is_naive(self): - dti = pd.date_range("2013-01-01 05:00:00", periods=2) + dti = date_range("2013-01-01 05:00:00", periods=2) exp = '["2013-01-01T05:00:00.000","2013-01-02T05:00:00.000"]' dfexp = '{"DT":{"0":"2013-01-01T05:00:00.000","1":"2013-01-02T05:00:00.000"}}' @@ -1926,7 +1932,7 @@ def test_to_json_multiindex_escape(self): # GH 15273 df = DataFrame( True, - index=pd.date_range("2017-01-20", "2017-01-23"), + index=date_range("2017-01-20", "2017-01-23"), columns=["foo", "bar"], ).stack(future_stack=True) result = df.to_json() @@ -2128,8 +2134,8 @@ def test_json_roundtrip_string_inference(orient): expected = DataFrame( [["a", "b"], ["c", "d"]], dtype="string[pyarrow_numpy]", - index=pd.Index(["row 1", "row 2"], dtype="string[pyarrow_numpy]"), - columns=pd.Index(["col 1", "col 2"], dtype="string[pyarrow_numpy]"), + index=Index(["row 1", "row 2"], dtype="string[pyarrow_numpy]"), + columns=Index(["col 1", "col 2"], dtype="string[pyarrow_numpy]"), ) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/io/pytables/test_append.py b/pandas/tests/io/pytables/test_append.py index 9eb9ffa53dd22..6cb4a4ad47440 100644 --- a/pandas/tests/io/pytables/test_append.py +++ b/pandas/tests/io/pytables/test_append.py @@ -33,7 +33,11 @@ def test_append(setup_path): with ensure_clean_store(setup_path) as store: # this is allowed by almost always don't want to do it # tables.NaturalNameWarning): - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((20, 4)), + columns=Index(list("ABCD"), 
dtype=object), + index=date_range("2000-01-01", periods=20, freq="B"), + ) _maybe_remove(store, "df1") store.append("df1", df[:10]) store.append("df1", df[10:]) @@ -279,7 +283,11 @@ def test_append_all_nans(setup_path): def test_append_frame_column_oriented(setup_path): with ensure_clean_store(setup_path) as store: # column oriented - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) df.index = df.index._with_freq(None) # freq doesn't round-trip _maybe_remove(store, "df1") @@ -427,7 +435,11 @@ def check_col(key, name, size): # with nans _maybe_remove(store, "df") - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) df["string"] = "foo" df.loc[df.index[1:4], "string"] = np.nan df["string2"] = "bar" @@ -487,7 +499,11 @@ def test_append_with_empty_string(setup_path): def test_append_with_data_columns(setup_path): with ensure_clean_store(setup_path) as store: - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) df.iloc[0, df.columns.get_loc("B")] = 1.0 _maybe_remove(store, "df") store.append("df", df[:2], data_columns=["B"]) @@ -854,8 +870,12 @@ def test_append_with_timedelta(setup_path): def test_append_to_multiple(setup_path): - df1 = tm.makeTimeDataFrame() - df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format) + df1 = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df2 = df1.copy().rename(columns="{}_2".format) df2["foo"] = "bar" df = concat([df1, df2], axis=1) @@ -887,8 +907,16 @@ def test_append_to_multiple(setup_path): def test_append_to_multiple_dropna(setup_path): - df1 = tm.makeTimeDataFrame() - df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format) + df1 = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ).rename(columns="{}_2".format) df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan df = concat([df1, df2], axis=1) @@ -904,8 +932,12 @@ def test_append_to_multiple_dropna(setup_path): def test_append_to_multiple_dropna_false(setup_path): - df1 = tm.makeTimeDataFrame() - df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format) + df1 = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df2 = df1.copy().rename(columns="{}_2".format) df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan df = concat([df1, df2], axis=1) diff --git a/pandas/tests/io/pytables/test_errors.py b/pandas/tests/io/pytables/test_errors.py index d956e4f5775eb..2021101098892 100644 --- a/pandas/tests/io/pytables/test_errors.py +++ b/pandas/tests/io/pytables/test_errors.py @@ -98,7 +98,11 @@ def test_unimplemented_dtypes_table_columns(setup_path): def test_invalid_terms(tmp_path, setup_path): with 
ensure_clean_store(setup_path) as store: - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) df["string"] = "foo" df.loc[df.index[0:4], "string"] = "bar" diff --git a/pandas/tests/io/pytables/test_file_handling.py b/pandas/tests/io/pytables/test_file_handling.py index 2920f0b07b31e..40397c49f12d2 100644 --- a/pandas/tests/io/pytables/test_file_handling.py +++ b/pandas/tests/io/pytables/test_file_handling.py @@ -20,6 +20,7 @@ Index, Series, _testing as tm, + date_range, read_hdf, ) from pandas.tests.io.pytables.common import ( @@ -36,7 +37,11 @@ @pytest.mark.parametrize("mode", ["r", "r+", "a", "w"]) def test_mode(setup_path, tmp_path, mode): - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) msg = r"[\S]* does not exist" path = tmp_path / setup_path @@ -85,7 +90,11 @@ def test_mode(setup_path, tmp_path, mode): def test_default_mode(tmp_path, setup_path): # read_hdf uses default mode - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) path = tmp_path / setup_path df.to_hdf(path, key="df", mode="w") result = read_hdf(path, "df") diff --git a/pandas/tests/io/pytables/test_put.py b/pandas/tests/io/pytables/test_put.py index 8a6e3c9006439..df47bd78c86b8 100644 --- a/pandas/tests/io/pytables/test_put.py +++ b/pandas/tests/io/pytables/test_put.py @@ -97,7 +97,11 @@ def test_api_default_format(tmp_path, setup_path): def test_put(setup_path): with ensure_clean_store(setup_path) as store: ts = tm.makeTimeSeries() - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((20, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=20, freq="B"), + ) store["a"] = ts store["b"] = df[:10] store["foo/bar/bah"] = df[:10] @@ -153,7 +157,11 @@ def test_put_string_index(setup_path): def test_put_compression(setup_path): with ensure_clean_store(setup_path) as store: - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) store.put("c", df, format="table", complib="zlib") tm.assert_frame_equal(store["c"], df) @@ -166,7 +174,11 @@ def test_put_compression(setup_path): @td.skip_if_windows def test_put_compression_blosc(setup_path): - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) with ensure_clean_store(setup_path) as store: # can't compress if format='fixed' @@ -179,7 +191,11 @@ def test_put_compression_blosc(setup_path): def test_put_mixed_type(setup_path): - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) df["obj1"] = "foo" df["obj2"] = "bar" df["bool1"] = df["A"] > 0 diff --git a/pandas/tests/io/pytables/test_read.py b/pandas/tests/io/pytables/test_read.py index 2030b1eca3203..e4a3ea1fc9db8 100644 --- 
a/pandas/tests/io/pytables/test_read.py +++ b/pandas/tests/io/pytables/test_read.py @@ -15,6 +15,7 @@ Index, Series, _testing as tm, + date_range, read_hdf, ) from pandas.tests.io.pytables.common import ( @@ -72,7 +73,11 @@ def test_read_missing_key_opened_store(tmp_path, setup_path): def test_read_column(setup_path): - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) with ensure_clean_store(setup_path) as store: _maybe_remove(store, "df") diff --git a/pandas/tests/io/pytables/test_round_trip.py b/pandas/tests/io/pytables/test_round_trip.py index 693f10172a99e..2c61da3809010 100644 --- a/pandas/tests/io/pytables/test_round_trip.py +++ b/pandas/tests/io/pytables/test_round_trip.py @@ -15,6 +15,7 @@ Series, _testing as tm, bdate_range, + date_range, read_hdf, ) from pandas.tests.io.pytables.common import ( @@ -372,7 +373,11 @@ def test_frame(compression, setup_path): df, tm.assert_frame_equal, path=setup_path, compression=compression ) - tdf = tm.makeTimeDataFrame() + tdf = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) _check_roundtrip( tdf, tm.assert_frame_equal, path=setup_path, compression=compression ) diff --git a/pandas/tests/io/pytables/test_select.py b/pandas/tests/io/pytables/test_select.py index 3eaa1e86dbf6d..0e303d1c890c5 100644 --- a/pandas/tests/io/pytables/test_select.py +++ b/pandas/tests/io/pytables/test_select.py @@ -130,7 +130,11 @@ def test_select_with_dups(setup_path): def test_select(setup_path): with ensure_clean_store(setup_path) as store: # select with columns= - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) _maybe_remove(store, "df") store.append("df", df) result = store.select("df", columns=["A", "B"]) @@ -331,7 +335,11 @@ def test_select_with_many_inputs(setup_path): def test_select_iterator(tmp_path, setup_path): # single table with ensure_clean_store(setup_path) as store: - df = tm.makeTimeDataFrame(500) + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) _maybe_remove(store, "df") store.append("df", df) @@ -341,33 +349,41 @@ def test_select_iterator(tmp_path, setup_path): result = concat(results) tm.assert_frame_equal(expected, result) - results = list(store.select("df", chunksize=100)) + results = list(store.select("df", chunksize=2)) assert len(results) == 5 result = concat(results) tm.assert_frame_equal(expected, result) - results = list(store.select("df", chunksize=150)) + results = list(store.select("df", chunksize=2)) result = concat(results) tm.assert_frame_equal(result, expected) path = tmp_path / setup_path - df = tm.makeTimeDataFrame(500) + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) df.to_hdf(path, key="df_non_table") msg = "can only use an iterator or chunksize on a table" with pytest.raises(TypeError, match=msg): - read_hdf(path, "df_non_table", chunksize=100) + read_hdf(path, "df_non_table", chunksize=2) with pytest.raises(TypeError, match=msg): read_hdf(path, 
"df_non_table", iterator=True) path = tmp_path / setup_path - df = tm.makeTimeDataFrame(500) + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) df.to_hdf(path, key="df", format="table") - results = list(read_hdf(path, "df", chunksize=100)) + results = list(read_hdf(path, "df", chunksize=2)) result = concat(results) assert len(results) == 5 @@ -377,9 +393,13 @@ def test_select_iterator(tmp_path, setup_path): # multiple with ensure_clean_store(setup_path) as store: - df1 = tm.makeTimeDataFrame(500) + df1 = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) store.append("df1", df1, data_columns=True) - df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format) + df2 = df1.copy().rename(columns="{}_2".format) df2["foo"] = "bar" store.append("df2", df2) @@ -388,7 +408,7 @@ def test_select_iterator(tmp_path, setup_path): # full selection expected = store.select_as_multiple(["df1", "df2"], selector="df1") results = list( - store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150) + store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=2) ) result = concat(results) tm.assert_frame_equal(expected, result) @@ -401,7 +421,11 @@ def test_select_iterator_complete_8014(setup_path): # no iterator with ensure_clean_store(setup_path) as store: - expected = tm.makeTimeDataFrame(100064, "s") + expected = DataFrame( + np.random.default_rng(2).standard_normal((100064, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=100064, freq="s"), + ) _maybe_remove(store, "df") store.append("df", expected) @@ -432,7 +456,11 @@ def test_select_iterator_complete_8014(setup_path): # with iterator, full range with ensure_clean_store(setup_path) as store: - expected = tm.makeTimeDataFrame(100064, "s") + expected = DataFrame( + np.random.default_rng(2).standard_normal((100064, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=100064, freq="s"), + ) _maybe_remove(store, "df") store.append("df", expected) @@ -470,7 +498,11 @@ def test_select_iterator_non_complete_8014(setup_path): # with iterator, non complete range with ensure_clean_store(setup_path) as store: - expected = tm.makeTimeDataFrame(100064, "s") + expected = DataFrame( + np.random.default_rng(2).standard_normal((100064, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=100064, freq="s"), + ) _maybe_remove(store, "df") store.append("df", expected) @@ -500,7 +532,11 @@ def test_select_iterator_non_complete_8014(setup_path): # with iterator, empty where with ensure_clean_store(setup_path) as store: - expected = tm.makeTimeDataFrame(100064, "s") + expected = DataFrame( + np.random.default_rng(2).standard_normal((100064, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=100064, freq="s"), + ) _maybe_remove(store, "df") store.append("df", expected) @@ -520,7 +556,11 @@ def test_select_iterator_many_empty_frames(setup_path): # with iterator, range limited to the first chunk with ensure_clean_store(setup_path) as store: - expected = tm.makeTimeDataFrame(100000, "s") + expected = DataFrame( + np.random.default_rng(2).standard_normal((100064, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=100064, 
freq="s"), + ) _maybe_remove(store, "df") store.append("df", expected) @@ -568,7 +608,11 @@ def test_select_iterator_many_empty_frames(setup_path): def test_frame_select(setup_path): - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) with ensure_clean_store(setup_path) as store: store.put("frame", df, format="table") @@ -589,7 +633,11 @@ def test_frame_select(setup_path): tm.assert_frame_equal(result, expected) # invalid terms - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) store.append("df_time", df) msg = "day is out of range for month: 0" with pytest.raises(ValueError, match=msg): @@ -604,7 +652,11 @@ def test_frame_select(setup_path): def test_frame_select_complex(setup_path): # select via complex criteria - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) df["string"] = "foo" df.loc[df.index[0:4], "string"] = "bar" @@ -717,7 +769,11 @@ def test_frame_select_complex2(tmp_path): def test_invalid_filtering(setup_path): # can't use more than one filter (atm) - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) with ensure_clean_store(setup_path) as store: store.put("df", df, format="table") @@ -735,7 +791,11 @@ def test_invalid_filtering(setup_path): def test_string_select(setup_path): # GH 2973 with ensure_clean_store(setup_path) as store: - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) # test string ==/!= df["x"] = "none" @@ -775,8 +835,12 @@ def test_string_select(setup_path): def test_select_as_multiple(setup_path): - df1 = tm.makeTimeDataFrame() - df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format) + df1 = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df2 = df1.copy().rename(columns="{}_2".format) df2["foo"] = "bar" with ensure_clean_store(setup_path) as store: @@ -836,7 +900,8 @@ def test_select_as_multiple(setup_path): tm.assert_frame_equal(result, expected) # test exception for diff rows - store.append("df3", tm.makeTimeDataFrame(nper=50)) + df3 = df1.copy().head(2) + store.append("df3", df3) msg = "all tables must have exactly the same nrows!" 
with pytest.raises(ValueError, match=msg): store.select_as_multiple( diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py index 98257f1765d53..057f1b1fd19c3 100644 --- a/pandas/tests/io/pytables/test_store.py +++ b/pandas/tests/io/pytables/test_store.py @@ -201,7 +201,11 @@ def test_versioning(setup_path): columns=Index(list("ABCD"), dtype=object), index=Index([f"i-{i}" for i in range(30)], dtype=object), ) - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((20, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=20, freq="B"), + ) _maybe_remove(store, "df1") store.append("df1", df[:10]) store.append("df1", df[10:]) @@ -295,7 +299,11 @@ def test_getattr(setup_path): result = getattr(store, "a") tm.assert_series_equal(result, s) - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) store["df"] = df result = store.df tm.assert_frame_equal(result, df) @@ -395,7 +403,11 @@ def col(t, column): return getattr(store.get_storer(t).table.cols, column) # data columns - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) df["string"] = "foo" df["string2"] = "bar" store.append("f", df, data_columns=["string", "string2"]) @@ -426,7 +438,11 @@ def col(t, column): return getattr(store.get_storer(t).table.cols, column) # data columns - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) df["string"] = "foo" df["string2"] = "bar" store.append("f", df, data_columns=["string"]) @@ -640,7 +656,11 @@ def test_store_series_name(setup_path): def test_overwrite_node(setup_path): with ensure_clean_store(setup_path) as store: - store["a"] = tm.makeTimeDataFrame() + store["a"] = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) ts = tm.makeTimeSeries() store["a"] = ts @@ -648,7 +668,11 @@ def test_overwrite_node(setup_path): def test_coordinates(setup_path): - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) with ensure_clean_store(setup_path) as store: _maybe_remove(store, "df") @@ -679,8 +703,12 @@ def test_coordinates(setup_path): # multiple tables _maybe_remove(store, "df1") _maybe_remove(store, "df2") - df1 = tm.makeTimeDataFrame() - df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format) + df1 = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df2 = df1.copy().rename(columns="{}_2".format) store.append("df1", df1, data_columns=["A", "B"]) store.append("df2", df2) diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index d7c69ff17749c..e20c49c072515 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -4119,8 +4119,12 @@ def tquery(query, con=None): def test_xsqlite_basic(sqlite_buildin): - frame = 
tm.makeTimeDataFrame() - assert sql.to_sql(frame, name="test_table", con=sqlite_buildin, index=False) == 30 + frame = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + assert sql.to_sql(frame, name="test_table", con=sqlite_buildin, index=False) == 10 result = sql.read_sql("select * from test_table", sqlite_buildin) # HACK! Change this once indexes are handled properly. @@ -4133,7 +4137,7 @@ def test_xsqlite_basic(sqlite_buildin): frame2 = frame.copy() new_idx = Index(np.arange(len(frame2)), dtype=np.int64) + 10 frame2["Idx"] = new_idx.copy() - assert sql.to_sql(frame2, name="test_table2", con=sqlite_buildin, index=False) == 30 + assert sql.to_sql(frame2, name="test_table2", con=sqlite_buildin, index=False) == 10 result = sql.read_sql("select * from test_table2", sqlite_buildin, index_col="Idx") expected = frame.copy() expected.index = new_idx @@ -4142,7 +4146,11 @@ def test_xsqlite_basic(sqlite_buildin): def test_xsqlite_write_row_by_row(sqlite_buildin): - frame = tm.makeTimeDataFrame() + frame = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) frame.iloc[0, 0] = np.nan create_sql = sql.get_schema(frame, "test") cur = sqlite_buildin.cursor() @@ -4161,7 +4169,11 @@ def test_xsqlite_write_row_by_row(sqlite_buildin): def test_xsqlite_execute(sqlite_buildin): - frame = tm.makeTimeDataFrame() + frame = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) create_sql = sql.get_schema(frame, "test") cur = sqlite_buildin.cursor() cur.execute(create_sql) @@ -4178,7 +4190,11 @@ def test_xsqlite_execute(sqlite_buildin): def test_xsqlite_schema(sqlite_buildin): - frame = tm.makeTimeDataFrame() + frame = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) create_sql = sql.get_schema(frame, "test") lines = create_sql.splitlines() for line in lines: diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py index 2a864abc5ea4a..45dc612148f40 100644 --- a/pandas/tests/plotting/frame/test_frame.py +++ b/pandas/tests/plotting/frame/test_frame.py @@ -19,6 +19,7 @@ import pandas as pd from pandas import ( DataFrame, + Index, MultiIndex, PeriodIndex, Series, @@ -53,19 +54,31 @@ class TestDataFramePlots: @pytest.mark.slow def test_plot(self): - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) _check_plot_works(df.plot, grid=False) @pytest.mark.slow def test_plot_subplots(self): - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) # _check_plot_works adds an ax so use default_axes=True to avoid warning axes = _check_plot_works(df.plot, default_axes=True, subplots=True) _check_axes_shape(axes, axes_num=4, layout=(4, 1)) @pytest.mark.slow def test_plot_subplots_negative_layout(self): - df = tm.makeTimeDataFrame() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + 
+            columns=Index(list("ABCD"), dtype=object),
+            index=date_range("2000-01-01", periods=10, freq="B"),
+        )
         axes = _check_plot_works(
             df.plot,
             default_axes=True,
@@ -76,7 +89,11 @@ def test_plot_subplots_negative_layout(self):

     @pytest.mark.slow
     def test_plot_subplots_use_index(self):
-        df = tm.makeTimeDataFrame()
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((10, 4)),
+            columns=Index(list("ABCD"), dtype=object),
+            index=date_range("2000-01-01", periods=10, freq="B"),
+        )
         axes = _check_plot_works(
             df.plot,
             default_axes=True,
@@ -286,7 +303,11 @@ def test_donot_overwrite_index_name(self):

     def test_plot_xy(self):
         # columns.inferred_type == 'string'
-        df = tm.makeTimeDataFrame(5)
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((5, 4)),
+            columns=Index(list("ABCD"), dtype=object),
+            index=date_range("2000-01-01", periods=5, freq="B"),
+        )
         _check_data(df.plot(x=0, y=1), df.set_index("A")["B"].plot())
         _check_data(df.plot(x=0), df.set_index("A").plot())
         _check_data(df.plot(y=0), df.B.plot())
@@ -295,7 +316,11 @@ def test_plot_xy(self):
         _check_data(df.plot(y="B"), df.B.plot())

     def test_plot_xy_int_cols(self):
-        df = tm.makeTimeDataFrame(5)
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((5, 4)),
+            columns=Index(list("ABCD"), dtype=object),
+            index=date_range("2000-01-01", periods=5, freq="B"),
+        )
         # columns.inferred_type == 'integer'
         df.columns = np.arange(1, len(df.columns) + 1)
         _check_data(df.plot(x=1, y=2), df.set_index(1)[2].plot())
@@ -303,7 +328,11 @@ def test_plot_xy_int_cols(self):
         _check_data(df.plot(y=1), df[1].plot())

     def test_plot_xy_figsize_and_title(self):
-        df = tm.makeTimeDataFrame(5)
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((5, 4)),
+            columns=Index(list("ABCD"), dtype=object),
+            index=date_range("2000-01-01", periods=5, freq="B"),
+        )
         # figsize and title
         ax = df.plot(x=1, y=2, title="Test", figsize=(16, 8))
         _check_text_labels(ax.title, "Test")
@@ -345,14 +374,22 @@ def test_invalid_logscale(self, input_param):
             df.plot.pie(subplots=True, **{input_param: True})

     def test_xcompat(self):
-        df = tm.makeTimeDataFrame()
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((10, 4)),
+            columns=Index(list("ABCD"), dtype=object),
+            index=date_range("2000-01-01", periods=10, freq="B"),
+        )
         ax = df.plot(x_compat=True)
         lines = ax.get_lines()
         assert not isinstance(lines[0].get_xdata(), PeriodIndex)
         _check_ticks_props(ax, xrot=30)

     def test_xcompat_plot_params(self):
-        df = tm.makeTimeDataFrame()
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((10, 4)),
+            columns=Index(list("ABCD"), dtype=object),
+            index=date_range("2000-01-01", periods=10, freq="B"),
+        )
         plotting.plot_params["xaxis.compat"] = True
         ax = df.plot()
         lines = ax.get_lines()
@@ -360,7 +397,11 @@ def test_xcompat_plot_params(self):
         _check_ticks_props(ax, xrot=30)

     def test_xcompat_plot_params_x_compat(self):
-        df = tm.makeTimeDataFrame()
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((10, 4)),
+            columns=Index(list("ABCD"), dtype=object),
+            index=date_range("2000-01-01", periods=10, freq="B"),
+        )
         plotting.plot_params["x_compat"] = False

         ax = df.plot()
@@ -371,7 +412,11 @@ def test_xcompat_plot_params_x_compat(self):
         assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)

     def test_xcompat_plot_params_context_manager(self):
-        df = tm.makeTimeDataFrame()
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((10, 4)),
+            columns=Index(list("ABCD"), dtype=object),
+            index=date_range("2000-01-01", periods=10, freq="B"),
+        )
         # useful if you're plotting a bunch together
         with plotting.plot_params.use("x_compat", True):
             ax = df.plot()
@@ -380,7 +425,11 @@ def test_xcompat_plot_params_context_manager(self):
         _check_ticks_props(ax, xrot=30)

     def test_xcompat_plot_period(self):
-        df = tm.makeTimeDataFrame()
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((10, 4)),
+            columns=Index(list("ABCD"), dtype=object),
+            index=date_range("2000-01-01", periods=10, freq="B"),
+        )
         ax = df.plot()
         lines = ax.get_lines()
         assert not isinstance(lines[0].get_xdata(), PeriodIndex)
@@ -405,7 +454,7 @@ def test_period_compat(self):
     def test_unsorted_index(self, index_dtype):
         df = DataFrame(
             {"y": np.arange(100)},
-            index=pd.Index(np.arange(99, -1, -1), dtype=index_dtype),
+            index=Index(np.arange(99, -1, -1), dtype=index_dtype),
             dtype=np.int64,
         )
         ax = df.plot()
@@ -723,7 +772,7 @@ def test_bar_nan_stacked(self):
         expected = [0.0, 0.0, 0.0, 10.0, 0.0, 20.0, 15.0, 10.0, 40.0]
         assert result == expected

-    @pytest.mark.parametrize("idx", [pd.Index, pd.CategoricalIndex])
+    @pytest.mark.parametrize("idx", [Index, pd.CategoricalIndex])
     def test_bar_categorical(self, idx):
         # GH 13019
         df = DataFrame(
@@ -1391,7 +1440,7 @@ def test_unordered_ts(self):
         # the ticks are sorted
         xticks = ax.xaxis.get_ticklabels()
         xlocs = [x.get_position()[0] for x in xticks]
-        assert pd.Index(xlocs).is_monotonic_increasing
+        assert Index(xlocs).is_monotonic_increasing

         xlabels = [x.get_text() for x in xticks]
         assert pd.to_datetime(xlabels, format="%Y-%m-%d").is_monotonic_increasing
@@ -2062,9 +2111,17 @@ def test_memory_leak(self, kind):
             )
             args = {"x": "A", "y": "B"}
         elif kind == "area":
-            df = tm.makeTimeDataFrame().abs()
+            df = DataFrame(
+                np.random.default_rng(2).standard_normal((10, 4)),
+                columns=Index(list("ABCD"), dtype=object),
+                index=date_range("2000-01-01", periods=10, freq="B"),
+            ).abs()
         else:
-            df = tm.makeTimeDataFrame()
+            df = DataFrame(
+                np.random.default_rng(2).standard_normal((10, 4)),
+                columns=Index(list("ABCD"), dtype=object),
+                index=date_range("2000-01-01", periods=10, freq="B"),
+            )

         # Use a weakref so we can see if the object gets collected without
         # also preventing it from being collected
@@ -2513,7 +2570,11 @@ def test_secondary_y(self, secondary_y):
     def test_plot_no_warning(self):
         # GH 55138
         # TODO(3.0): this can be removed once Period[B] deprecation is enforced
-        df = tm.makeTimeDataFrame()
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((10, 4)),
+            columns=Index(list("ABCD"), dtype=object),
+            index=date_range("2000-01-01", periods=10, freq="B"),
+        )
         with tm.assert_produces_warning(False):
             _ = df.plot()
             _ = df.T.plot()
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 3195b7637ee3c..401a7610b25d8 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -1247,7 +1247,11 @@ def test_secondary_legend(self):
         ax = fig.add_subplot(211)

         # ts
-        df = tm.makeTimeDataFrame()
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((10, 4)),
+            columns=Index(list("ABCD"), dtype=object),
+            index=date_range("2000-01-01", periods=10, freq="B"),
+        )
         df.plot(secondary_y=["A", "B"], ax=ax)
         leg = ax.get_legend()
         assert len(leg.get_lines()) == 4
@@ -1265,7 +1269,11 @@ def test_secondary_legend(self):
         mpl.pyplot.close(fig)

     def test_secondary_legend_right(self):
-        df = tm.makeTimeDataFrame()
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((10, 4)),
+            columns=Index(list("ABCD"), dtype=object),
+            index=date_range("2000-01-01", periods=10, freq="B"),
+        )
         fig = mpl.pyplot.figure()
         ax = fig.add_subplot(211)
         df.plot(secondary_y=["A", "C"], mark_right=False, ax=ax)
@@ -1278,7 +1286,11 @@ def test_secondary_legend_right(self):
         mpl.pyplot.close(fig)

     def test_secondary_legend_bar(self):
-        df = tm.makeTimeDataFrame()
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((10, 4)),
+            columns=Index(list("ABCD"), dtype=object),
+            index=date_range("2000-01-01", periods=10, freq="B"),
+        )
         fig, ax = mpl.pyplot.subplots()
         df.plot(kind="bar", secondary_y=["A"], ax=ax)
         leg = ax.get_legend()
@@ -1287,7 +1299,11 @@ def test_secondary_legend_bar(self):
         mpl.pyplot.close(fig)

     def test_secondary_legend_bar_right(self):
-        df = tm.makeTimeDataFrame()
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((10, 4)),
+            columns=Index(list("ABCD"), dtype=object),
+            index=date_range("2000-01-01", periods=10, freq="B"),
+        )
         fig, ax = mpl.pyplot.subplots()
         df.plot(kind="bar", secondary_y=["A"], mark_right=False, ax=ax)
         leg = ax.get_legend()
@@ -1296,10 +1312,18 @@ def test_secondary_legend_bar_right(self):
         mpl.pyplot.close(fig)

     def test_secondary_legend_multi_col(self):
-        df = tm.makeTimeDataFrame()
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((10, 4)),
+            columns=Index(list("ABCD"), dtype=object),
+            index=date_range("2000-01-01", periods=10, freq="B"),
+        )
         fig = mpl.pyplot.figure()
         ax = fig.add_subplot(211)
-        df = tm.makeTimeDataFrame()
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((10, 4)),
+            columns=Index(list("ABCD"), dtype=object),
+            index=date_range("2000-01-01", periods=10, freq="B"),
+        )
         ax = df.plot(secondary_y=["C", "D"], ax=ax)
         leg = ax.get_legend()
         assert len(leg.get_lines()) == 4
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index 865c02798c648..8a725c6e51e3f 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -11,6 +11,7 @@
 import pandas as pd
 from pandas import (
     DataFrame,
+    Index,
     Series,
     Timedelta,
     Timestamp,
@@ -433,7 +434,11 @@ def test_resample_upsampling_picked_but_not_correct(unit):

 @pytest.mark.parametrize("f", ["sum", "mean", "prod", "min", "max", "var"])
 def test_resample_frame_basic_cy_funcs(f, unit):
-    df = tm.makeTimeDataFrame()
+    df = DataFrame(
+        np.random.default_rng(2).standard_normal((50, 4)),
+        columns=Index(list("ABCD"), dtype=object),
+        index=date_range("2000-01-01", periods=50, freq="B"),
+    )
     df.index = df.index.as_unit(unit)

     b = Grouper(freq="ME")
@@ -445,7 +450,11 @@ def test_resample_frame_basic_cy_funcs(f, unit):

 @pytest.mark.parametrize("freq", ["YE", "ME"])
 def test_resample_frame_basic_M_A(freq, unit):
-    df = tm.makeTimeDataFrame()
+    df = DataFrame(
+        np.random.default_rng(2).standard_normal((50, 4)),
+        columns=Index(list("ABCD"), dtype=object),
+        index=date_range("2000-01-01", periods=50, freq="B"),
+    )
     df.index = df.index.as_unit(unit)
     result = df.resample(freq).mean()
     tm.assert_series_equal(result["A"], df["A"].resample(freq).mean())
@@ -453,7 +462,11 @@ def test_resample_frame_basic_M_A(freq, unit):

 @pytest.mark.parametrize("freq", ["W-WED", "ME"])
 def test_resample_frame_basic_kind(freq, unit):
-    df = tm.makeTimeDataFrame()
+    df = DataFrame(
+        np.random.default_rng(2).standard_normal((10, 4)),
+        columns=Index(list("ABCD"), dtype=object),
+        index=date_range("2000-01-01", periods=10, freq="B"),
+    )
     df.index = df.index.as_unit(unit)
     msg = "The 'kind' keyword in DataFrame.resample is deprecated"
     with tm.assert_produces_warning(FutureWarning, match=msg):
@@ -1465,7 +1478,11 @@ def test_resample_nunique(unit):

 def test_resample_nunique_preserves_column_level_names(unit):
     # see gh-23222
-    df = tm.makeTimeDataFrame(freq="1D").abs()
+    df = DataFrame(
+        np.random.default_rng(2).standard_normal((5, 4)),
+        columns=Index(list("ABCD"), dtype=object),
+        index=date_range("2000-01-01", periods=5, freq="D"),
+    ).abs()
     df.index = df.index.as_unit(unit)
     df.columns = pd.MultiIndex.from_arrays(
         [df.columns.tolist()] * 2, names=["lev0", "lev1"]
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index f07c223bf0de2..3408e6e4731bd 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -168,7 +168,7 @@ def test_join_on_fails_with_different_right_index(self):
                 "a": np.random.default_rng(2).choice(["m", "f"], size=10),
                 "b": np.random.default_rng(2).standard_normal(10),
             },
-            index=tm.makeCustomIndex(10, 2),
+            index=MultiIndex.from_product([range(5), ["A", "B"]]),
         )
         msg = r'len\(left_on\) must equal the number of levels in the index of "right"'
         with pytest.raises(ValueError, match=msg):
@@ -180,7 +180,7 @@ def test_join_on_fails_with_different_left_index(self):
                 "a": np.random.default_rng(2).choice(["m", "f"], size=3),
                 "b": np.random.default_rng(2).standard_normal(3),
             },
-            index=tm.makeCustomIndex(3, 2),
+            index=MultiIndex.from_arrays([range(3), list("abc")]),
         )
         df2 = DataFrame(
             {
@@ -204,7 +204,7 @@ def test_join_on_fails_with_different_column_counts(self):
                 "a": np.random.default_rng(2).choice(["m", "f"], size=10),
                 "b": np.random.default_rng(2).standard_normal(10),
             },
-            index=tm.makeCustomIndex(10, 2),
+            index=MultiIndex.from_product([range(5), ["A", "B"]]),
         )
         msg = r"len\(right_on\) must equal len\(left_on\)"
         with pytest.raises(ValueError, match=msg):
diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py
index b10436889d829..ff9f927597956 100644
--- a/pandas/tests/reshape/test_melt.py
+++ b/pandas/tests/reshape/test_melt.py
@@ -6,6 +6,8 @@
 import pandas as pd
 from pandas import (
     DataFrame,
+    Index,
+    date_range,
     lreshape,
     melt,
     wide_to_long,
@@ -15,7 +17,11 @@

 @pytest.fixture
 def df():
-    res = tm.makeTimeDataFrame()[:10]
+    res = DataFrame(
+        np.random.default_rng(2).standard_normal((10, 4)),
+        columns=Index(list("ABCD"), dtype=object),
+        index=date_range("2000-01-01", periods=10, freq="B"),
+    )
     res["id1"] = (res["A"] > 0).astype(np.int64)
     res["id2"] = (res["B"] > 0).astype(np.int64)
     return res
@@ -281,7 +287,7 @@ def test_multiindex(self, df1):
     @pytest.mark.parametrize(
         "col",
         [
-            pd.Series(pd.date_range("2010", periods=5, tz="US/Pacific")),
+            pd.Series(date_range("2010", periods=5, tz="US/Pacific")),
             pd.Series(["a", "b", "c", "a", "d"], dtype="category"),
             pd.Series([0, 1, 0, 0, 0]),
         ],
@@ -396,11 +402,11 @@ def test_ignore_multiindex(self):

     def test_ignore_index_name_and_type(self):
         # GH 17440
-        index = pd.Index(["foo", "bar"], dtype="category", name="baz")
+        index = Index(["foo", "bar"], dtype="category", name="baz")
         df = DataFrame({"x": [0, 1], "y": [2, 3]}, index=index)
         result = melt(df, ignore_index=False)

-        expected_index = pd.Index(["foo", "bar"] * 2, dtype="category", name="baz")
+        expected_index = Index(["foo", "bar"] * 2, dtype="category", name="baz")
         expected = DataFrame(
             {"variable": ["x", "x", "y", "y"], "value": [0, 1, 2, 3]},
             index=expected_index,
@@ -1203,7 +1209,7 @@ def test_missing_stubname(self, dtype):
             j="num",
             sep="-",
         )
-        index = pd.Index(
+        index = Index(
             [("1", 1), ("2", 1), ("1", 2), ("2", 2)],
             name=("id", "num"),
         )
diff --git a/pandas/tests/series/methods/test_unstack.py b/pandas/tests/series/methods/test_unstack.py
index b294e2fcce9d8..3c70e839c8e20 100644
--- a/pandas/tests/series/methods/test_unstack.py
+++ b/pandas/tests/series/methods/test_unstack.py
@@ -4,8 +4,10 @@
 import pandas as pd
 from pandas import (
     DataFrame,
+    Index,
     MultiIndex,
     Series,
+    date_range,
 )
 import pandas._testing as tm
@@ -92,7 +94,7 @@ def test_unstack_tuplename_in_multiindex():
     expected = DataFrame(
         [[1, 1, 1], [1, 1, 1], [1, 1, 1]],
         columns=MultiIndex.from_tuples([("a",), ("b",), ("c",)], names=[("A", "a")]),
-        index=pd.Index([1, 2, 3], name=("B", "b")),
+        index=Index([1, 2, 3], name=("B", "b")),
     )
     tm.assert_frame_equal(result, expected)

@@ -109,7 +111,7 @@ def test_unstack_tuplename_in_multiindex():
         (
             (("A", "a"), "B"),
             [[1, 1, 1, 1], [1, 1, 1, 1]],
-            pd.Index([3, 4], name="C"),
+            Index([3, 4], name="C"),
             MultiIndex.from_tuples(
                 [("a", 1), ("a", 2), ("b", 1), ("b", 2)], names=[("A", "a"), "B"]
             ),
@@ -133,9 +135,12 @@ def test_unstack_mixed_type_name_in_multiindex(


 def test_unstack_multi_index_categorical_values():
-    mi = (
-        tm.makeTimeDataFrame().stack(future_stack=True).index.rename(["major", "minor"])
+    df = DataFrame(
+        np.random.default_rng(2).standard_normal((10, 4)),
+        columns=Index(list("ABCD"), dtype=object),
+        index=date_range("2000-01-01", periods=10, freq="B"),
     )
+    mi = df.stack(future_stack=True).index.rename(["major", "minor"])
     ser = Series(["foo"] * len(mi), index=mi, name="category", dtype="category")

     result = ser.unstack()
@@ -144,7 +149,7 @@ def test_unstack_multi_index_categorical_values():
     c = pd.Categorical(["foo"] * len(dti))
     expected = DataFrame(
         {"A": c.copy(), "B": c.copy(), "C": c.copy(), "D": c.copy()},
-        columns=pd.Index(list("ABCD"), name="minor"),
+        columns=Index(list("ABCD"), name="minor"),
         index=dti.rename("major"),
     )
     tm.assert_frame_equal(result, expected)
@@ -158,7 +163,7 @@ def test_unstack_mixed_level_names():
     result = ser.unstack("x")
     expected = DataFrame(
         [[1], [2]],
-        columns=pd.Index(["a"], name="x"),
+        columns=Index(["a"], name="x"),
         index=MultiIndex.from_tuples([(1, "red"), (2, "blue")], names=[0, "y"]),
     )
     tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 773d7e174feac..502096d41dde2 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -679,7 +679,7 @@ def test_constructor_broadcast_list(self):
             Series(["foo"], index=["a", "b", "c"])

     def test_constructor_corner(self):
-        df = tm.makeTimeDataFrame()
+        df = DataFrame(range(5), index=date_range("2020-01-01", periods=5))
         objs = [df, df]
         s = Series(objs, index=[0, 1])
         assert isinstance(s, Series)
diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py
index 4fa256a6b8630..1e7fdd920e365 100644
--- a/pandas/tests/util/test_hashing.py
+++ b/pandas/tests/util/test_hashing.py
@@ -146,8 +146,8 @@ def test_multiindex_objects():
                 "D": pd.date_range("20130101", periods=5),
             }
         ),
-        tm.makeTimeDataFrame(),
-        tm.makeTimeSeries(),
+        DataFrame(range(5), index=pd.date_range("2020-01-01", periods=5)),
+        Series(range(5), index=pd.date_range("2020-01-01", periods=5)),
         Series(period_range("2020-01-01", periods=10, freq="D")),
         Series(pd.date_range("20130101", periods=3, tz="US/Eastern")),
     ],
@@ -179,8 +179,8 @@ def test_hash_pandas_object(obj, index):
                 "D": pd.date_range("20130101", periods=5),
             }
         ),
-        tm.makeTimeDataFrame(),
-        tm.makeTimeSeries(),
+        DataFrame(range(5), index=pd.date_range("2020-01-01", periods=5)),
+        Series(range(5), index=pd.date_range("2020-01-01", periods=5)),
         Series(period_range("2020-01-01", periods=10, freq="D")),
         Series(pd.date_range("20130101", periods=3, tz="US/Eastern")),
     ],