From e8d9a32e3abc8b9c0ee0e290cd0a19d5f3c86cf2 Mon Sep 17 00:00:00 2001
From: jbrockmendel
Date: Tue, 21 Nov 2023 11:46:49 -0800
Subject: [PATCH] TST: better exception messages with na_values and pyarrow engine (#56090)

* TST: better exception messages with na_values and pyarrow engine

* remove commented-out

* remove commented-out
---
 pandas/io/parsers/arrow_parser_wrapper.py |   9 ++
 pandas/io/parsers/readers.py              |  23 ++--
 pandas/tests/io/parser/test_na_values.py  | 144 ++++++++++++++++++----
 3 files changed, 146 insertions(+), 30 deletions(-)

diff --git a/pandas/io/parsers/arrow_parser_wrapper.py b/pandas/io/parsers/arrow_parser_wrapper.py
index 2dc88a5701033..5786073c9d9cc 100644
--- a/pandas/io/parsers/arrow_parser_wrapper.py
+++ b/pandas/io/parsers/arrow_parser_wrapper.py
@@ -250,6 +250,15 @@ def read(self) -> DataFrame:
             include = self.convert_options.get("include_columns", None)
             if include is not None:
                 self._validate_usecols(include)
+
+            nulls = self.convert_options.get("null_values", set())
+            if not lib.is_list_like(nulls) or not all(
+                isinstance(x, str) for x in nulls
+            ):
+                raise TypeError(
+                    "The 'pyarrow' engine requires all na_values to be strings"
+                )
+
             raise
 
         try:
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py
index 83d75508920a4..66990de6d3b89 100644
--- a/pandas/io/parsers/readers.py
+++ b/pandas/io/parsers/readers.py
@@ -1716,7 +1716,10 @@ def _clean_options(
 
         # Converting values to NA
         keep_default_na = options["keep_default_na"]
-        na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)
+        floatify = engine != "pyarrow"
+        na_values, na_fvalues = _clean_na_values(
+            na_values, keep_default_na, floatify=floatify
+        )
 
         # handle skiprows; this is internally handled by the
         # c-engine, so only need for python and pyarrow parsers
@@ -1928,7 +1931,7 @@ def TextParser(*args, **kwds) -> TextFileReader:
     return TextFileReader(*args, **kwds)
 
 
-def _clean_na_values(na_values, keep_default_na: bool = True):
+def _clean_na_values(na_values, keep_default_na: bool = True, floatify: bool = True):
     na_fvalues: set | dict
     if na_values is None:
         if keep_default_na:
@@ -1956,7 +1959,7 @@ def _clean_na_values(na_values, keep_default_na: bool = True):
     else:
         if not is_list_like(na_values):
             na_values = [na_values]
-        na_values = _stringify_na_values(na_values)
+        na_values = _stringify_na_values(na_values, floatify)
         if keep_default_na:
             na_values = na_values | STR_NA_VALUES
 
@@ -1978,7 +1981,7 @@ def _floatify_na_values(na_values):
     return result
 
 
-def _stringify_na_values(na_values):
+def _stringify_na_values(na_values, floatify: bool):
     """return a stringified and numeric for these values"""
     result: list[str | float] = []
     for x in na_values:
@@ -1993,13 +1996,15 @@ def _stringify_na_values(na_values):
                 result.append(f"{v}.0")
                 result.append(str(v))
 
-            result.append(v)
-        except (TypeError, ValueError, OverflowError):
-            pass
-        try:
-            result.append(int(x))
+            if floatify:
+                result.append(v)
         except (TypeError, ValueError, OverflowError):
             pass
+        if floatify:
+            try:
+                result.append(int(x))
+            except (TypeError, ValueError, OverflowError):
+                pass
     return set(result)
 
 
diff --git a/pandas/tests/io/parser/test_na_values.py b/pandas/tests/io/parser/test_na_values.py
index 437a5fb5e9f09..ca106fa772e82 100644
--- a/pandas/tests/io/parser/test_na_values.py
+++ b/pandas/tests/io/parser/test_na_values.py
@@ -59,7 +59,6 @@ def test_detect_string_na(all_parsers):
     tm.assert_frame_equal(result, expected)
 
 
-@xfail_pyarrow  # TypeError: expected bytes, int found
 @pytest.mark.parametrize(
     "na_values",
     [
@@ -87,12 +86,26 @@ def test_detect_string_na(all_parsers):
 """,
     ],
 )
-def test_non_string_na_values(all_parsers, data, na_values):
+def test_non_string_na_values(all_parsers, data, na_values, request):
     # see gh-3611: with an odd float format, we can't match
     # the string "999.0" exactly but still need float matching
     parser = all_parsers
     expected = DataFrame([[np.nan, 1.2], [2.0, np.nan], [3.0, 4.5]], columns=["A", "B"])
 
+    if parser.engine == "pyarrow" and not all(isinstance(x, str) for x in na_values):
+        msg = "The 'pyarrow' engine requires all na_values to be strings"
+        with pytest.raises(TypeError, match=msg):
+            parser.read_csv(StringIO(data), na_values=na_values)
+        return
+    elif parser.engine == "pyarrow" and "-999.000" in data:
+        # because the pyarrow engine does not include the float-ified version
+        # of "-999" -> -999, it does not match the entry with the trailing
+        # zeros, so "-999.000" is not treated as null.
+        mark = pytest.mark.xfail(
+            reason="pyarrow engine does not recognize equivalent floats"
+        )
+        request.applymarker(mark)
+
     result = parser.read_csv(StringIO(data), na_values=na_values)
     tm.assert_frame_equal(result, expected)
 
@@ -145,8 +158,6 @@ def f(i, v):
     tm.assert_frame_equal(result, expected)
 
 
-# ValueError: skiprows argument must be an integer when using engine='pyarrow'
-@xfail_pyarrow
 @pytest.mark.parametrize("na_values", ["baz", ["baz"]])
 def test_custom_na_values(all_parsers, na_values):
     parser = all_parsers
@@ -159,6 +170,12 @@ def test_custom_na_values(all_parsers, na_values):
     expected = DataFrame(
         [[1.0, np.nan, 3], [np.nan, 5, np.nan], [7, 8, np.nan]], columns=["A", "B", "C"]
     )
+    if parser.engine == "pyarrow":
+        msg = "skiprows argument must be an integer when using engine='pyarrow'"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), na_values=na_values, skiprows=[1])
+        return
+
     result = parser.read_csv(StringIO(data), na_values=na_values, skiprows=[1])
     tm.assert_frame_equal(result, expected)
 
@@ -183,7 +200,6 @@ def test_bool_na_values(all_parsers):
     tm.assert_frame_equal(result, expected)
 
 
-@xfail_pyarrow  # ValueError: The pyarrow engine doesn't support passing a dict
 def test_na_value_dict(all_parsers):
     data = """A,B,C
 foo,bar,NA
@@ -191,6 +207,13 @@ foo,bar,NA
 foo,bar,NA
 bar,foo,foo"""
     parser = all_parsers
+
+    if parser.engine == "pyarrow":
+        msg = "pyarrow engine doesn't support passing a dict for na_values"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), na_values={"A": ["foo"], "B": ["bar"]})
+        return
+
     df = parser.read_csv(StringIO(data), na_values={"A": ["foo"], "B": ["bar"]})
     expected = DataFrame(
         {
@@ -235,7 +258,6 @@ def test_na_value_dict_multi_index(all_parsers, index_col, expected):
     tm.assert_frame_equal(result, expected)
 
 
-@xfail_pyarrow  # ValueError: The pyarrow engine doesn't support passing a dict
 @pytest.mark.parametrize(
     "kwargs,expected",
     [
@@ -281,7 +303,7 @@ def test_na_value_dict_multi_index(all_parsers, index_col, expected):
         ),
     ],
 )
-def test_na_values_keep_default(all_parsers, kwargs, expected):
+def test_na_values_keep_default(all_parsers, kwargs, expected, request):
     data = """\
 A,B,C
 a,1,one
@@ -293,6 +315,15 @@ g,7,seven
 """
     parser = all_parsers
+    if parser.engine == "pyarrow":
+        if "na_values" in kwargs and isinstance(kwargs["na_values"], dict):
+            msg = "The pyarrow engine doesn't support passing a dict for na_values"
+            with pytest.raises(ValueError, match=msg):
+                parser.read_csv(StringIO(data), **kwargs)
+            return
+        mark = pytest.mark.xfail()
+        request.applymarker(mark)
+
     result = parser.read_csv(StringIO(data), **kwargs)
     tm.assert_frame_equal(result, expected)
 
 
@@ -323,11 +354,19 @@ def test_no_na_values_no_keep_default(all_parsers):
     tm.assert_frame_equal(result, expected)
 
 
-@xfail_pyarrow  # ValueError: The pyarrow engine doesn't support passing a dict
 def test_no_keep_default_na_dict_na_values(all_parsers):
     # see gh-19227
     data = "a,b\n,2"
     parser = all_parsers
+
+    if parser.engine == "pyarrow":
+        msg = "The pyarrow engine doesn't support passing a dict for na_values"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(
+                StringIO(data), na_values={"b": ["2"]}, keep_default_na=False
+            )
+        return
+
     result = parser.read_csv(
         StringIO(data), na_values={"b": ["2"]}, keep_default_na=False
     )
@@ -335,19 +374,24 @@ def test_no_keep_default_na_dict_na_values(all_parsers):
     tm.assert_frame_equal(result, expected)
 
 
-@xfail_pyarrow  # ValueError: The pyarrow engine doesn't support passing a dict
 def test_no_keep_default_na_dict_na_scalar_values(all_parsers):
     # see gh-19227
     #
     # Scalar values shouldn't cause the parsing to crash or fail.
     data = "a,b\n1,2"
     parser = all_parsers
+
+    if parser.engine == "pyarrow":
+        msg = "The pyarrow engine doesn't support passing a dict for na_values"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), na_values={"b": 2}, keep_default_na=False)
+        return
+
     df = parser.read_csv(StringIO(data), na_values={"b": 2}, keep_default_na=False)
     expected = DataFrame({"a": [1], "b": [np.nan]})
     tm.assert_frame_equal(df, expected)
 
 
-@xfail_pyarrow  # ValueError: The pyarrow engine doesn't support passing a dict
 @pytest.mark.parametrize("col_zero_na_values", [113125, "113125"])
 def test_no_keep_default_na_dict_na_values_diff_reprs(all_parsers, col_zero_na_values):
     # see gh-19227
@@ -368,6 +412,17 @@ def test_no_keep_default_na_dict_na_values_diff_reprs(all_parsers, col_zero_na_v
         }
     )
 
+    if parser.engine == "pyarrow":
+        msg = "The pyarrow engine doesn't support passing a dict for na_values"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(
+                StringIO(data),
+                header=None,
+                keep_default_na=False,
+                na_values={2: "", 6: "214.008", 1: "blah", 0: col_zero_na_values},
+            )
+        return
+
     result = parser.read_csv(
         StringIO(data),
         header=None,
@@ -427,7 +482,6 @@ def test_na_trailing_columns(all_parsers):
     tm.assert_frame_equal(result, expected)
 
 
-@xfail_pyarrow  # TypeError: expected bytes, int found
 @pytest.mark.parametrize(
     "na_values,row_data",
     [
@@ -441,12 +495,27 @@ def test_na_values_scalar(all_parsers, na_values, row_data):
     names = ["a", "b"]
     data = "1,2\n2,1"
 
+    if parser.engine == "pyarrow" and isinstance(na_values, dict):
+        if isinstance(na_values, dict):
+            err = ValueError
+            msg = "The pyarrow engine doesn't support passing a dict for na_values"
+        else:
+            err = TypeError
+            msg = "The 'pyarrow' engine requires all na_values to be strings"
+        with pytest.raises(err, match=msg):
+            parser.read_csv(StringIO(data), names=names, na_values=na_values)
+        return
+    elif parser.engine == "pyarrow":
+        msg = "The 'pyarrow' engine requires all na_values to be strings"
+        with pytest.raises(TypeError, match=msg):
+            parser.read_csv(StringIO(data), names=names, na_values=na_values)
+        return
+
     result = parser.read_csv(StringIO(data), names=names, na_values=na_values)
     expected = DataFrame(row_data, columns=names)
     tm.assert_frame_equal(result, expected)
 
 
-@xfail_pyarrow  # ValueError: The pyarrow engine doesn't support passing a dict
 def test_na_values_dict_aliasing(all_parsers):
     parser = all_parsers
     na_values = {"a": 2, "b": 1}
@@ -456,25 +525,36 @@ def test_na_values_dict_aliasing(all_parsers):
     data = "1,2\n2,1"
 
     expected = DataFrame([[1.0, 2.0], [np.nan, np.nan]], columns=names)
+
+    if parser.engine == "pyarrow":
+        msg = "The pyarrow engine doesn't support passing a dict for na_values"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), names=names, na_values=na_values)
+        return
+
     result = parser.read_csv(StringIO(data), names=names, na_values=na_values)
 
     tm.assert_frame_equal(result, expected)
     tm.assert_dict_equal(na_values, na_values_copy)
 
 
-@xfail_pyarrow  # ValueError: The pyarrow engine doesn't support passing a dict
 def test_na_values_dict_col_index(all_parsers):
     # see gh-14203
     data = "a\nfoo\n1"
     parser = all_parsers
     na_values = {0: "foo"}
+
+    if parser.engine == "pyarrow":
+        msg = "The pyarrow engine doesn't support passing a dict for na_values"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), na_values=na_values)
+        return
+
     result = parser.read_csv(StringIO(data), na_values=na_values)
     expected = DataFrame({"a": [np.nan, 1]})
 
     tm.assert_frame_equal(result, expected)
 
 
-@xfail_pyarrow  # TypeError: expected bytes, int found
 @pytest.mark.parametrize(
     "data,kwargs,expected",
     [
@@ -487,9 +567,19 @@ def test_na_values_dict_col_index(all_parsers):
         (str(2**63) + "\n1", {"na_values": [2**63]}, DataFrame([np.nan, 1])),
     ],
 )
-def test_na_values_uint64(all_parsers, data, kwargs, expected):
+def test_na_values_uint64(all_parsers, data, kwargs, expected, request):
     # see gh-14983
     parser = all_parsers
+
+    if parser.engine == "pyarrow" and "na_values" in kwargs:
+        msg = "The 'pyarrow' engine requires all na_values to be strings"
+        with pytest.raises(TypeError, match=msg):
+            parser.read_csv(StringIO(data), header=None, **kwargs)
+        return
+    elif parser.engine == "pyarrow":
+        mark = pytest.mark.xfail(reason="Returns float64 instead of object")
+        request.applymarker(mark)
+
     result = parser.read_csv(StringIO(data), header=None, **kwargs)
     tm.assert_frame_equal(result, expected)
 
@@ -566,11 +656,14 @@ def test_na_values_with_dtype_str_and_na_filter(all_parsers, na_filter):
 )
 def test_cast_NA_to_bool_raises_error(all_parsers, data, na_values):
     parser = all_parsers
-    msg = (
-        "(Bool column has NA values in column [0a])|"
-        "(cannot safely convert passed user dtype of "
-        "bool for object dtyped data in column 0)"
+    msg = "|".join(
+        [
+            "Bool column has NA values in column [0a]",
+            "cannot safely convert passed user dtype of "
+            "bool for object dtyped data in column 0",
+        ]
     )
+
     with pytest.raises(ValueError, match=msg):
         parser.read_csv(
             StringIO(data),
@@ -581,6 +674,8 @@ def test_cast_NA_to_bool_raises_error(all_parsers, data, na_values):
         )
 
 
+# TODO: this test isn't about the na_values keyword, it is about empty entries
+# being returned as NaN, whereas the pyarrow engine returns the string "nan"
 @xfail_pyarrow  # mismatched shapes
 def test_str_nan_dropped(all_parsers):
     # see gh-21131
@@ -610,12 +705,19 @@ def test_str_nan_dropped(all_parsers):
     tm.assert_frame_equal(result, expected)
 
 
-@xfail_pyarrow  # ValueError: The pyarrow engine doesn't support passing a dict
 def test_nan_multi_index(all_parsers):
     # GH 42446
     parser = all_parsers
     data = "A,B,B\nX,Y,Z\n1,2,inf"
 
+    if parser.engine == "pyarrow":
+        msg = "The pyarrow engine doesn't support passing a dict for na_values"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(
+                StringIO(data), header=list(range(2)), na_values={("B", "Z"): "inf"}
"Z"): "inf"} + ) + return + result = parser.read_csv( StringIO(data), header=list(range(2)), na_values={("B", "Z"): "inf"} ) @@ -631,7 +733,7 @@ def test_nan_multi_index(all_parsers): tm.assert_frame_equal(result, expected) -@xfail_pyarrow # Failed: DID NOT RAISE +@xfail_pyarrow # Failed: DID NOT RAISE ; it casts the NaN to False def test_bool_and_nan_to_bool(all_parsers): # GH#42808 parser = all_parsers