Merge remote-tracking branch 'upstream/main' into enhence_Styler_bar_test

ccccjone committed Dec 9, 2023
2 parents dc77342 + 04307e7 commit 32e1cca
Showing 55 changed files with 595 additions and 439 deletions.
2 changes: 2 additions & 0 deletions ci/code_checks.sh
@@ -14,6 +14,8 @@
# $ ./ci/code_checks.sh single-docs # check single-page docs build warning-free
# $ ./ci/code_checks.sh notebooks # check execution of documentation notebooks

+set -uo pipefail
+
[[ -z "$1" || "$1" == "code" || "$1" == "doctests" || "$1" == "docstrings" || "$1" == "single-docs" || "$1" == "notebooks" ]] || \
{ echo "Unknown command $1. Usage: $0 [code|doctests|docstrings|single-docs|notebooks]"; exit 9999; }

5 changes: 3 additions & 2 deletions doc/make.py
@@ -236,8 +236,9 @@ def html(self):
             os.remove(zip_fname)

         if ret_code == 0:
-            if self.single_doc_html is not None and not self.no_browser:
-                self._open_browser(self.single_doc_html)
+            if self.single_doc_html is not None:
+                if not self.no_browser:
+                    self._open_browser(self.single_doc_html)
             else:
                 self._add_redirects()
             if self.whatsnew and not self.no_browser:
5 changes: 5 additions & 0 deletions doc/source/whatsnew/v2.2.0.rst
@@ -454,6 +454,7 @@ Other Deprecations
- Deprecated allowing non-keyword arguments in :meth:`DataFrame.to_string` except ``buf``. (:issue:`54229`)
- Deprecated allowing non-keyword arguments in :meth:`DataFrame.to_xml` except ``path_or_buffer``. (:issue:`54229`)
- Deprecated allowing passing :class:`BlockManager` objects to :class:`DataFrame` or :class:`SingleBlockManager` objects to :class:`Series` (:issue:`52419`)
- Deprecated behavior of :meth:`Index.insert` with an object-dtype index silently performing type inference on the result, explicitly call ``result.infer_objects(copy=False)`` for the old behavior instead (:issue:`51363`)
- Deprecated downcasting behavior in :meth:`Series.where`, :meth:`DataFrame.where`, :meth:`Series.mask`, :meth:`DataFrame.mask`, :meth:`Series.clip`, :meth:`DataFrame.clip`; in a future version these will not infer object-dtype columns to non-object dtype, or all-round floats to integer dtype. Call ``result.infer_objects(copy=False)`` on the result for object inference, or explicitly cast floats to ints. To opt in to the future version, use ``pd.set_option("future.no_silent_downcasting", True)`` (:issue:`53656`)
- Deprecated including the groups in computations when using :meth:`.DataFrameGroupBy.apply` and :meth:`.DataFrameGroupBy.resample`; pass ``include_groups=False`` to exclude the groups (:issue:`7155`)
- Deprecated indexing an :class:`Index` with a boolean indexer of length zero (:issue:`55820`)
@@ -576,7 +577,10 @@ Strings
^^^^^^^
- Bug in :func:`pandas.api.types.is_string_dtype` while checking object array with no elements is of the string dtype (:issue:`54661`)
- Bug in :meth:`DataFrame.apply` failing when ``engine="numba"`` and columns or index have ``StringDtype`` (:issue:`56189`)
- Bug in :meth:`DataFrame.reindex` not matching :class:`Index` with ``string[pyarrow_numpy]`` dtype (:issue:`56106`)
- Bug in :meth:`Index.str.cat` always casting result to object dtype (:issue:`56157`)
- Bug in :meth:`Series.__mul__` for :class:`ArrowDtype` with ``pyarrow.string`` dtype and ``string[pyarrow]`` for the pyarrow backend (:issue:`51970`)
- Bug in :meth:`Series.str.find` when ``start < 0`` for :class:`ArrowDtype` with ``pyarrow.string`` (:issue:`56411`)
- Bug in :meth:`Series.str.replace` when ``n < 0`` for :class:`ArrowDtype` with ``pyarrow.string`` (:issue:`56404`)
- Bug in :meth:`Series.str.startswith` and :meth:`Series.str.endswith` with arguments of type ``tuple[str, ...]`` for ``string[pyarrow]`` (:issue:`54942`)

@@ -645,6 +649,7 @@ Groupby/resample/rolling
- Bug in :meth:`DataFrame.asfreq` and :meth:`Series.asfreq` with a :class:`DatetimeIndex` with non-nanosecond resolution incorrectly converting to nanosecond resolution (:issue:`55958`)
- Bug in :meth:`DataFrame.ewm` when passed ``times`` with non-nanosecond ``datetime64`` or :class:`DatetimeTZDtype` dtype (:issue:`56262`)
- Bug in :meth:`DataFrame.resample` not respecting ``closed`` and ``label`` arguments for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55282`)
- Bug in :meth:`DataFrame.resample` when resampling on a :class:`ArrowDtype` of ``pyarrow.timestamp`` or ``pyarrow.duration`` type (:issue:`55989`)
- Bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55281`)
- Bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.MonthBegin` (:issue:`55271`)
-
3 changes: 2 additions & 1 deletion meson.build
@@ -7,7 +7,8 @@ project(
meson_version: '>=1.2.1',
default_options: [
'buildtype=release',
-'c_std=c11'
+'c_std=c11',
+'warning_level=2',
]
)

2 changes: 1 addition & 1 deletion pandas/_libs/include/pandas/datetime/pd_datetime.h
@@ -50,7 +50,7 @@ typedef struct {
NPY_DATETIMEUNIT *, int *, int *, const char *,
int, FormatRequirement);
int (*get_datetime_iso_8601_strlen)(int, NPY_DATETIMEUNIT);
-int (*make_iso_8601_datetime)(npy_datetimestruct *, char *, int, int,
+int (*make_iso_8601_datetime)(npy_datetimestruct *, char *, size_t, int,
NPY_DATETIMEUNIT);
int (*make_iso_8601_timedelta)(pandas_timedeltastruct *, char *, size_t *);
} PandasDateTime_CAPI;
12 changes: 12 additions & 0 deletions pandas/_libs/include/pandas/portable.h
@@ -23,3 +23,15 @@ The full license is in the LICENSE file, distributed with this software.
#define isspace_ascii(c) (((c) == ' ') || (((unsigned)(c) - '\t') < 5))
#define toupper_ascii(c) ((((unsigned)(c) - 'a') < 26) ? ((c) & 0x5f) : (c))
#define tolower_ascii(c) ((((unsigned)(c) - 'A') < 26) ? ((c) | 0x20) : (c))

+#if defined(_WIN32)
+#define PD_FALLTHROUGH \
+do { \
+} while (0) /* fallthrough */
+#elif __has_attribute(__fallthrough__)
+#define PD_FALLTHROUGH __attribute__((__fallthrough__))
+#else
+#define PD_FALLTHROUGH \
+do { \
+} while (0) /* fallthrough */
+#endif
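
For context, a minimal usage sketch of the new macro (hypothetical code, not part of this commit): with the warning_level=2 added in meson.build above (roughly -Wall -Wextra on GCC/Clang), GCC warns about unannotated fall-through between switch cases; PD_FALLTHROUGH expands to __attribute__((__fallthrough__)) where the attribute is available and to a harmless empty statement otherwise, so the annotation costs nothing elsewhere.

    /* Hypothetical example, not from the pandas sources. */
    #include <stdio.h>
    #include "pandas/portable.h"

    static const char *classify(int c) {
      switch (c) {
      case '+':
      case '-':
        printf("sign character\n"); /* sign-specific work, then... */
        PD_FALLTHROUGH;             /* ...deliberately continue into default */
      default:
        return "token";
      }
    }

Without the PD_FALLTHROUGH line, GCC's -Wimplicit-fallthrough (pulled in by -Wextra) would flag the fall-through after the printf as a likely missing break.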
@@ -85,7 +85,7 @@ int get_datetime_iso_8601_strlen(int local, NPY_DATETIMEUNIT base);
* Returns 0 on success, -1 on failure (for example if the output
* string was too short).
*/
-int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen,
+int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, size_t outlen,
int utc, NPY_DATETIMEUNIT base);

/*
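The prototype above now takes the output length as size_t rather than int, matching the C-API table change in pd_datetime.h. A rough, hypothetical caller sketch (buffer sizing via get_datetime_iso_8601_strlen as declared above; the struct fields and NPY_FR_s enumerator follow numpy's npy_datetimestruct/NPY_DATETIMEUNIT, and none of this is code from the commit):

    /* Hypothetical sketch; assumes the declarations shown above and
     * numpy's npy_datetimestruct / NPY_DATETIMEUNIT are in scope. */
    static int format_example(char *buf, size_t buflen) {
      npy_datetimestruct dts = {0};
      dts.year = 2023;
      dts.month = 12;
      dts.day = 9;

      const NPY_DATETIMEUNIT base = NPY_FR_s;
      const int needed = get_datetime_iso_8601_strlen(0 /* local */, base);
      if (needed < 0 || (size_t)needed > buflen) {
        return -1; /* output buffer too short */
      }
      /* outlen is a size_t under the new prototype; returns 0 on success. */
      return make_iso_8601_datetime(&dts, buf, buflen, 0 /* utc */, base);
    }
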
3 changes: 2 additions & 1 deletion pandas/_libs/src/datetime/pd_datetime.c
@@ -21,6 +21,7 @@ This file is derived from NumPy 1.7. See NUMPY_LICENSE.txt

#include "datetime.h"
#include "pandas/datetime/pd_datetime.h"
#include "pandas/portable.h"

static void pandas_datetime_destructor(PyObject *op) {
void *ptr = PyCapsule_GetPointer(op, PandasDateTime_CAPSULE_NAME);
@@ -188,7 +189,7 @@ static npy_datetime PyDateTimeToEpoch(PyObject *dt, NPY_DATETIMEUNIT base) {
return npy_dt;
}

-static int pandas_datetime_exec(PyObject *module) {
+static int pandas_datetime_exec(PyObject *Py_UNUSED(module)) {
PyDateTime_IMPORT;
PandasDateTime_CAPI *capi = PyMem_Malloc(sizeof(PandasDateTime_CAPI));
if (capi == NULL) {
2 changes: 1 addition & 1 deletion pandas/_libs/src/parser/pd_parser.c
@@ -100,7 +100,7 @@ static void pandas_parser_destructor(PyObject *op) {
PyMem_Free(ptr);
}

-static int pandas_parser_exec(PyObject *module) {
+static int pandas_parser_exec(PyObject *Py_UNUSED(module)) {
PandasParser_CAPI *capi = PyMem_Malloc(sizeof(PandasParser_CAPI));
if (capi == NULL) {
PyErr_NoMemory();
36 changes: 22 additions & 14 deletions pandas/_libs/src/parser/tokenizer.c
@@ -795,7 +795,7 @@ static int tokenize_bytes(parser_t *self, size_t line_limit,
break;
} else if (!isblank(c)) {
self->state = START_FIELD;
-// fall through to subsequent state
+PD_FALLTHROUGH; // fall through to subsequent state
} else {
// if whitespace char, keep slurping
break;
@@ -849,12 +849,12 @@ static int tokenize_bytes(parser_t *self, size_t line_limit,
self->state = WHITESPACE_LINE;
break;
}
-// fall through
}

// normal character - fall through
// to handle as START_FIELD
self->state = START_FIELD;
+PD_FALLTHROUGH;
}
case START_FIELD:
// expecting field
@@ -1130,10 +1130,10 @@ int parser_consume_rows(parser_t *self, size_t nrows) {

/* if word_deletions == 0 (i.e. this case) then char_count must
* be 0 too, as no data needs to be skipped */
-const int64_t char_count = word_deletions >= 1
-? (self->word_starts[word_deletions - 1] +
-strlen(self->words[word_deletions - 1]) + 1)
-: 0;
+const uint64_t char_count =
+word_deletions >= 1 ? (self->word_starts[word_deletions - 1] +
+strlen(self->words[word_deletions - 1]) + 1)
+: 0;

TRACE(("parser_consume_rows: Deleting %d words, %d chars\n", word_deletions,
char_count));
@@ -1415,9 +1415,11 @@ double xstrtod(const char *str, char **endptr, char decimal, char sci,
int negative = 0;
switch (*p) {
case '-':
-negative = 1; // Fall through to increment position.
+negative = 1;
+PD_FALLTHROUGH; // Fall through to increment position.
case '+':
p++;
+break;
}

int exponent = 0;
@@ -1485,9 +1487,11 @@ double xstrtod(const char *str, char **endptr, char decimal, char sci,
negative = 0;
switch (*++p) {
case '-':
-negative = 1; // Fall through to increment pos.
+negative = 1;
+PD_FALLTHROUGH; // Fall through to increment position.
case '+':
p++;
+break;
}

// Process string of digits.
@@ -1595,9 +1599,11 @@ double precise_xstrtod(const char *str, char **endptr, char decimal, char sci,
int negative = 0;
switch (*p) {
case '-':
-negative = 1; // Fall through to increment position.
+negative = 1;
+PD_FALLTHROUGH; // Fall through to increment position.
case '+':
p++;
+break;
}

double number = 0.;
@@ -1656,9 +1662,11 @@ double precise_xstrtod(const char *str, char **endptr, char decimal, char sci,
negative = 0;
switch (*++p) {
case '-':
-negative = 1; // Fall through to increment pos.
+negative = 1;
+PD_FALLTHROUGH; // Fall through to increment position.
case '+':
p++;
+break;
}

// Process string of digits.
@@ -1762,8 +1770,8 @@ static char *_str_copy_decimal_str_c(const char *s, char **endpos, char decimal,
return s_copy;
}

-double round_trip(const char *p, char **q, char decimal, char sci, char tsep,
-int skip_trailing, int *error, int *maybe_int) {
+double round_trip(const char *p, char **q, char decimal, char Py_UNUSED(sci),
+char tsep, int skip_trailing, int *error, int *maybe_int) {
// 'normalize' representation to C-locale; replace decimal with '.' and
// remove thousands separator.
char *endptr;
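
The comment above describes the normalization step: copy the input, translating the user-supplied decimal character to '.' and dropping thousands separators, so the C-locale strtod can parse it. A simplified standalone sketch of that idea (the real helper is _str_copy_decimal_str_c; this is illustrative only and not the pandas implementation):

    /* Illustrative only; simplified version of the normalization idea. */
    #include <ctype.h>
    #include <stdlib.h>
    #include <string.h>

    static char *normalize_numeric(const char *s, char decimal, char tsep) {
      char *out = malloc(strlen(s) + 1);
      if (out == NULL)
        return NULL;
      char *p = out;
      for (; *s != '\0'; s++) {
        if (*s == tsep && isdigit((unsigned char)s[1]))
          continue; /* drop thousands separator before a digit */
        *p++ = (*s == decimal) ? '.' : *s;
      }
      *p = '\0';
      return out; /* caller frees; result is ready for strtod() */
    }
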
@@ -1975,7 +1983,7 @@ uint64_t str_to_uint64(uint_state *state, const char *p_item, int64_t int_max,
break;
}
if ((number < pre_max) ||
-((number == pre_max) && (d - '0' <= dig_pre_max))) {
+((number == pre_max) && ((uint64_t)(d - '0') <= dig_pre_max))) {
number = number * 10 + (d - '0');
d = *++p;

@@ -1987,7 +1995,7 @@
} else {
while (isdigit_ascii(d)) {
if ((number < pre_max) ||
-((number == pre_max) && (d - '0' <= dig_pre_max))) {
+((number == pre_max) && ((uint64_t)(d - '0') <= dig_pre_max))) {
number = number * 10 + (d - '0');
d = *++p;

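The casts added above keep the digit/limit comparison in unsigned arithmetic. For reference, a standalone sketch of the same pre_max/dig_pre_max overflow guard (hypothetical code, not from tokenizer.c; here the limit is passed in directly, whereas in pandas the bounds come from the caller-supplied maximum):

    /* Standalone sketch of the pre_max / dig_pre_max overflow guard. */
    #include <stdbool.h>
    #include <stdint.h>

    static bool parse_u64(const char *p, uint64_t limit, uint64_t *out) {
      const uint64_t pre_max = limit / 10;     /* safe to append any digit while below this */
      const uint64_t dig_pre_max = limit % 10; /* max digit allowed when number == pre_max */
      uint64_t number = 0;
      if (*p < '0' || *p > '9')
        return false;
      for (; *p >= '0' && *p <= '9'; p++) {
        const uint64_t d = (uint64_t)(*p - '0');
        if (number < pre_max || (number == pre_max && d <= dig_pre_max)) {
          number = number * 10 + d;
        } else {
          return false; /* next digit would push number past limit */
        }
      }
      *out = number;
      return true;
    }

With limit = UINT64_MAX, the guard accepts "18446744073709551615" and rejects any digit string whose value would exceed it.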
(Diffs for the remaining changed files are not shown here.)
