2024-12-04 13:35:57 +05:00
parent d346bf4b2a
commit 73ce681a55
7059 changed files with 1196501 additions and 0 deletions

View File

@@ -0,0 +1,27 @@
__all__ = [
"NaT",
"NaTType",
"OutOfBoundsDatetime",
"Period",
"Timedelta",
"Timestamp",
"iNaT",
"Interval",
]
# Below imports need to happen first to ensure the pandas top-level
# module gets monkeypatched with the pandas_datetime_CAPI
# see pandas_datetime_exec in pd_datetime.c
import pandas._libs.pandas_parser # isort: skip # type: ignore[reportUnusedImport]
import pandas._libs.pandas_datetime # noqa: F401 # isort: skip # type: ignore[reportUnusedImport]
from pandas._libs.interval import Interval
from pandas._libs.tslibs import (
NaT,
NaTType,
OutOfBoundsDatetime,
Period,
Timedelta,
Timestamp,
iNaT,
)
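# Usage sketch (illustrative): the extension types re-exported here are the
# same objects exposed at the top level of pandas.
from pandas._libs import Interval, NaT, Timedelta, Timestamp

ts = Timestamp("2024-12-04")
assert ts + Timedelta("1D") == Timestamp("2024-12-05")
assert NaT != NaT                        # NaT never compares equal, even to itself
assert 2 in Interval(0, 5, closed="right")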

View File

@@ -0,0 +1,416 @@
from typing import Any
import numpy as np
from pandas._typing import npt
class Infinity:
def __eq__(self, other) -> bool: ...
def __ne__(self, other) -> bool: ...
def __lt__(self, other) -> bool: ...
def __le__(self, other) -> bool: ...
def __gt__(self, other) -> bool: ...
def __ge__(self, other) -> bool: ...
class NegInfinity:
def __eq__(self, other) -> bool: ...
def __ne__(self, other) -> bool: ...
def __lt__(self, other) -> bool: ...
def __le__(self, other) -> bool: ...
def __gt__(self, other) -> bool: ...
def __ge__(self, other) -> bool: ...
def unique_deltas(
arr: np.ndarray, # const int64_t[:]
) -> np.ndarray: ... # np.ndarray[np.int64, ndim=1]
def is_lexsorted(list_of_arrays: list[npt.NDArray[np.int64]]) -> bool: ...
def groupsort_indexer(
index: np.ndarray, # const int64_t[:]
ngroups: int,
) -> tuple[
np.ndarray, # ndarray[int64_t, ndim=1]
np.ndarray, # ndarray[int64_t, ndim=1]
]: ...
def kth_smallest(
arr: np.ndarray, # numeric[:]
k: int,
) -> Any: ... # numeric
# ----------------------------------------------------------------------
# Pairwise correlation/covariance
def nancorr(
mat: npt.NDArray[np.float64], # const float64_t[:, :]
cov: bool = ...,
minp: int | None = ...,
) -> npt.NDArray[np.float64]: ... # ndarray[float64_t, ndim=2]
def nancorr_spearman(
mat: npt.NDArray[np.float64], # ndarray[float64_t, ndim=2]
minp: int = ...,
) -> npt.NDArray[np.float64]: ... # ndarray[float64_t, ndim=2]
# ----------------------------------------------------------------------
def validate_limit(nobs: int | None, limit=...) -> int: ...
def get_fill_indexer(
mask: npt.NDArray[np.bool_],
limit: int | None = None,
) -> npt.NDArray[np.intp]: ...
def pad(
old: np.ndarray, # ndarray[numeric_object_t]
new: np.ndarray, # ndarray[numeric_object_t]
limit=...,
) -> npt.NDArray[np.intp]: ... # np.ndarray[np.intp, ndim=1]
def pad_inplace(
values: np.ndarray, # numeric_object_t[:]
mask: np.ndarray, # uint8_t[:]
limit=...,
) -> None: ...
def pad_2d_inplace(
values: np.ndarray, # numeric_object_t[:, :]
mask: np.ndarray, # const uint8_t[:, :]
limit=...,
) -> None: ...
def backfill(
old: np.ndarray, # ndarray[numeric_object_t]
new: np.ndarray, # ndarray[numeric_object_t]
limit=...,
) -> npt.NDArray[np.intp]: ... # np.ndarray[np.intp, ndim=1]
def backfill_inplace(
values: np.ndarray, # numeric_object_t[:]
mask: np.ndarray, # uint8_t[:]
limit=...,
) -> None: ...
def backfill_2d_inplace(
values: np.ndarray, # numeric_object_t[:, :]
mask: np.ndarray, # const uint8_t[:, :]
limit=...,
) -> None: ...
def is_monotonic(
arr: np.ndarray, # ndarray[numeric_object_t, ndim=1]
timelike: bool,
) -> tuple[bool, bool, bool]: ...
# ----------------------------------------------------------------------
# rank_1d, rank_2d
# ----------------------------------------------------------------------
def rank_1d(
values: np.ndarray, # ndarray[numeric_object_t, ndim=1]
labels: np.ndarray | None = ..., # const int64_t[:]=None
is_datetimelike: bool = ...,
ties_method=...,
ascending: bool = ...,
pct: bool = ...,
na_option=...,
mask: npt.NDArray[np.bool_] | None = ...,
) -> np.ndarray: ... # np.ndarray[float64_t, ndim=1]
def rank_2d(
in_arr: np.ndarray, # ndarray[numeric_object_t, ndim=2]
axis: int = ...,
is_datetimelike: bool = ...,
ties_method=...,
ascending: bool = ...,
na_option=...,
pct: bool = ...,
) -> np.ndarray: ... # np.ndarray[float64_t, ndim=1]
def diff_2d(
arr: np.ndarray, # ndarray[diff_t, ndim=2]
out: np.ndarray, # ndarray[out_t, ndim=2]
periods: int,
axis: int,
datetimelike: bool = ...,
) -> None: ...
def ensure_platform_int(arr: object) -> npt.NDArray[np.intp]: ...
def ensure_object(arr: object) -> npt.NDArray[np.object_]: ...
def ensure_float64(arr: object) -> npt.NDArray[np.float64]: ...
def ensure_int8(arr: object) -> npt.NDArray[np.int8]: ...
def ensure_int16(arr: object) -> npt.NDArray[np.int16]: ...
def ensure_int32(arr: object) -> npt.NDArray[np.int32]: ...
def ensure_int64(arr: object) -> npt.NDArray[np.int64]: ...
def ensure_uint64(arr: object) -> npt.NDArray[np.uint64]: ...
def take_1d_int8_int8(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int8_int32(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int8_int64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int8_float64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int16_int16(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int16_int32(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int16_int64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int16_float64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int32_int32(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int32_int64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int32_float64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int64_int64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_int64_float64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_float32_float32(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_float32_float64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_float64_float64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_object_object(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_bool_bool(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_1d_bool_object(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int8_int8(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int8_int32(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int8_int64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int8_float64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int16_int16(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int16_int32(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int16_int64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int16_float64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int32_int32(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int32_int64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int32_float64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int64_int64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_int64_float64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_float32_float32(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_float32_float64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_float64_float64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_object_object(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_bool_bool(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis0_bool_object(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int8_int8(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int8_int32(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int8_int64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int8_float64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int16_int16(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int16_int32(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int16_int64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int16_float64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int32_int32(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int32_int64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int32_float64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int64_int64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_int64_float64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_float32_float32(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_float32_float64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_float64_float64(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_object_object(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_bool_bool(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_axis1_bool_object(
values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
) -> None: ...
def take_2d_multi_int8_int8(
values: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
out: np.ndarray,
fill_value=...,
) -> None: ...
def take_2d_multi_int8_int32(
values: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
out: np.ndarray,
fill_value=...,
) -> None: ...
def take_2d_multi_int8_int64(
values: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
out: np.ndarray,
fill_value=...,
) -> None: ...
def take_2d_multi_int8_float64(
values: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
out: np.ndarray,
fill_value=...,
) -> None: ...
def take_2d_multi_int16_int16(
values: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
out: np.ndarray,
fill_value=...,
) -> None: ...
def take_2d_multi_int16_int32(
values: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
out: np.ndarray,
fill_value=...,
) -> None: ...
def take_2d_multi_int16_int64(
values: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
out: np.ndarray,
fill_value=...,
) -> None: ...
def take_2d_multi_int16_float64(
values: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
out: np.ndarray,
fill_value=...,
) -> None: ...
def take_2d_multi_int32_int32(
values: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
out: np.ndarray,
fill_value=...,
) -> None: ...
def take_2d_multi_int32_int64(
values: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
out: np.ndarray,
fill_value=...,
) -> None: ...
def take_2d_multi_int32_float64(
values: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
out: np.ndarray,
fill_value=...,
) -> None: ...
def take_2d_multi_int64_float64(
values: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
out: np.ndarray,
fill_value=...,
) -> None: ...
def take_2d_multi_float32_float32(
values: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
out: np.ndarray,
fill_value=...,
) -> None: ...
def take_2d_multi_float32_float64(
values: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
out: np.ndarray,
fill_value=...,
) -> None: ...
def take_2d_multi_float64_float64(
values: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
out: np.ndarray,
fill_value=...,
) -> None: ...
def take_2d_multi_object_object(
values: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
out: np.ndarray,
fill_value=...,
) -> None: ...
def take_2d_multi_bool_bool(
values: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
out: np.ndarray,
fill_value=...,
) -> None: ...
def take_2d_multi_bool_object(
values: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
out: np.ndarray,
fill_value=...,
) -> None: ...
def take_2d_multi_int64_int64(
values: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
out: np.ndarray,
fill_value=...,
) -> None: ...
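# Usage sketch (illustrative; assumes the compiled pandas._libs.algos
# extension matches the signatures above):
import numpy as np
from pandas._libs import algos

indexer = algos.ensure_platform_int(np.array([0, 2, 1], dtype=np.int64))
values = algos.ensure_float64(np.array([1, 2, 3], dtype=np.int32))
assert indexer.dtype == np.intp and values.dtype == np.float64
keys = [np.array([0, 0, 1], dtype=np.int64), np.array([0, 1, 0], dtype=np.int64)]
assert algos.is_lexsorted(keys)          # the label columns are in lexicographic order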

View File

@@ -0,0 +1,40 @@
from typing import Sequence
import numpy as np
from pandas._typing import (
AxisInt,
DtypeObj,
Self,
Shape,
)
class NDArrayBacked:
_dtype: DtypeObj
_ndarray: np.ndarray
def __init__(self, values: np.ndarray, dtype: DtypeObj) -> None: ...
@classmethod
def _simple_new(cls, values: np.ndarray, dtype: DtypeObj): ...
def _from_backing_data(self, values: np.ndarray): ...
def __setstate__(self, state): ...
def __len__(self) -> int: ...
@property
def shape(self) -> Shape: ...
@property
def ndim(self) -> int: ...
@property
def size(self) -> int: ...
@property
def nbytes(self) -> int: ...
def copy(self, order=...): ...
def delete(self, loc, axis=...): ...
def swapaxes(self, axis1, axis2): ...
def repeat(self, repeats: int | Sequence[int], axis: int | None = ...): ...
def reshape(self, *args, **kwargs): ...
def ravel(self, order=...): ...
@property
def T(self): ...
@classmethod
def _concat_same_type(
cls, to_concat: Sequence[Self], axis: AxisInt = ...
) -> Self: ...

View File

@@ -0,0 +1,5 @@
def read_float_with_byteswap(data: bytes, offset: int, byteswap: bool) -> float: ...
def read_double_with_byteswap(data: bytes, offset: int, byteswap: bool) -> float: ...
def read_uint16_with_byteswap(data: bytes, offset: int, byteswap: bool) -> int: ...
def read_uint32_with_byteswap(data: bytes, offset: int, byteswap: bool) -> int: ...
def read_uint64_with_byteswap(data: bytes, offset: int, byteswap: bool) -> int: ...

View File

@@ -0,0 +1,216 @@
from typing import Literal
import numpy as np
from pandas._typing import npt
def group_median_float64(
out: np.ndarray, # ndarray[float64_t, ndim=2]
counts: npt.NDArray[np.int64],
values: np.ndarray, # ndarray[float64_t, ndim=2]
labels: npt.NDArray[np.int64],
min_count: int = ..., # Py_ssize_t
mask: np.ndarray | None = ...,
result_mask: np.ndarray | None = ...,
) -> None: ...
def group_cumprod(
out: np.ndarray, # float64_t[:, ::1]
values: np.ndarray, # const float64_t[:, :]
labels: np.ndarray, # const int64_t[:]
ngroups: int,
is_datetimelike: bool,
skipna: bool = ...,
mask: np.ndarray | None = ...,
result_mask: np.ndarray | None = ...,
) -> None: ...
def group_cumsum(
out: np.ndarray, # int64float_t[:, ::1]
values: np.ndarray, # ndarray[int64float_t, ndim=2]
labels: np.ndarray, # const int64_t[:]
ngroups: int,
is_datetimelike: bool,
skipna: bool = ...,
mask: np.ndarray | None = ...,
result_mask: np.ndarray | None = ...,
) -> None: ...
def group_shift_indexer(
out: np.ndarray, # int64_t[::1]
labels: np.ndarray, # const int64_t[:]
ngroups: int,
periods: int,
) -> None: ...
def group_fillna_indexer(
out: np.ndarray, # ndarray[intp_t]
labels: np.ndarray, # ndarray[int64_t]
sorted_labels: npt.NDArray[np.intp],
mask: npt.NDArray[np.uint8],
limit: int, # int64_t
dropna: bool,
) -> None: ...
def group_any_all(
out: np.ndarray, # uint8_t[::1]
values: np.ndarray, # const uint8_t[::1]
labels: np.ndarray, # const int64_t[:]
mask: np.ndarray, # const uint8_t[::1]
val_test: Literal["any", "all"],
skipna: bool,
result_mask: np.ndarray | None,
) -> None: ...
def group_sum(
out: np.ndarray, # complexfloatingintuint_t[:, ::1]
counts: np.ndarray, # int64_t[::1]
values: np.ndarray, # ndarray[complexfloatingintuint_t, ndim=2]
labels: np.ndarray, # const intp_t[:]
mask: np.ndarray | None,
result_mask: np.ndarray | None = ...,
min_count: int = ...,
is_datetimelike: bool = ...,
) -> None: ...
def group_prod(
out: np.ndarray, # int64float_t[:, ::1]
counts: np.ndarray, # int64_t[::1]
values: np.ndarray, # ndarray[int64float_t, ndim=2]
labels: np.ndarray, # const intp_t[:]
mask: np.ndarray | None,
result_mask: np.ndarray | None = ...,
min_count: int = ...,
) -> None: ...
def group_var(
out: np.ndarray, # floating[:, ::1]
counts: np.ndarray, # int64_t[::1]
values: np.ndarray, # ndarray[floating, ndim=2]
labels: np.ndarray, # const intp_t[:]
min_count: int = ..., # Py_ssize_t
ddof: int = ..., # int64_t
mask: np.ndarray | None = ...,
result_mask: np.ndarray | None = ...,
is_datetimelike: bool = ...,
name: str = ...,
) -> None: ...
def group_skew(
out: np.ndarray, # float64_t[:, ::1]
counts: np.ndarray, # int64_t[::1]
values: np.ndarray, # ndarray[float64_t, ndim=2]
labels: np.ndarray, # const intp_t[::1]
mask: np.ndarray | None = ...,
result_mask: np.ndarray | None = ...,
skipna: bool = ...,
) -> None: ...
def group_mean(
out: np.ndarray, # floating[:, ::1]
counts: np.ndarray, # int64_t[::1]
values: np.ndarray, # ndarray[floating, ndim=2]
labels: np.ndarray, # const intp_t[:]
min_count: int = ..., # Py_ssize_t
is_datetimelike: bool = ..., # bint
mask: np.ndarray | None = ...,
result_mask: np.ndarray | None = ...,
) -> None: ...
def group_ohlc(
out: np.ndarray, # floatingintuint_t[:, ::1]
counts: np.ndarray, # int64_t[::1]
values: np.ndarray, # ndarray[floatingintuint_t, ndim=2]
labels: np.ndarray, # const intp_t[:]
min_count: int = ...,
mask: np.ndarray | None = ...,
result_mask: np.ndarray | None = ...,
) -> None: ...
def group_quantile(
out: npt.NDArray[np.float64],
values: np.ndarray, # ndarray[numeric, ndim=1]
labels: npt.NDArray[np.intp],
mask: npt.NDArray[np.uint8],
qs: npt.NDArray[np.float64], # const
starts: npt.NDArray[np.int64],
ends: npt.NDArray[np.int64],
interpolation: Literal["linear", "lower", "higher", "nearest", "midpoint"],
result_mask: np.ndarray | None,
is_datetimelike: bool,
) -> None: ...
def group_last(
out: np.ndarray, # rank_t[:, ::1]
counts: np.ndarray, # int64_t[::1]
values: np.ndarray, # ndarray[rank_t, ndim=2]
labels: np.ndarray, # const int64_t[:]
mask: npt.NDArray[np.bool_] | None,
result_mask: npt.NDArray[np.bool_] | None = ...,
min_count: int = ..., # Py_ssize_t
is_datetimelike: bool = ...,
skipna: bool = ...,
) -> None: ...
def group_nth(
out: np.ndarray, # rank_t[:, ::1]
counts: np.ndarray, # int64_t[::1]
values: np.ndarray, # ndarray[rank_t, ndim=2]
labels: np.ndarray, # const int64_t[:]
mask: npt.NDArray[np.bool_] | None,
result_mask: npt.NDArray[np.bool_] | None = ...,
min_count: int = ..., # int64_t
rank: int = ..., # int64_t
is_datetimelike: bool = ...,
skipna: bool = ...,
) -> None: ...
def group_rank(
out: np.ndarray, # float64_t[:, ::1]
values: np.ndarray, # ndarray[rank_t, ndim=2]
labels: np.ndarray, # const int64_t[:]
ngroups: int,
is_datetimelike: bool,
ties_method: Literal["average", "min", "max", "first", "dense"] = ...,
ascending: bool = ...,
pct: bool = ...,
na_option: Literal["keep", "top", "bottom"] = ...,
mask: npt.NDArray[np.bool_] | None = ...,
) -> None: ...
def group_max(
out: np.ndarray, # groupby_t[:, ::1]
counts: np.ndarray, # int64_t[::1]
values: np.ndarray, # ndarray[groupby_t, ndim=2]
labels: np.ndarray, # const int64_t[:]
min_count: int = ...,
is_datetimelike: bool = ...,
mask: np.ndarray | None = ...,
result_mask: np.ndarray | None = ...,
) -> None: ...
def group_min(
out: np.ndarray, # groupby_t[:, ::1]
counts: np.ndarray, # int64_t[::1]
values: np.ndarray, # ndarray[groupby_t, ndim=2]
labels: np.ndarray, # const int64_t[:]
min_count: int = ...,
is_datetimelike: bool = ...,
mask: np.ndarray | None = ...,
result_mask: np.ndarray | None = ...,
) -> None: ...
def group_idxmin_idxmax(
out: npt.NDArray[np.intp],
counts: npt.NDArray[np.int64],
values: np.ndarray, # ndarray[groupby_t, ndim=2]
labels: npt.NDArray[np.intp],
min_count: int = ...,
is_datetimelike: bool = ...,
mask: np.ndarray | None = ...,
name: str = ...,
skipna: bool = ...,
result_mask: np.ndarray | None = ...,
) -> None: ...
def group_cummin(
out: np.ndarray, # groupby_t[:, ::1]
values: np.ndarray, # ndarray[groupby_t, ndim=2]
labels: np.ndarray, # const int64_t[:]
ngroups: int,
is_datetimelike: bool,
mask: np.ndarray | None = ...,
result_mask: np.ndarray | None = ...,
skipna: bool = ...,
) -> None: ...
def group_cummax(
out: np.ndarray, # groupby_t[:, ::1]
values: np.ndarray, # ndarray[groupby_t, ndim=2]
labels: np.ndarray, # const int64_t[:]
ngroups: int,
is_datetimelike: bool,
mask: np.ndarray | None = ...,
result_mask: np.ndarray | None = ...,
skipna: bool = ...,
) -> None: ...

View File

@@ -0,0 +1,9 @@
import numpy as np
from pandas._typing import npt
def hash_object_array(
arr: npt.NDArray[np.object_],
key: str,
encoding: str = ...,
) -> npt.NDArray[np.uint64]: ...
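# Usage sketch (illustrative; the 16-character key mirrors pandas' default
# hashing key and is an assumption here, not part of the stub):
import numpy as np
from pandas._libs.hashing import hash_object_array

arr = np.array(["a", "b", "a"], dtype=object)
hashes = hash_object_array(arr, key="0123456789123456")
assert hashes.dtype == np.uint64 and hashes[0] == hashes[2]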

View File

@@ -0,0 +1,252 @@
from typing import (
Any,
Hashable,
Literal,
)
import numpy as np
from pandas._typing import npt
def unique_label_indices(
labels: np.ndarray, # const int64_t[:]
) -> np.ndarray: ...
class Factorizer:
count: int
uniques: Any
def __init__(self, size_hint: int) -> None: ...
def get_count(self) -> int: ...
def factorize(
self,
values: np.ndarray,
na_sentinel=...,
na_value=...,
mask=...,
) -> npt.NDArray[np.intp]: ...
class ObjectFactorizer(Factorizer):
table: PyObjectHashTable
uniques: ObjectVector
class Int64Factorizer(Factorizer):
table: Int64HashTable
uniques: Int64Vector
class UInt64Factorizer(Factorizer):
table: UInt64HashTable
uniques: UInt64Vector
class Int32Factorizer(Factorizer):
table: Int32HashTable
uniques: Int32Vector
class UInt32Factorizer(Factorizer):
table: UInt32HashTable
uniques: UInt32Vector
class Int16Factorizer(Factorizer):
table: Int16HashTable
uniques: Int16Vector
class UInt16Factorizer(Factorizer):
table: UInt16HashTable
uniques: UInt16Vector
class Int8Factorizer(Factorizer):
table: Int8HashTable
uniques: Int8Vector
class UInt8Factorizer(Factorizer):
table: UInt8HashTable
uniques: UInt8Vector
class Float64Factorizer(Factorizer):
table: Float64HashTable
uniques: Float64Vector
class Float32Factorizer(Factorizer):
table: Float32HashTable
uniques: Float32Vector
class Complex64Factorizer(Factorizer):
table: Complex64HashTable
uniques: Complex64Vector
class Complex128Factorizer(Factorizer):
table: Complex128HashTable
uniques: Complex128Vector
class Int64Vector:
def __init__(self, *args) -> None: ...
def __len__(self) -> int: ...
def to_array(self) -> npt.NDArray[np.int64]: ...
class Int32Vector:
def __init__(self, *args) -> None: ...
def __len__(self) -> int: ...
def to_array(self) -> npt.NDArray[np.int32]: ...
class Int16Vector:
def __init__(self, *args) -> None: ...
def __len__(self) -> int: ...
def to_array(self) -> npt.NDArray[np.int16]: ...
class Int8Vector:
def __init__(self, *args) -> None: ...
def __len__(self) -> int: ...
def to_array(self) -> npt.NDArray[np.int8]: ...
class UInt64Vector:
def __init__(self, *args) -> None: ...
def __len__(self) -> int: ...
def to_array(self) -> npt.NDArray[np.uint64]: ...
class UInt32Vector:
def __init__(self, *args) -> None: ...
def __len__(self) -> int: ...
def to_array(self) -> npt.NDArray[np.uint32]: ...
class UInt16Vector:
def __init__(self, *args) -> None: ...
def __len__(self) -> int: ...
def to_array(self) -> npt.NDArray[np.uint16]: ...
class UInt8Vector:
def __init__(self, *args) -> None: ...
def __len__(self) -> int: ...
def to_array(self) -> npt.NDArray[np.uint8]: ...
class Float64Vector:
def __init__(self, *args) -> None: ...
def __len__(self) -> int: ...
def to_array(self) -> npt.NDArray[np.float64]: ...
class Float32Vector:
def __init__(self, *args) -> None: ...
def __len__(self) -> int: ...
def to_array(self) -> npt.NDArray[np.float32]: ...
class Complex128Vector:
def __init__(self, *args) -> None: ...
def __len__(self) -> int: ...
def to_array(self) -> npt.NDArray[np.complex128]: ...
class Complex64Vector:
def __init__(self, *args) -> None: ...
def __len__(self) -> int: ...
def to_array(self) -> npt.NDArray[np.complex64]: ...
class StringVector:
def __init__(self, *args) -> None: ...
def __len__(self) -> int: ...
def to_array(self) -> npt.NDArray[np.object_]: ...
class ObjectVector:
def __init__(self, *args) -> None: ...
def __len__(self) -> int: ...
def to_array(self) -> npt.NDArray[np.object_]: ...
class HashTable:
# NB: The base HashTable class does _not_ actually have these methods;
# we are putting them here for the sake of mypy to avoid
# reproducing them in each subclass below.
def __init__(self, size_hint: int = ..., uses_mask: bool = ...) -> None: ...
def __len__(self) -> int: ...
def __contains__(self, key: Hashable) -> bool: ...
def sizeof(self, deep: bool = ...) -> int: ...
def get_state(self) -> dict[str, int]: ...
# TODO: `val/key` type is subclass-specific
def get_item(self, val): ... # TODO: return type?
def set_item(self, key, val) -> None: ...
def get_na(self): ... # TODO: return type?
def set_na(self, val) -> None: ...
def map_locations(
self,
values: np.ndarray, # np.ndarray[subclass-specific]
mask: npt.NDArray[np.bool_] | None = ...,
) -> None: ...
def lookup(
self,
values: np.ndarray, # np.ndarray[subclass-specific]
mask: npt.NDArray[np.bool_] | None = ...,
) -> npt.NDArray[np.intp]: ...
def get_labels(
self,
values: np.ndarray, # np.ndarray[subclass-specific]
uniques, # SubclassTypeVector
count_prior: int = ...,
na_sentinel: int = ...,
na_value: object = ...,
mask=...,
) -> npt.NDArray[np.intp]: ...
def unique(
self,
values: np.ndarray, # np.ndarray[subclass-specific]
return_inverse: bool = ...,
mask=...,
) -> (
tuple[
np.ndarray, # np.ndarray[subclass-specific]
npt.NDArray[np.intp],
]
| np.ndarray
): ... # np.ndarray[subclass-specific]
def factorize(
self,
values: np.ndarray, # np.ndarray[subclass-specific]
na_sentinel: int = ...,
na_value: object = ...,
mask=...,
ignore_na: bool = True,
) -> tuple[np.ndarray, npt.NDArray[np.intp]]: ... # np.ndarray[subclass-specific]
class Complex128HashTable(HashTable): ...
class Complex64HashTable(HashTable): ...
class Float64HashTable(HashTable): ...
class Float32HashTable(HashTable): ...
class Int64HashTable(HashTable):
# Only Int64HashTable has get_labels_groupby, map_keys_to_values
def get_labels_groupby(
self,
values: npt.NDArray[np.int64], # const int64_t[:]
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.int64]]: ...
def map_keys_to_values(
self,
keys: npt.NDArray[np.int64],
values: npt.NDArray[np.int64], # const int64_t[:]
) -> None: ...
class Int32HashTable(HashTable): ...
class Int16HashTable(HashTable): ...
class Int8HashTable(HashTable): ...
class UInt64HashTable(HashTable): ...
class UInt32HashTable(HashTable): ...
class UInt16HashTable(HashTable): ...
class UInt8HashTable(HashTable): ...
class StringHashTable(HashTable): ...
class PyObjectHashTable(HashTable): ...
class IntpHashTable(HashTable): ...
def duplicated(
values: np.ndarray,
keep: Literal["last", "first", False] = ...,
mask: npt.NDArray[np.bool_] | None = ...,
) -> npt.NDArray[np.bool_]: ...
def mode(
values: np.ndarray, dropna: bool, mask: npt.NDArray[np.bool_] | None = ...
) -> np.ndarray: ...
def value_count(
values: np.ndarray,
dropna: bool,
mask: npt.NDArray[np.bool_] | None = ...,
) -> tuple[np.ndarray, npt.NDArray[np.int64], int]: ... # np.ndarray[same-as-values]
# arr and values should have same dtype
def ismember(
arr: np.ndarray,
values: np.ndarray,
) -> npt.NDArray[np.bool_]: ...
def object_hash(obj) -> int: ...
def objects_are_equal(a, b) -> bool: ...
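# Usage sketch (illustrative; assumes the compiled hashtable extension
# matches the declarations above):
import numpy as np
from pandas._libs import hashtable as libhashtable

values = np.array([3, 1, 3, 2], dtype=np.int64)
table = libhashtable.Int64HashTable()
uniques = table.unique(values)                   # order of first appearance
dupes = libhashtable.duplicated(values, keep="first")
assert uniques.tolist() == [3, 1, 2]
assert dupes.tolist() == [False, False, True, False]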

View File

@@ -0,0 +1,100 @@
import numpy as np
from pandas._typing import npt
from pandas import MultiIndex
from pandas.core.arrays import ExtensionArray
multiindex_nulls_shift: int
class IndexEngine:
over_size_threshold: bool
def __init__(self, values: np.ndarray) -> None: ...
def __contains__(self, val: object) -> bool: ...
# -> int | slice | np.ndarray[bool]
def get_loc(self, val: object) -> int | slice | np.ndarray: ...
def sizeof(self, deep: bool = ...) -> int: ...
def __sizeof__(self) -> int: ...
@property
def is_unique(self) -> bool: ...
@property
def is_monotonic_increasing(self) -> bool: ...
@property
def is_monotonic_decreasing(self) -> bool: ...
@property
def is_mapping_populated(self) -> bool: ...
def clear_mapping(self): ...
def get_indexer(self, values: np.ndarray) -> npt.NDArray[np.intp]: ...
def get_indexer_non_unique(
self,
targets: np.ndarray,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
class MaskedIndexEngine(IndexEngine):
def __init__(self, values: object) -> None: ...
def get_indexer_non_unique(
self, targets: object
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
class Float64Engine(IndexEngine): ...
class Float32Engine(IndexEngine): ...
class Complex128Engine(IndexEngine): ...
class Complex64Engine(IndexEngine): ...
class Int64Engine(IndexEngine): ...
class Int32Engine(IndexEngine): ...
class Int16Engine(IndexEngine): ...
class Int8Engine(IndexEngine): ...
class UInt64Engine(IndexEngine): ...
class UInt32Engine(IndexEngine): ...
class UInt16Engine(IndexEngine): ...
class UInt8Engine(IndexEngine): ...
class ObjectEngine(IndexEngine): ...
class DatetimeEngine(Int64Engine): ...
class TimedeltaEngine(DatetimeEngine): ...
class PeriodEngine(Int64Engine): ...
class BoolEngine(UInt8Engine): ...
class MaskedFloat64Engine(MaskedIndexEngine): ...
class MaskedFloat32Engine(MaskedIndexEngine): ...
class MaskedComplex128Engine(MaskedIndexEngine): ...
class MaskedComplex64Engine(MaskedIndexEngine): ...
class MaskedInt64Engine(MaskedIndexEngine): ...
class MaskedInt32Engine(MaskedIndexEngine): ...
class MaskedInt16Engine(MaskedIndexEngine): ...
class MaskedInt8Engine(MaskedIndexEngine): ...
class MaskedUInt64Engine(MaskedIndexEngine): ...
class MaskedUInt32Engine(MaskedIndexEngine): ...
class MaskedUInt16Engine(MaskedIndexEngine): ...
class MaskedUInt8Engine(MaskedIndexEngine): ...
class MaskedBoolEngine(MaskedUInt8Engine): ...
class BaseMultiIndexCodesEngine:
levels: list[np.ndarray]
offsets: np.ndarray # ndarray[uint64_t, ndim=1]
def __init__(
self,
levels: list[np.ndarray], # all entries hashable
labels: list[np.ndarray], # all entries integer-dtyped
offsets: np.ndarray, # np.ndarray[np.uint64, ndim=1]
) -> None: ...
def get_indexer(self, target: npt.NDArray[np.object_]) -> npt.NDArray[np.intp]: ...
def _extract_level_codes(self, target: MultiIndex) -> np.ndarray: ...
class ExtensionEngine:
def __init__(self, values: ExtensionArray) -> None: ...
def __contains__(self, val: object) -> bool: ...
def get_loc(self, val: object) -> int | slice | np.ndarray: ...
def get_indexer(self, values: np.ndarray) -> npt.NDArray[np.intp]: ...
def get_indexer_non_unique(
self,
targets: np.ndarray,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
@property
def is_unique(self) -> bool: ...
@property
def is_monotonic_increasing(self) -> bool: ...
@property
def is_monotonic_decreasing(self) -> bool: ...
def sizeof(self, deep: bool = ...) -> int: ...
def clear_mapping(self): ...
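# Usage sketch (illustrative): Index.get_loc/get_indexer delegate to these
# engines; assumes the compiled extension matches the declarations above.
import numpy as np
from pandas._libs.index import Int64Engine

engine = Int64Engine(np.array([10, 20, 30], dtype=np.int64))
assert engine.is_unique and engine.is_monotonic_increasing
assert engine.get_loc(20) == 1
assert engine.get_indexer(np.array([30, 10], dtype=np.int64)).tolist() == [2, 0]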

View File

@@ -0,0 +1,17 @@
from typing import (
Generic,
TypeVar,
)
from pandas.core.indexing import IndexingMixin
_IndexingMixinT = TypeVar("_IndexingMixinT", bound=IndexingMixin)
class NDFrameIndexerBase(Generic[_IndexingMixinT]):
name: str
# in practice obj is either a DataFrame or a Series
obj: _IndexingMixinT
def __init__(self, name: str, obj: _IndexingMixinT) -> None: ...
@property
def ndim(self) -> int: ...

View File

@@ -0,0 +1,94 @@
from typing import (
Iterator,
Sequence,
final,
overload,
)
import weakref
import numpy as np
from pandas._typing import (
ArrayLike,
Self,
npt,
)
from pandas import Index
from pandas.core.internals.blocks import Block as B
def slice_len(slc: slice, objlen: int = ...) -> int: ...
def get_concat_blkno_indexers(
blknos_list: list[npt.NDArray[np.intp]],
) -> list[tuple[npt.NDArray[np.intp], BlockPlacement]]: ...
def get_blkno_indexers(
blknos: np.ndarray, # int64_t[:]
group: bool = ...,
) -> list[tuple[int, slice | np.ndarray]]: ...
def get_blkno_placements(
blknos: np.ndarray,
group: bool = ...,
) -> Iterator[tuple[int, BlockPlacement]]: ...
def update_blklocs_and_blknos(
blklocs: npt.NDArray[np.intp],
blknos: npt.NDArray[np.intp],
loc: int,
nblocks: int,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
@final
class BlockPlacement:
def __init__(self, val: int | slice | np.ndarray) -> None: ...
@property
def indexer(self) -> np.ndarray | slice: ...
@property
def as_array(self) -> np.ndarray: ...
@property
def as_slice(self) -> slice: ...
@property
def is_slice_like(self) -> bool: ...
@overload
def __getitem__(
self, loc: slice | Sequence[int] | npt.NDArray[np.intp]
) -> BlockPlacement: ...
@overload
def __getitem__(self, loc: int) -> int: ...
def __iter__(self) -> Iterator[int]: ...
def __len__(self) -> int: ...
def delete(self, loc) -> BlockPlacement: ...
def add(self, other) -> BlockPlacement: ...
def append(self, others: list[BlockPlacement]) -> BlockPlacement: ...
def tile_for_unstack(self, factor: int) -> npt.NDArray[np.intp]: ...
class Block:
_mgr_locs: BlockPlacement
ndim: int
values: ArrayLike
refs: BlockValuesRefs
def __init__(
self,
values: ArrayLike,
placement: BlockPlacement,
ndim: int,
refs: BlockValuesRefs | None = ...,
) -> None: ...
def slice_block_rows(self, slicer: slice) -> Self: ...
class BlockManager:
blocks: tuple[B, ...]
axes: list[Index]
_known_consolidated: bool
_is_consolidated: bool
_blknos: np.ndarray
_blklocs: np.ndarray
def __init__(
self, blocks: tuple[B, ...], axes: list[Index], verify_integrity=...
) -> None: ...
def get_slice(self, slobj: slice, axis: int = ...) -> Self: ...
def _rebuild_blknos_and_blklocs(self) -> None: ...
class BlockValuesRefs:
referenced_blocks: list[weakref.ref]
def __init__(self, blk: Block | None = ...) -> None: ...
def add_reference(self, blk: Block) -> None: ...
def add_index_reference(self, index: Index) -> None: ...
def has_reference(self) -> bool: ...
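# Usage sketch (illustrative): a BlockPlacement records which manager columns
# a Block owns; assumes the compiled extension matches the declarations above.
from pandas._libs.internals import BlockPlacement

bp = BlockPlacement(slice(0, 4, 2))
assert bp.is_slice_like
assert list(bp) == [0, 2] and bp.as_array.tolist() == [0, 2]
shifted = bp.add(1)                      # shift the placement by one column
assert shifted.as_array.tolist() == [1, 3]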

View File

@@ -0,0 +1,174 @@
from typing import (
Any,
Generic,
TypeVar,
overload,
)
import numpy as np
import numpy.typing as npt
from pandas._typing import (
IntervalClosedType,
Timedelta,
Timestamp,
)
VALID_CLOSED: frozenset[str]
_OrderableScalarT = TypeVar("_OrderableScalarT", int, float)
_OrderableTimesT = TypeVar("_OrderableTimesT", Timestamp, Timedelta)
_OrderableT = TypeVar("_OrderableT", int, float, Timestamp, Timedelta)
class _LengthDescriptor:
@overload
def __get__(
self, instance: Interval[_OrderableScalarT], owner: Any
) -> _OrderableScalarT: ...
@overload
def __get__(
self, instance: Interval[_OrderableTimesT], owner: Any
) -> Timedelta: ...
class _MidDescriptor:
@overload
def __get__(self, instance: Interval[_OrderableScalarT], owner: Any) -> float: ...
@overload
def __get__(
self, instance: Interval[_OrderableTimesT], owner: Any
) -> _OrderableTimesT: ...
class IntervalMixin:
@property
def closed_left(self) -> bool: ...
@property
def closed_right(self) -> bool: ...
@property
def open_left(self) -> bool: ...
@property
def open_right(self) -> bool: ...
@property
def is_empty(self) -> bool: ...
def _check_closed_matches(self, other: IntervalMixin, name: str = ...) -> None: ...
class Interval(IntervalMixin, Generic[_OrderableT]):
@property
def left(self: Interval[_OrderableT]) -> _OrderableT: ...
@property
def right(self: Interval[_OrderableT]) -> _OrderableT: ...
@property
def closed(self) -> IntervalClosedType: ...
mid: _MidDescriptor
length: _LengthDescriptor
def __init__(
self,
left: _OrderableT,
right: _OrderableT,
closed: IntervalClosedType = ...,
) -> None: ...
def __hash__(self) -> int: ...
@overload
def __contains__(
self: Interval[Timedelta], key: Timedelta | Interval[Timedelta]
) -> bool: ...
@overload
def __contains__(
self: Interval[Timestamp], key: Timestamp | Interval[Timestamp]
) -> bool: ...
@overload
def __contains__(
self: Interval[_OrderableScalarT],
key: _OrderableScalarT | Interval[_OrderableScalarT],
) -> bool: ...
@overload
def __add__(
self: Interval[_OrderableTimesT], y: Timedelta
) -> Interval[_OrderableTimesT]: ...
@overload
def __add__(
self: Interval[int], y: _OrderableScalarT
) -> Interval[_OrderableScalarT]: ...
@overload
def __add__(self: Interval[float], y: float) -> Interval[float]: ...
@overload
def __radd__(
self: Interval[_OrderableTimesT], y: Timedelta
) -> Interval[_OrderableTimesT]: ...
@overload
def __radd__(
self: Interval[int], y: _OrderableScalarT
) -> Interval[_OrderableScalarT]: ...
@overload
def __radd__(self: Interval[float], y: float) -> Interval[float]: ...
@overload
def __sub__(
self: Interval[_OrderableTimesT], y: Timedelta
) -> Interval[_OrderableTimesT]: ...
@overload
def __sub__(
self: Interval[int], y: _OrderableScalarT
) -> Interval[_OrderableScalarT]: ...
@overload
def __sub__(self: Interval[float], y: float) -> Interval[float]: ...
@overload
def __rsub__(
self: Interval[_OrderableTimesT], y: Timedelta
) -> Interval[_OrderableTimesT]: ...
@overload
def __rsub__(
self: Interval[int], y: _OrderableScalarT
) -> Interval[_OrderableScalarT]: ...
@overload
def __rsub__(self: Interval[float], y: float) -> Interval[float]: ...
@overload
def __mul__(
self: Interval[int], y: _OrderableScalarT
) -> Interval[_OrderableScalarT]: ...
@overload
def __mul__(self: Interval[float], y: float) -> Interval[float]: ...
@overload
def __rmul__(
self: Interval[int], y: _OrderableScalarT
) -> Interval[_OrderableScalarT]: ...
@overload
def __rmul__(self: Interval[float], y: float) -> Interval[float]: ...
@overload
def __truediv__(
self: Interval[int], y: _OrderableScalarT
) -> Interval[_OrderableScalarT]: ...
@overload
def __truediv__(self: Interval[float], y: float) -> Interval[float]: ...
@overload
def __floordiv__(
self: Interval[int], y: _OrderableScalarT
) -> Interval[_OrderableScalarT]: ...
@overload
def __floordiv__(self: Interval[float], y: float) -> Interval[float]: ...
def overlaps(self: Interval[_OrderableT], other: Interval[_OrderableT]) -> bool: ...
def intervals_to_interval_bounds(
intervals: np.ndarray, validate_closed: bool = ...
) -> tuple[np.ndarray, np.ndarray, IntervalClosedType]: ...
class IntervalTree(IntervalMixin):
def __init__(
self,
left: np.ndarray,
right: np.ndarray,
closed: IntervalClosedType = ...,
leaf_size: int = ...,
) -> None: ...
@property
def mid(self) -> np.ndarray: ...
@property
def length(self) -> np.ndarray: ...
def get_indexer(self, target) -> npt.NDArray[np.intp]: ...
def get_indexer_non_unique(
self, target
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
_na_count: int
@property
def is_overlapping(self) -> bool: ...
@property
def is_monotonic_increasing(self) -> bool: ...
def clear_mapping(self) -> None: ...
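# Usage sketch (illustrative) for the Interval overloads above:
from pandas import Interval, Timedelta, Timestamp

iv = Interval(0, 5, closed="right")
assert 5 in iv and 0 not in iv           # closed="right" excludes the left endpoint
assert iv + 1 == Interval(1, 6, closed="right")
assert iv.overlaps(Interval(4, 8))

tiv = Interval(Timestamp("2024-01-01"), Timestamp("2024-01-02"))
assert tiv.length == Timedelta("1D")     # length of a Timestamp interval is a Timedelta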

View File

@@ -0,0 +1,79 @@
import numpy as np
from pandas._typing import npt
def inner_join(
left: np.ndarray, # const intp_t[:]
right: np.ndarray, # const intp_t[:]
max_groups: int,
sort: bool = ...,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
def left_outer_join(
left: np.ndarray, # const intp_t[:]
right: np.ndarray, # const intp_t[:]
max_groups: int,
sort: bool = ...,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
def full_outer_join(
left: np.ndarray, # const intp_t[:]
right: np.ndarray, # const intp_t[:]
max_groups: int,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
def ffill_indexer(
indexer: np.ndarray, # const intp_t[:]
) -> npt.NDArray[np.intp]: ...
def left_join_indexer_unique(
left: np.ndarray, # ndarray[join_t]
right: np.ndarray, # ndarray[join_t]
) -> npt.NDArray[np.intp]: ...
def left_join_indexer(
left: np.ndarray, # ndarray[join_t]
right: np.ndarray, # ndarray[join_t]
) -> tuple[
np.ndarray, # np.ndarray[join_t]
npt.NDArray[np.intp],
npt.NDArray[np.intp],
]: ...
def inner_join_indexer(
left: np.ndarray, # ndarray[join_t]
right: np.ndarray, # ndarray[join_t]
) -> tuple[
np.ndarray, # np.ndarray[join_t]
npt.NDArray[np.intp],
npt.NDArray[np.intp],
]: ...
def outer_join_indexer(
left: np.ndarray, # ndarray[join_t]
right: np.ndarray, # ndarray[join_t]
) -> tuple[
np.ndarray, # np.ndarray[join_t]
npt.NDArray[np.intp],
npt.NDArray[np.intp],
]: ...
def asof_join_backward_on_X_by_Y(
left_values: np.ndarray, # ndarray[numeric_t]
right_values: np.ndarray, # ndarray[numeric_t]
left_by_values: np.ndarray, # const int64_t[:]
right_by_values: np.ndarray, # const int64_t[:]
allow_exact_matches: bool = ...,
tolerance: np.number | float | None = ...,
use_hashtable: bool = ...,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
def asof_join_forward_on_X_by_Y(
left_values: np.ndarray, # ndarray[numeric_t]
right_values: np.ndarray, # ndarray[numeric_t]
left_by_values: np.ndarray, # const int64_t[:]
right_by_values: np.ndarray, # const int64_t[:]
allow_exact_matches: bool = ...,
tolerance: np.number | float | None = ...,
use_hashtable: bool = ...,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
def asof_join_nearest_on_X_by_Y(
left_values: np.ndarray, # ndarray[numeric_t]
right_values: np.ndarray, # ndarray[numeric_t]
left_by_values: np.ndarray, # const int64_t[:]
right_by_values: np.ndarray, # const int64_t[:]
allow_exact_matches: bool = ...,
tolerance: np.number | float | None = ...,
use_hashtable: bool = ...,
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
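# Usage sketch (illustrative; both inputs must be sorted -- assumes the
# compiled join extension matches the declarations above):
import numpy as np
from pandas._libs import join as libjoin

left = np.array([1, 2, 4], dtype=np.int64)
right = np.array([2, 3, 4], dtype=np.int64)
joined, lidx, ridx = libjoin.inner_join_indexer(left, right)
assert joined.tolist() == [2, 4]
assert lidx.tolist() == [1, 2] and ridx.tolist() == [0, 2]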

View File

@@ -0,0 +1,23 @@
from typing import (
Any,
Callable,
)
def ujson_dumps(
obj: Any,
ensure_ascii: bool = ...,
double_precision: int = ...,
indent: int = ...,
orient: str = ...,
date_unit: str = ...,
iso_dates: bool = ...,
default_handler: None
| Callable[[Any], str | float | bool | list | dict | None] = ...,
) -> str: ...
def ujson_loads(
s: str,
precise_float: bool = ...,
numpy: bool = ...,
dtype: None = ...,
labelled: bool = ...,
) -> Any: ...
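# Usage sketch (illustrative): these are the vendored ujson bindings used by
# DataFrame.to_json / read_json.
from pandas._libs.json import ujson_dumps, ujson_loads

payload = ujson_dumps({"a": 1, "b": [1.5, None]}, double_precision=10)
assert ujson_loads(payload) == {"a": 1, "b": [1.5, None]}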

View File

@@ -0,0 +1,213 @@
# TODO(npdtypes): Many types specified here can be made more specific/accurate;
# the more specific versions are specified in comments
from decimal import Decimal
from typing import (
Any,
Callable,
Final,
Generator,
Hashable,
Literal,
TypeAlias,
overload,
)
import numpy as np
from pandas._libs.interval import Interval
from pandas._libs.tslibs import Period
from pandas._typing import (
ArrayLike,
DtypeObj,
TypeGuard,
npt,
)
# placeholder until we can specify np.ndarray[object, ndim=2]
ndarray_obj_2d = np.ndarray
from enum import Enum
class _NoDefault(Enum):
no_default = ...
no_default: Final = _NoDefault.no_default
NoDefault: TypeAlias = Literal[_NoDefault.no_default]
i8max: int
u8max: int
def is_np_dtype(dtype: object, kinds: str | None = ...) -> TypeGuard[np.dtype]: ...
def item_from_zerodim(val: object) -> object: ...
def infer_dtype(value: object, skipna: bool = ...) -> str: ...
def is_iterator(obj: object) -> bool: ...
def is_scalar(val: object) -> bool: ...
def is_list_like(obj: object, allow_sets: bool = ...) -> bool: ...
def is_pyarrow_array(obj: object) -> bool: ...
def is_period(val: object) -> TypeGuard[Period]: ...
def is_interval(obj: object) -> TypeGuard[Interval]: ...
def is_decimal(obj: object) -> TypeGuard[Decimal]: ...
def is_complex(obj: object) -> TypeGuard[complex]: ...
def is_bool(obj: object) -> TypeGuard[bool | np.bool_]: ...
def is_integer(obj: object) -> TypeGuard[int | np.integer]: ...
def is_int_or_none(obj) -> bool: ...
def is_float(obj: object) -> TypeGuard[float]: ...
def is_interval_array(values: np.ndarray) -> bool: ...
def is_datetime64_array(values: np.ndarray, skipna: bool = True) -> bool: ...
def is_timedelta_or_timedelta64_array(
values: np.ndarray, skipna: bool = True
) -> bool: ...
def is_datetime_with_singletz_array(values: np.ndarray) -> bool: ...
def is_time_array(values: np.ndarray, skipna: bool = ...): ...
def is_date_array(values: np.ndarray, skipna: bool = ...): ...
def is_datetime_array(values: np.ndarray, skipna: bool = ...): ...
def is_string_array(values: np.ndarray, skipna: bool = ...): ...
def is_float_array(values: np.ndarray): ...
def is_integer_array(values: np.ndarray, skipna: bool = ...): ...
def is_bool_array(values: np.ndarray, skipna: bool = ...): ...
def fast_multiget(
mapping: dict,
keys: np.ndarray, # object[:]
default=...,
) -> np.ndarray: ...
def fast_unique_multiple_list_gen(gen: Generator, sort: bool = ...) -> list: ...
def fast_unique_multiple_list(lists: list, sort: bool | None = ...) -> list: ...
def map_infer(
arr: np.ndarray,
f: Callable[[Any], Any],
convert: bool = ...,
ignore_na: bool = ...,
) -> np.ndarray: ...
@overload
def maybe_convert_objects(
objects: npt.NDArray[np.object_],
*,
try_float: bool = ...,
safe: bool = ...,
convert_numeric: bool = ...,
convert_non_numeric: Literal[False] = ...,
convert_to_nullable_dtype: Literal[False] = ...,
dtype_if_all_nat: DtypeObj | None = ...,
) -> npt.NDArray[np.object_ | np.number]: ...
@overload
def maybe_convert_objects(
objects: npt.NDArray[np.object_],
*,
try_float: bool = ...,
safe: bool = ...,
convert_numeric: bool = ...,
convert_non_numeric: bool = ...,
convert_to_nullable_dtype: Literal[True] = ...,
dtype_if_all_nat: DtypeObj | None = ...,
) -> ArrayLike: ...
@overload
def maybe_convert_objects(
objects: npt.NDArray[np.object_],
*,
try_float: bool = ...,
safe: bool = ...,
convert_numeric: bool = ...,
convert_non_numeric: bool = ...,
convert_to_nullable_dtype: bool = ...,
dtype_if_all_nat: DtypeObj | None = ...,
) -> ArrayLike: ...
@overload
def maybe_convert_numeric(
values: npt.NDArray[np.object_],
na_values: set,
convert_empty: bool = ...,
coerce_numeric: bool = ...,
convert_to_masked_nullable: Literal[False] = ...,
) -> tuple[np.ndarray, None]: ...
@overload
def maybe_convert_numeric(
values: npt.NDArray[np.object_],
na_values: set,
convert_empty: bool = ...,
coerce_numeric: bool = ...,
*,
convert_to_masked_nullable: Literal[True],
) -> tuple[np.ndarray, np.ndarray]: ...
# TODO: restrict `arr`?
def ensure_string_array(
arr,
na_value: object = ...,
convert_na_value: bool = ...,
copy: bool = ...,
skipna: bool = ...,
) -> npt.NDArray[np.object_]: ...
def convert_nans_to_NA(
arr: npt.NDArray[np.object_],
) -> npt.NDArray[np.object_]: ...
def fast_zip(ndarrays: list) -> npt.NDArray[np.object_]: ...
# TODO: can we be more specific about rows?
def to_object_array_tuples(rows: object) -> ndarray_obj_2d: ...
def tuples_to_object_array(
tuples: npt.NDArray[np.object_],
) -> ndarray_obj_2d: ...
# TODO: can we be more specific about rows?
def to_object_array(rows: object, min_width: int = ...) -> ndarray_obj_2d: ...
def dicts_to_array(dicts: list, columns: list) -> ndarray_obj_2d: ...
def maybe_booleans_to_slice(
mask: npt.NDArray[np.uint8],
) -> slice | npt.NDArray[np.uint8]: ...
def maybe_indices_to_slice(
indices: npt.NDArray[np.intp],
max_len: int,
) -> slice | npt.NDArray[np.intp]: ...
def is_all_arraylike(obj: list) -> bool: ...
# -----------------------------------------------------------------
# Functions which in reality take memoryviews
def memory_usage_of_objects(arr: np.ndarray) -> int: ... # object[:] # np.int64
def map_infer_mask(
arr: np.ndarray,
f: Callable[[Any], Any],
mask: np.ndarray, # const uint8_t[:]
convert: bool = ...,
na_value: Any = ...,
dtype: np.dtype = ...,
) -> np.ndarray: ...
def indices_fast(
index: npt.NDArray[np.intp],
labels: np.ndarray, # const int64_t[:]
keys: list,
sorted_labels: list[npt.NDArray[np.int64]],
) -> dict[Hashable, npt.NDArray[np.intp]]: ...
def generate_slices(
labels: np.ndarray, ngroups: int # const intp_t[:]
) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]: ...
def count_level_2d(
mask: np.ndarray, # ndarray[uint8_t, ndim=2, cast=True],
labels: np.ndarray, # const intp_t[:]
max_bin: int,
) -> np.ndarray: ... # np.ndarray[np.int64, ndim=2]
def get_level_sorter(
codes: np.ndarray, # const int64_t[:]
starts: np.ndarray, # const intp_t[:]
) -> np.ndarray: ... # np.ndarray[np.intp, ndim=1]
def generate_bins_dt64(
values: npt.NDArray[np.int64],
binner: np.ndarray, # const int64_t[:]
closed: object = ...,
hasnans: bool = ...,
) -> np.ndarray: ... # np.ndarray[np.int64, ndim=1]
def array_equivalent_object(
left: npt.NDArray[np.object_],
right: npt.NDArray[np.object_],
) -> bool: ...
def has_infs(arr: np.ndarray) -> bool: ... # const floating[:]
def has_only_ints_or_nan(arr: np.ndarray) -> bool: ... # const floating[:]
def get_reverse_indexer(
indexer: np.ndarray, # const intp_t[:]
length: int,
) -> npt.NDArray[np.intp]: ...
def is_bool_list(obj: list) -> bool: ...
def dtypes_all_equal(types: list[DtypeObj]) -> bool: ...
def is_range_indexer(
left: np.ndarray, n: int # np.ndarray[np.int64, ndim=1]
) -> bool: ...
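# Usage sketch (illustrative) for a few of the inference helpers above:
import numpy as np
from pandas._libs import lib

assert lib.infer_dtype(np.array([1, 2, 3])) == "integer"
assert lib.is_scalar(1.5) and not lib.is_scalar([1.5])
assert lib.is_list_like((1, 2)) and not lib.is_list_like("abc")
assert lib.no_default is lib.no_default   # sentinel meaning "argument was not passed"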

View File

@@ -0,0 +1,16 @@
import numpy as np
from numpy import typing as npt
class NAType:
def __new__(cls, *args, **kwargs): ...
NA: NAType
def is_matching_na(
left: object, right: object, nan_matches_none: bool = ...
) -> bool: ...
def isposinf_scalar(val: object) -> bool: ...
def isneginf_scalar(val: object) -> bool: ...
def checknull(val: object, inf_as_na: bool = ...) -> bool: ...
def isnaobj(arr: np.ndarray, inf_as_na: bool = ...) -> npt.NDArray[np.bool_]: ...
def is_numeric_na(values: np.ndarray) -> npt.NDArray[np.bool_]: ...
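# Usage sketch (illustrative) for the NA helpers above:
import numpy as np
from pandas._libs.missing import NA, checknull, is_matching_na

assert checknull(None) and checknull(np.nan) and checknull(NA)
assert is_matching_na(np.nan, np.nan)
assert not is_matching_na(np.nan, None)   # different kinds of NA do not match
assert (NA + 1) is NA                     # NA propagates through arithmetic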

View File

@@ -0,0 +1,51 @@
from typing import (
Any,
Callable,
Iterable,
Literal,
TypeAlias,
overload,
)
import numpy as np
from pandas._typing import npt
_BinOp: TypeAlias = Callable[[Any, Any], Any]
_BoolOp: TypeAlias = Callable[[Any, Any], bool]
def scalar_compare(
values: np.ndarray, # object[:]
val: object,
op: _BoolOp, # {operator.eq, operator.ne, ...}
) -> npt.NDArray[np.bool_]: ...
def vec_compare(
left: npt.NDArray[np.object_],
right: npt.NDArray[np.object_],
op: _BoolOp, # {operator.eq, operator.ne, ...}
) -> npt.NDArray[np.bool_]: ...
def scalar_binop(
values: np.ndarray, # object[:]
val: object,
op: _BinOp, # binary operator
) -> np.ndarray: ...
def vec_binop(
left: np.ndarray, # object[:]
right: np.ndarray, # object[:]
op: _BinOp, # binary operator
) -> np.ndarray: ...
@overload
def maybe_convert_bool(
arr: npt.NDArray[np.object_],
true_values: Iterable | None = None,
false_values: Iterable | None = None,
convert_to_masked_nullable: Literal[False] = ...,
) -> tuple[np.ndarray, None]: ...
@overload
def maybe_convert_bool(
arr: npt.NDArray[np.object_],
true_values: Iterable = ...,
false_values: Iterable = ...,
*,
convert_to_masked_nullable: Literal[True],
) -> tuple[np.ndarray, np.ndarray]: ...
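# Usage sketch (illustrative) for the object-dtype comparison/binop helpers above:
import operator
import numpy as np
from pandas._libs import ops as libops

values = np.array(["a", "b", "a"], dtype=object)
assert libops.scalar_compare(values, "a", operator.eq).tolist() == [True, False, True]
doubled = libops.vec_binop(values, values, operator.add)   # elementwise "a" + "a", ...
assert doubled.tolist() == ["aa", "bb", "aa"]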

View File

@@ -0,0 +1,5 @@
import numpy as np
def maybe_dispatch_ufunc_to_dunder_op(
self, ufunc: np.ufunc, method: str, *inputs, **kwargs
): ...

View File

@@ -0,0 +1,77 @@
from typing import (
Hashable,
Literal,
)
import numpy as np
from pandas._typing import (
ArrayLike,
Dtype,
npt,
)
STR_NA_VALUES: set[str]
DEFAULT_BUFFER_HEURISTIC: int
def sanitize_objects(
values: npt.NDArray[np.object_],
na_values: set,
) -> int: ...
class TextReader:
unnamed_cols: set[str]
table_width: int # int64_t
leading_cols: int # int64_t
header: list[list[int]] # non-negative integers
def __init__(
self,
source,
delimiter: bytes | str = ..., # single-character only
header=...,
header_start: int = ..., # int64_t
header_end: int = ..., # uint64_t
index_col=...,
names=...,
tokenize_chunksize: int = ..., # int64_t
delim_whitespace: bool = ...,
converters=...,
skipinitialspace: bool = ...,
escapechar: bytes | str | None = ..., # single-character only
doublequote: bool = ...,
quotechar: str | bytes | None = ..., # at most 1 character
quoting: int = ...,
lineterminator: bytes | str | None = ..., # at most 1 character
comment=...,
decimal: bytes | str = ..., # single-character only
thousands: bytes | str | None = ..., # single-character only
dtype: Dtype | dict[Hashable, Dtype] = ...,
usecols=...,
error_bad_lines: bool = ...,
warn_bad_lines: bool = ...,
na_filter: bool = ...,
na_values=...,
na_fvalues=...,
keep_default_na: bool = ...,
true_values=...,
false_values=...,
allow_leading_cols: bool = ...,
skiprows=...,
skipfooter: int = ..., # int64_t
verbose: bool = ...,
float_precision: Literal["round_trip", "legacy", "high"] | None = ...,
skip_blank_lines: bool = ...,
encoding_errors: bytes | str = ...,
) -> None: ...
def set_noconvert(self, i: int) -> None: ...
def remove_noconvert(self, i: int) -> None: ...
def close(self) -> None: ...
def read(self, rows: int | None = ...) -> dict[int, ArrayLike]: ...
def read_low_memory(self, rows: int | None) -> list[dict[int, ArrayLike]]: ...
# _maybe_upcast, na_values are only exposed for testing
na_values: dict
def _maybe_upcast(
arr, use_dtype_backend: bool = ..., dtype_backend: str = ...
) -> np.ndarray: ...

View File

@@ -0,0 +1,27 @@
from typing import (
Sequence,
overload,
)
from pandas._typing import (
AnyArrayLike,
DataFrame,
Index,
Series,
)
# note: this is a lie to make type checkers happy (they special
# case property). cache_readonly uses attribute names similar to
# property (fget) but it does not provide fset and fdel.
cache_readonly = property
class AxisProperty:
axis: int
def __init__(self, axis: int = ..., doc: str = ...) -> None: ...
@overload
def __get__(self, obj: DataFrame | Series, type) -> Index: ...
@overload
def __get__(self, obj: None, type) -> AxisProperty: ...
def __set__(
self, obj: DataFrame | Series, value: AnyArrayLike | Sequence
) -> None: ...
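# Usage sketch (illustrative): cache_readonly computes the value once per
# instance and then serves it from a per-instance cache.
from pandas._libs.properties import cache_readonly

class Squares:
    def __init__(self, n: int) -> None:
        self.n = n

    @cache_readonly
    def total(self) -> int:
        print("computing")                # runs only on the first access
        return sum(i * i for i in range(self.n))

s = Squares(4)
assert s.total == 14 and s.total == 14    # "computing" is printed once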

View File

@@ -0,0 +1,16 @@
import numpy as np
from pandas._typing import npt
def unstack(
values: np.ndarray, # reshape_t[:, :]
mask: np.ndarray, # const uint8_t[:]
stride: int,
length: int,
width: int,
new_values: np.ndarray, # reshape_t[:, :]
new_mask: np.ndarray, # uint8_t[:, :]
) -> None: ...
def explode(
values: npt.NDArray[np.object_],
) -> tuple[npt.NDArray[np.object_], npt.NDArray[np.int64]]: ...
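# Usage sketch (illustrative): explode is the kernel behind Series.explode,
# assuming the compiled extension matches the stub above.
import numpy as np
from pandas._libs.reshape import explode

values = np.array([[1, 2], [], [3]], dtype=object)
flat, counts = explode(values)
assert counts.tolist() == [2, 1, 1]       # an empty list still yields one (NaN) row
assert flat[0] == 1 and flat[1] == 2 and flat[3] == 3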

View File

@@ -0,0 +1,7 @@
from pandas.io.sas.sas7bdat import SAS7BDATReader
class Parser:
def __init__(self, parser: SAS7BDATReader) -> None: ...
def read(self, nrows: int) -> None: ...
def get_subheader_index(signature: bytes) -> int: ...

View File

@@ -0,0 +1,51 @@
from typing import Sequence
import numpy as np
from pandas._typing import (
Self,
npt,
)
class SparseIndex:
length: int
npoints: int
def __init__(self) -> None: ...
@property
def ngaps(self) -> int: ...
@property
def nbytes(self) -> int: ...
@property
def indices(self) -> npt.NDArray[np.int32]: ...
def equals(self, other) -> bool: ...
def lookup(self, index: int) -> np.int32: ...
def lookup_array(self, indexer: npt.NDArray[np.int32]) -> npt.NDArray[np.int32]: ...
def to_int_index(self) -> IntIndex: ...
def to_block_index(self) -> BlockIndex: ...
def intersect(self, y_: SparseIndex) -> Self: ...
def make_union(self, y_: SparseIndex) -> Self: ...
class IntIndex(SparseIndex):
indices: npt.NDArray[np.int32]
def __init__(
self, length: int, indices: Sequence[int], check_integrity: bool = ...
) -> None: ...
class BlockIndex(SparseIndex):
nblocks: int
blocs: np.ndarray
blengths: np.ndarray
def __init__(
self, length: int, blocs: np.ndarray, blengths: np.ndarray
) -> None: ...
# Override to have correct parameters
def intersect(self, other: SparseIndex) -> Self: ...
def make_union(self, y: SparseIndex) -> Self: ...
def make_mask_object_ndarray(
arr: npt.NDArray[np.object_], fill_value
) -> npt.NDArray[np.bool_]: ...
def get_blocks(
indices: npt.NDArray[np.int32],
) -> tuple[npt.NDArray[np.int32], npt.NDArray[np.int32]]: ...
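# A minimal sketch, assuming current pandas: IntIndex/BlockIndex back the
# public SparseArray, whose ``sp_index`` attribute exposes one of these
# index objects.
import pandas as pd

sp = pd.arrays.SparseArray([0, 0, 1, 2, 0], fill_value=0)
assert sp.sp_index.npoints == 2            # two positions differ from fill_value
assert list(sp.sp_index.indices) == [2, 3]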

View File

@ -0,0 +1,12 @@
def assert_dict_equal(a, b, compare_keys: bool = ...): ...
def assert_almost_equal(
a,
b,
rtol: float = ...,
atol: float = ...,
check_dtype: bool = ...,
obj=...,
lobj=...,
robj=...,
index_values=...,
): ...
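# A minimal sketch of the public, tolerance-based counterparts in
# pandas.testing, assuming current pandas:
import pandas as pd
import pandas.testing as tm

tm.assert_series_equal(pd.Series([0.1 + 0.2]), pd.Series([0.3]), check_exact=False)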

View File

@ -0,0 +1,37 @@
from datetime import tzinfo
import numpy as np
from pandas._typing import npt
def format_array_from_datetime(
values: npt.NDArray[np.int64],
tz: tzinfo | None = ...,
format: str | None = ...,
na_rep: str | float = ...,
reso: int = ..., # NPY_DATETIMEUNIT
) -> npt.NDArray[np.object_]: ...
def array_with_unit_to_datetime(
values: npt.NDArray[np.object_],
unit: str,
errors: str = ...,
) -> tuple[np.ndarray, tzinfo | None]: ...
def first_non_null(values: np.ndarray) -> int: ...
def array_to_datetime(
values: npt.NDArray[np.object_],
errors: str = ...,
dayfirst: bool = ...,
yearfirst: bool = ...,
utc: bool = ...,
creso: int = ...,
) -> tuple[np.ndarray, tzinfo | None]: ...
# returned ndarray may be object dtype or datetime64[ns]
def array_to_datetime_with_tz(
values: npt.NDArray[np.object_],
tz: tzinfo,
dayfirst: bool,
yearfirst: bool,
creso: int,
) -> npt.NDArray[np.int64]: ...
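# A minimal sketch of the public entry point, assuming current pandas:
# array_to_datetime above is reached through pandas.to_datetime, and
# errors="coerce" maps unparseable entries to NaT.
import pandas as pd

out = pd.to_datetime(["2024-01-02", "not a date"], errors="coerce")
assert out[0] == pd.Timestamp("2024-01-02")
assert out[1] is pd.NaT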

View File

@ -0,0 +1,87 @@
__all__ = [
"dtypes",
"localize_pydatetime",
"NaT",
"NaTType",
"iNaT",
"nat_strings",
"OutOfBoundsDatetime",
"OutOfBoundsTimedelta",
"IncompatibleFrequency",
"Period",
"Resolution",
"Timedelta",
"normalize_i8_timestamps",
"is_date_array_normalized",
"dt64arr_to_periodarr",
"delta_to_nanoseconds",
"ints_to_pydatetime",
"ints_to_pytimedelta",
"get_resolution",
"Timestamp",
"tz_convert_from_utc_single",
"tz_convert_from_utc",
"to_offset",
"Tick",
"BaseOffset",
"tz_compare",
"is_unitless",
"astype_overflowsafe",
"get_unit_from_dtype",
"periods_per_day",
"periods_per_second",
"guess_datetime_format",
"add_overflowsafe",
"get_supported_dtype",
"is_supported_dtype",
]
from pandas._libs.tslibs import dtypes # pylint: disable=import-self
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.dtypes import (
Resolution,
periods_per_day,
periods_per_second,
)
from pandas._libs.tslibs.nattype import (
NaT,
NaTType,
iNaT,
nat_strings,
)
from pandas._libs.tslibs.np_datetime import (
OutOfBoundsDatetime,
OutOfBoundsTimedelta,
add_overflowsafe,
astype_overflowsafe,
get_supported_dtype,
is_supported_dtype,
is_unitless,
py_get_unit_from_dtype as get_unit_from_dtype,
)
from pandas._libs.tslibs.offsets import (
BaseOffset,
Tick,
to_offset,
)
from pandas._libs.tslibs.parsing import guess_datetime_format
from pandas._libs.tslibs.period import (
IncompatibleFrequency,
Period,
)
from pandas._libs.tslibs.timedeltas import (
Timedelta,
delta_to_nanoseconds,
ints_to_pytimedelta,
)
from pandas._libs.tslibs.timestamps import Timestamp
from pandas._libs.tslibs.timezones import tz_compare
from pandas._libs.tslibs.tzconversion import tz_convert_from_utc_single
from pandas._libs.tslibs.vectorized import (
dt64arr_to_periodarr,
get_resolution,
ints_to_pydatetime,
is_date_array_normalized,
normalize_i8_timestamps,
tz_convert_from_utc,
)

View File

@ -0,0 +1,12 @@
DAYS: list[str]
MONTH_ALIASES: dict[int, str]
MONTH_NUMBERS: dict[str, int]
MONTHS: list[str]
int_to_weekday: dict[int, str]
def get_firstbday(year: int, month: int) -> int: ...
def get_lastbday(year: int, month: int) -> int: ...
def get_day_of_year(year: int, month: int, day: int) -> int: ...
def get_iso_calendar(year: int, month: int, day: int) -> tuple[int, int, int]: ...
def get_week_of_year(year: int, month: int, day: int) -> int: ...
def get_days_in_month(year: int, month: int) -> int: ...
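# A stdlib analogue of the calendar math these helpers expose (the pandas
# functions themselves are private); the checks below use only the standard
# library.
import calendar
from datetime import date

assert calendar.monthrange(2024, 2)[1] == 29                  # cf. get_days_in_month
assert tuple(date(2024, 1, 1).isocalendar()) == (2024, 1, 1)  # cf. get_iso_calendar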

View File

@ -0,0 +1,14 @@
from datetime import (
datetime,
tzinfo,
)
import numpy as np
DT64NS_DTYPE: np.dtype
TD64NS_DTYPE: np.dtype
def localize_pydatetime(dt: datetime, tz: tzinfo | None) -> datetime: ...
def cast_from_unit_vectorized(
values: np.ndarray, unit: str, out_unit: str = ...
) -> np.ndarray: ...
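# A minimal sketch of the public surface for unit-based conversion, assuming
# current pandas (cast_from_unit_vectorized itself is private):
import pandas as pd

out = pd.to_datetime([1, 2], unit="D", origin=pd.Timestamp("2024-01-01"))
assert list(out) == [pd.Timestamp("2024-01-02"), pd.Timestamp("2024-01-03")]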

View File

@ -0,0 +1,83 @@
from enum import Enum
OFFSET_TO_PERIOD_FREQSTR: dict[str, str]
def periods_per_day(reso: int = ...) -> int: ...
def periods_per_second(reso: int) -> int: ...
def abbrev_to_npy_unit(abbrev: str | None) -> int: ...
def freq_to_period_freqstr(freq_n: int, freq_name: str) -> str: ...
class PeriodDtypeBase:
_dtype_code: int # PeriodDtypeCode
_n: int
# actually __cinit__
def __new__(cls, code: int, n: int): ...
@property
def _freq_group_code(self) -> int: ...
@property
def _resolution_obj(self) -> Resolution: ...
def _get_to_timestamp_base(self) -> int: ...
@property
def _freqstr(self) -> str: ...
def __hash__(self) -> int: ...
def _is_tick_like(self) -> bool: ...
@property
def _creso(self) -> int: ...
@property
def _td64_unit(self) -> str: ...
class FreqGroup(Enum):
FR_ANN: int
FR_QTR: int
FR_MTH: int
FR_WK: int
FR_BUS: int
FR_DAY: int
FR_HR: int
FR_MIN: int
FR_SEC: int
FR_MS: int
FR_US: int
FR_NS: int
FR_UND: int
@staticmethod
def from_period_dtype_code(code: int) -> FreqGroup: ...
class Resolution(Enum):
RESO_NS: int
RESO_US: int
RESO_MS: int
RESO_SEC: int
RESO_MIN: int
RESO_HR: int
RESO_DAY: int
RESO_MTH: int
RESO_QTR: int
RESO_YR: int
def __lt__(self, other: Resolution) -> bool: ...
def __ge__(self, other: Resolution) -> bool: ...
@property
def attrname(self) -> str: ...
@classmethod
def from_attrname(cls, attrname: str) -> Resolution: ...
@classmethod
def get_reso_from_freqstr(cls, freq: str) -> Resolution: ...
@property
def attr_abbrev(self) -> str: ...
class NpyDatetimeUnit(Enum):
NPY_FR_Y: int
NPY_FR_M: int
NPY_FR_W: int
NPY_FR_D: int
NPY_FR_h: int
NPY_FR_m: int
NPY_FR_s: int
NPY_FR_ms: int
NPY_FR_us: int
NPY_FR_ns: int
NPY_FR_ps: int
NPY_FR_fs: int
NPY_FR_as: int
NPY_FR_GENERIC: int
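# A minimal sketch, assuming current pandas: these enums back the public
# ``unit`` / ``as_unit`` machinery on Timestamp and Timedelta.
import pandas as pd

ts = pd.Timestamp("2024-01-01 00:00:00.123456789")
assert ts.unit == "ns"                 # nanosecond-precision input
assert ts.as_unit("us").unit == "us"   # coarser resolution; the sub-microsecond part is rounded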

View File

@ -0,0 +1,62 @@
import numpy as np
from pandas._typing import npt
def build_field_sarray(
dtindex: npt.NDArray[np.int64], # const int64_t[:]
reso: int, # NPY_DATETIMEUNIT
) -> np.ndarray: ...
def month_position_check(fields, weekdays) -> str | None: ...
def get_date_name_field(
dtindex: npt.NDArray[np.int64], # const int64_t[:]
field: str,
locale: str | None = ...,
reso: int = ..., # NPY_DATETIMEUNIT
) -> npt.NDArray[np.object_]: ...
def get_start_end_field(
dtindex: npt.NDArray[np.int64],
field: str,
freqstr: str | None = ...,
month_kw: int = ...,
reso: int = ..., # NPY_DATETIMEUNIT
) -> npt.NDArray[np.bool_]: ...
def get_date_field(
dtindex: npt.NDArray[np.int64], # const int64_t[:]
field: str,
reso: int = ..., # NPY_DATETIMEUNIT
) -> npt.NDArray[np.int32]: ...
def get_timedelta_field(
tdindex: npt.NDArray[np.int64], # const int64_t[:]
field: str,
reso: int = ..., # NPY_DATETIMEUNIT
) -> npt.NDArray[np.int32]: ...
def get_timedelta_days(
tdindex: npt.NDArray[np.int64], # const int64_t[:]
reso: int = ..., # NPY_DATETIMEUNIT
) -> npt.NDArray[np.int64]: ...
def isleapyear_arr(
years: np.ndarray,
) -> npt.NDArray[np.bool_]: ...
def build_isocalendar_sarray(
dtindex: npt.NDArray[np.int64], # const int64_t[:]
reso: int, # NPY_DATETIMEUNIT
) -> np.ndarray: ...
def _get_locale_names(name_type: str, locale: str | None = ...): ...
class RoundTo:
@property
def MINUS_INFTY(self) -> int: ...
@property
def PLUS_INFTY(self) -> int: ...
@property
def NEAREST_HALF_EVEN(self) -> int: ...
@property
def NEAREST_HALF_PLUS_INFTY(self) -> int: ...
@property
def NEAREST_HALF_MINUS_INFTY(self) -> int: ...
def round_nsint64(
values: npt.NDArray[np.int64],
mode: RoundTo,
nanos: int,
) -> npt.NDArray[np.int64]: ...
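# A minimal sketch of the public accessors these field kernels power, assuming
# current pandas:
import pandas as pd

idx = pd.date_range("2024-02-28", periods=3, freq="D")
assert list(idx.day) == [28, 29, 1]
assert list(idx.is_month_end) == [False, True, False]
assert list(idx.day_name()) == ["Wednesday", "Thursday", "Friday"]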

View File

@ -0,0 +1,141 @@
from datetime import (
datetime,
timedelta,
tzinfo as _tzinfo,
)
import typing
import numpy as np
from pandas._libs.tslibs.period import Period
from pandas._typing import Self
NaT: NaTType
iNaT: int
nat_strings: set[str]
_NaTComparisonTypes: typing.TypeAlias = (
datetime | timedelta | Period | np.datetime64 | np.timedelta64
)
class _NatComparison:
def __call__(self, other: _NaTComparisonTypes) -> bool: ...
class NaTType:
_value: np.int64
@property
def value(self) -> int: ...
@property
def asm8(self) -> np.datetime64: ...
def to_datetime64(self) -> np.datetime64: ...
def to_numpy(
self, dtype: np.dtype | str | None = ..., copy: bool = ...
) -> np.datetime64 | np.timedelta64: ...
@property
def is_leap_year(self) -> bool: ...
@property
def is_month_start(self) -> bool: ...
@property
def is_quarter_start(self) -> bool: ...
@property
def is_year_start(self) -> bool: ...
@property
def is_month_end(self) -> bool: ...
@property
def is_quarter_end(self) -> bool: ...
@property
def is_year_end(self) -> bool: ...
@property
def day_of_year(self) -> float: ...
@property
def dayofyear(self) -> float: ...
@property
def days_in_month(self) -> float: ...
@property
def daysinmonth(self) -> float: ...
@property
def day_of_week(self) -> float: ...
@property
def dayofweek(self) -> float: ...
@property
def week(self) -> float: ...
@property
def weekofyear(self) -> float: ...
def day_name(self) -> float: ...
def month_name(self) -> float: ...
def weekday(self) -> float: ...
def isoweekday(self) -> float: ...
def total_seconds(self) -> float: ...
def today(self, *args, **kwargs) -> NaTType: ...
def now(self, *args, **kwargs) -> NaTType: ...
def to_pydatetime(self) -> NaTType: ...
def date(self) -> NaTType: ...
def round(self) -> NaTType: ...
def floor(self) -> NaTType: ...
def ceil(self) -> NaTType: ...
@property
def tzinfo(self) -> None: ...
@property
def tz(self) -> None: ...
def tz_convert(self, tz: _tzinfo | str | None) -> NaTType: ...
def tz_localize(
self,
tz: _tzinfo | str | None,
ambiguous: str = ...,
nonexistent: str = ...,
) -> NaTType: ...
def replace(
self,
year: int | None = ...,
month: int | None = ...,
day: int | None = ...,
hour: int | None = ...,
minute: int | None = ...,
second: int | None = ...,
microsecond: int | None = ...,
nanosecond: int | None = ...,
tzinfo: _tzinfo | None = ...,
fold: int | None = ...,
) -> NaTType: ...
@property
def year(self) -> float: ...
@property
def quarter(self) -> float: ...
@property
def month(self) -> float: ...
@property
def day(self) -> float: ...
@property
def hour(self) -> float: ...
@property
def minute(self) -> float: ...
@property
def second(self) -> float: ...
@property
def millisecond(self) -> float: ...
@property
def microsecond(self) -> float: ...
@property
def nanosecond(self) -> float: ...
# inject Timedelta properties
@property
def days(self) -> float: ...
@property
def microseconds(self) -> float: ...
@property
def nanoseconds(self) -> float: ...
# inject Period properties
@property
def qyear(self) -> float: ...
def __eq__(self, other: object) -> bool: ...
def __ne__(self, other: object) -> bool: ...
__lt__: _NatComparison
__le__: _NatComparison
__gt__: _NatComparison
__ge__: _NatComparison
def __sub__(self, other: Self | timedelta | datetime) -> Self: ...
def __rsub__(self, other: Self | timedelta | datetime) -> Self: ...
def __add__(self, other: Self | timedelta | datetime) -> Self: ...
def __radd__(self, other: Self | timedelta | datetime) -> Self: ...
def __hash__(self) -> int: ...
def as_unit(self, unit: str, round_ok: bool = ...) -> NaTType: ...
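# A minimal sketch of NaT semantics, assuming current pandas: ordering
# comparisons come back False, arithmetic propagates NaT, and the numeric
# fields typed ``float`` above surface as NaN.
import pandas as pd

assert (pd.NaT == pd.Timestamp("2024-01-01")) is False
assert (pd.NaT < pd.Timestamp("2024-01-01")) is False
assert pd.NaT + pd.Timedelta("1D") is pd.NaT
assert pd.isna(pd.NaT.day_of_week)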

View File

@ -0,0 +1,27 @@
import numpy as np
from pandas._typing import npt
class OutOfBoundsDatetime(ValueError): ...
class OutOfBoundsTimedelta(ValueError): ...
# only exposed for testing
def py_get_unit_from_dtype(dtype: np.dtype): ...
def py_td64_to_tdstruct(td64: int, unit: int) -> dict: ...
def astype_overflowsafe(
values: np.ndarray,
dtype: np.dtype,
copy: bool = ...,
round_ok: bool = ...,
is_coerce: bool = ...,
) -> np.ndarray: ...
def is_unitless(dtype: np.dtype) -> bool: ...
def compare_mismatched_resolutions(
left: np.ndarray, right: np.ndarray, op
) -> npt.NDArray[np.bool_]: ...
def add_overflowsafe(
left: npt.NDArray[np.int64],
right: npt.NDArray[np.int64],
) -> npt.NDArray[np.int64]: ...
def get_supported_dtype(dtype: np.dtype) -> np.dtype: ...
def is_supported_dtype(dtype: np.dtype) -> bool: ...
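# A minimal sketch, assuming current pandas: OutOfBoundsDatetime is re-exported
# as pandas.errors.OutOfBoundsDatetime; stepping past Timestamp.max at
# nanosecond resolution raises it (OverflowError is caught as well to stay
# version-agnostic).
import pandas as pd
from pandas.errors import OutOfBoundsDatetime

try:
    pd.Timestamp.max + pd.Timedelta(1, "D")
    raised = False
except (OutOfBoundsDatetime, OverflowError):
    raised = True
assert raised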

View File

@ -0,0 +1,287 @@
from datetime import (
datetime,
time,
timedelta,
)
from typing import (
Any,
Collection,
Literal,
TypeVar,
overload,
)
import numpy as np
from pandas._libs.tslibs.nattype import NaTType
from pandas._typing import (
OffsetCalendar,
Self,
npt,
)
from .timedeltas import Timedelta
_BaseOffsetT = TypeVar("_BaseOffsetT", bound=BaseOffset)
_DatetimeT = TypeVar("_DatetimeT", bound=datetime)
_TimedeltaT = TypeVar("_TimedeltaT", bound=timedelta)
_relativedelta_kwds: set[str]
prefix_mapping: dict[str, type]
class ApplyTypeError(TypeError): ...
class BaseOffset:
n: int
normalize: bool
def __init__(self, n: int = ..., normalize: bool = ...) -> None: ...
def __eq__(self, other) -> bool: ...
def __ne__(self, other) -> bool: ...
def __hash__(self) -> int: ...
@property
def kwds(self) -> dict: ...
@property
def base(self) -> BaseOffset: ...
@overload
def __add__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ...
@overload
def __add__(self, other: BaseOffset) -> Self: ...
@overload
def __add__(self, other: _DatetimeT) -> _DatetimeT: ...
@overload
def __add__(self, other: _TimedeltaT) -> _TimedeltaT: ...
@overload
def __radd__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ...
@overload
def __radd__(self, other: BaseOffset) -> Self: ...
@overload
def __radd__(self, other: _DatetimeT) -> _DatetimeT: ...
@overload
def __radd__(self, other: _TimedeltaT) -> _TimedeltaT: ...
@overload
def __radd__(self, other: NaTType) -> NaTType: ...
def __sub__(self, other: BaseOffset) -> Self: ...
@overload
def __rsub__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ...
@overload
def __rsub__(self, other: BaseOffset): ...
@overload
def __rsub__(self, other: _DatetimeT) -> _DatetimeT: ...
@overload
def __rsub__(self, other: _TimedeltaT) -> _TimedeltaT: ...
@overload
def __mul__(self, other: np.ndarray) -> np.ndarray: ...
@overload
def __mul__(self, other: int): ...
@overload
def __rmul__(self, other: np.ndarray) -> np.ndarray: ...
@overload
def __rmul__(self, other: int) -> Self: ...
def __neg__(self) -> Self: ...
def copy(self) -> Self: ...
@property
def name(self) -> str: ...
@property
def rule_code(self) -> str: ...
@property
def freqstr(self) -> str: ...
def _apply(self, other): ...
def _apply_array(self, dtarr: np.ndarray) -> np.ndarray: ...
def rollback(self, dt: datetime) -> datetime: ...
def rollforward(self, dt: datetime) -> datetime: ...
def is_on_offset(self, dt: datetime) -> bool: ...
def __setstate__(self, state) -> None: ...
def __getstate__(self): ...
@property
def nanos(self) -> int: ...
def is_anchored(self) -> bool: ...
def _get_offset(name: str) -> BaseOffset: ...
class SingleConstructorOffset(BaseOffset):
@classmethod
def _from_name(cls, suffix: None = ...): ...
def __reduce__(self): ...
@overload
def to_offset(freq: None, is_period: bool = ...) -> None: ...
@overload
def to_offset(freq: _BaseOffsetT, is_period: bool = ...) -> _BaseOffsetT: ...
@overload
def to_offset(freq: timedelta | str, is_period: bool = ...) -> BaseOffset: ...
class Tick(SingleConstructorOffset):
_creso: int
_prefix: str
def __init__(self, n: int = ..., normalize: bool = ...) -> None: ...
@property
def delta(self) -> Timedelta: ...
@property
def nanos(self) -> int: ...
def delta_to_tick(delta: timedelta) -> Tick: ...
class Day(Tick): ...
class Hour(Tick): ...
class Minute(Tick): ...
class Second(Tick): ...
class Milli(Tick): ...
class Micro(Tick): ...
class Nano(Tick): ...
class RelativeDeltaOffset(BaseOffset):
def __init__(self, n: int = ..., normalize: bool = ..., **kwds: Any) -> None: ...
class BusinessMixin(SingleConstructorOffset):
def __init__(
self, n: int = ..., normalize: bool = ..., offset: timedelta = ...
) -> None: ...
class BusinessDay(BusinessMixin): ...
class BusinessHour(BusinessMixin):
def __init__(
self,
n: int = ...,
normalize: bool = ...,
start: str | time | Collection[str | time] = ...,
end: str | time | Collection[str | time] = ...,
offset: timedelta = ...,
) -> None: ...
class WeekOfMonthMixin(SingleConstructorOffset):
def __init__(
self, n: int = ..., normalize: bool = ..., weekday: int = ...
) -> None: ...
class YearOffset(SingleConstructorOffset):
def __init__(
self, n: int = ..., normalize: bool = ..., month: int | None = ...
) -> None: ...
class BYearEnd(YearOffset): ...
class BYearBegin(YearOffset): ...
class YearEnd(YearOffset): ...
class YearBegin(YearOffset): ...
class QuarterOffset(SingleConstructorOffset):
def __init__(
self, n: int = ..., normalize: bool = ..., startingMonth: int | None = ...
) -> None: ...
class BQuarterEnd(QuarterOffset): ...
class BQuarterBegin(QuarterOffset): ...
class QuarterEnd(QuarterOffset): ...
class QuarterBegin(QuarterOffset): ...
class MonthOffset(SingleConstructorOffset): ...
class MonthEnd(MonthOffset): ...
class MonthBegin(MonthOffset): ...
class BusinessMonthEnd(MonthOffset): ...
class BusinessMonthBegin(MonthOffset): ...
class SemiMonthOffset(SingleConstructorOffset):
def __init__(
self, n: int = ..., normalize: bool = ..., day_of_month: int | None = ...
) -> None: ...
class SemiMonthEnd(SemiMonthOffset): ...
class SemiMonthBegin(SemiMonthOffset): ...
class Week(SingleConstructorOffset):
def __init__(
self, n: int = ..., normalize: bool = ..., weekday: int | None = ...
) -> None: ...
class WeekOfMonth(WeekOfMonthMixin):
def __init__(
self, n: int = ..., normalize: bool = ..., week: int = ..., weekday: int = ...
) -> None: ...
class LastWeekOfMonth(WeekOfMonthMixin): ...
class FY5253Mixin(SingleConstructorOffset):
def __init__(
self,
n: int = ...,
normalize: bool = ...,
weekday: int = ...,
startingMonth: int = ...,
variation: Literal["nearest", "last"] = ...,
) -> None: ...
class FY5253(FY5253Mixin): ...
class FY5253Quarter(FY5253Mixin):
def __init__(
self,
n: int = ...,
normalize: bool = ...,
weekday: int = ...,
startingMonth: int = ...,
qtr_with_extra_week: int = ...,
variation: Literal["nearest", "last"] = ...,
) -> None: ...
class Easter(SingleConstructorOffset): ...
class _CustomBusinessMonth(BusinessMixin):
def __init__(
self,
n: int = ...,
normalize: bool = ...,
weekmask: str = ...,
holidays: list | None = ...,
calendar: OffsetCalendar | None = ...,
offset: timedelta = ...,
) -> None: ...
class CustomBusinessDay(BusinessDay):
def __init__(
self,
n: int = ...,
normalize: bool = ...,
weekmask: str = ...,
holidays: list | None = ...,
calendar: OffsetCalendar | None = ...,
offset: timedelta = ...,
) -> None: ...
class CustomBusinessHour(BusinessHour):
def __init__(
self,
n: int = ...,
normalize: bool = ...,
weekmask: str = ...,
holidays: list | None = ...,
calendar: OffsetCalendar | None = ...,
start: str | time | Collection[str | time] = ...,
end: str | time | Collection[str | time] = ...,
offset: timedelta = ...,
) -> None: ...
class CustomBusinessMonthEnd(_CustomBusinessMonth): ...
class CustomBusinessMonthBegin(_CustomBusinessMonth): ...
class OffsetMeta(type): ...
class DateOffset(RelativeDeltaOffset, metaclass=OffsetMeta): ...
BDay = BusinessDay
BMonthEnd = BusinessMonthEnd
BMonthBegin = BusinessMonthBegin
CBMonthEnd = CustomBusinessMonthEnd
CBMonthBegin = CustomBusinessMonthBegin
CDay = CustomBusinessDay
def roll_qtrday(
other: datetime, n: int, month: int, day_opt: str, modby: int
) -> int: ...
INVALID_FREQ_ERR_MSG: Literal["Invalid frequency: {0}"]
def shift_months(
dtindex: npt.NDArray[np.int64],
months: int,
day_opt: str | None = ...,
reso: int = ...,
) -> npt.NDArray[np.int64]: ...
_offset_map: dict[str, BaseOffset]
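# A minimal sketch of how these offsets are used, assuming current pandas:
# to_offset is public at pandas.tseries.frequencies.to_offset, and offsets
# combine with Timestamps through addition and rollforward/rollback.
import pandas as pd
from pandas.tseries.frequencies import to_offset

off = to_offset("2W-MON")                       # Week(n=2, weekday=0)
ts = pd.Timestamp("2024-01-03")                 # a Wednesday
assert ts + off == pd.Timestamp("2024-01-15")   # roll to Monday, then one more week
assert off.rollforward(ts) == pd.Timestamp("2024-01-08")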

View File

@ -0,0 +1,33 @@
from datetime import datetime
import numpy as np
from pandas._typing import npt
class DateParseError(ValueError): ...
def py_parse_datetime_string(
date_string: str,
dayfirst: bool = ...,
yearfirst: bool = ...,
) -> datetime: ...
def parse_datetime_string_with_reso(
date_string: str,
freq: str | None = ...,
dayfirst: bool | None = ...,
yearfirst: bool | None = ...,
) -> tuple[datetime, str]: ...
def _does_string_look_like_datetime(py_string: str) -> bool: ...
def quarter_to_myear(year: int, quarter: int, freq: str) -> tuple[int, int]: ...
def try_parse_dates(
values: npt.NDArray[np.object_], # object[:]
parser,
) -> npt.NDArray[np.object_]: ...
def guess_datetime_format(
dt_str: str,
dayfirst: bool | None = ...,
) -> str | None: ...
def concat_date_cols(
date_cols: tuple,
) -> npt.NDArray[np.object_]: ...
def get_rule_month(source: str) -> str: ...
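# A minimal sketch, assuming current pandas: guess_datetime_format is public
# as pandas.tseries.api.guess_datetime_format.
from pandas.tseries.api import guess_datetime_format

assert guess_datetime_format("2024-01-31") == "%Y-%m-%d"
assert guess_datetime_format("31/01/2024", dayfirst=True) == "%d/%m/%Y"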

View File

@ -0,0 +1,135 @@
from datetime import timedelta
from typing import Literal
import numpy as np
from pandas._libs.tslibs.dtypes import PeriodDtypeBase
from pandas._libs.tslibs.nattype import NaTType
from pandas._libs.tslibs.offsets import BaseOffset
from pandas._libs.tslibs.timestamps import Timestamp
from pandas._typing import (
Frequency,
npt,
)
INVALID_FREQ_ERR_MSG: str
DIFFERENT_FREQ: str
class IncompatibleFrequency(ValueError): ...
def periodarr_to_dt64arr(
periodarr: npt.NDArray[np.int64], # const int64_t[:]
freq: int,
) -> npt.NDArray[np.int64]: ...
def period_asfreq_arr(
arr: npt.NDArray[np.int64],
freq1: int,
freq2: int,
end: bool,
) -> npt.NDArray[np.int64]: ...
def get_period_field_arr(
field: str,
arr: npt.NDArray[np.int64], # const int64_t[:]
freq: int,
) -> npt.NDArray[np.int64]: ...
def from_ordinals(
values: npt.NDArray[np.int64], # const int64_t[:]
freq: timedelta | BaseOffset | str,
) -> npt.NDArray[np.int64]: ...
def extract_ordinals(
values: npt.NDArray[np.object_],
freq: Frequency | int,
) -> npt.NDArray[np.int64]: ...
def extract_freq(
values: npt.NDArray[np.object_],
) -> BaseOffset: ...
def period_array_strftime(
values: npt.NDArray[np.int64],
dtype_code: int,
na_rep,
date_format: str | None,
) -> npt.NDArray[np.object_]: ...
# exposed for tests
def period_asfreq(ordinal: int, freq1: int, freq2: int, end: bool) -> int: ...
def period_ordinal(
y: int, m: int, d: int, h: int, min: int, s: int, us: int, ps: int, freq: int
) -> int: ...
def freq_to_dtype_code(freq: BaseOffset) -> int: ...
def validate_end_alias(how: str) -> Literal["E", "S"]: ...
class PeriodMixin:
@property
def end_time(self) -> Timestamp: ...
@property
def start_time(self) -> Timestamp: ...
def _require_matching_freq(self, other: BaseOffset, base: bool = ...) -> None: ...
class Period(PeriodMixin):
ordinal: int # int64_t
freq: BaseOffset
_dtype: PeriodDtypeBase
# error: "__new__" must return a class instance (got "Union[Period, NaTType]")
def __new__( # type: ignore[misc]
cls,
value=...,
freq: int | str | BaseOffset | None = ...,
ordinal: int | None = ...,
year: int | None = ...,
month: int | None = ...,
quarter: int | None = ...,
day: int | None = ...,
hour: int | None = ...,
minute: int | None = ...,
second: int | None = ...,
) -> Period | NaTType: ...
@classmethod
def _maybe_convert_freq(cls, freq) -> BaseOffset: ...
@classmethod
def _from_ordinal(cls, ordinal: int, freq: BaseOffset) -> Period: ...
@classmethod
def now(cls, freq: Frequency) -> Period: ...
def strftime(self, fmt: str | None) -> str: ...
def to_timestamp(
self,
freq: str | BaseOffset | None = ...,
how: str = ...,
) -> Timestamp: ...
def asfreq(self, freq: str | BaseOffset, how: str = ...) -> Period: ...
@property
def freqstr(self) -> str: ...
@property
def is_leap_year(self) -> bool: ...
@property
def daysinmonth(self) -> int: ...
@property
def days_in_month(self) -> int: ...
@property
def qyear(self) -> int: ...
@property
def quarter(self) -> int: ...
@property
def day_of_year(self) -> int: ...
@property
def weekday(self) -> int: ...
@property
def day_of_week(self) -> int: ...
@property
def week(self) -> int: ...
@property
def weekofyear(self) -> int: ...
@property
def second(self) -> int: ...
@property
def minute(self) -> int: ...
@property
def hour(self) -> int: ...
@property
def day(self) -> int: ...
@property
def month(self) -> int: ...
@property
def year(self) -> int: ...
def __sub__(self, other) -> Period | BaseOffset: ...
def __add__(self, other) -> Period: ...
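# A minimal sketch of Period arithmetic and frequency conversion, assuming
# current pandas:
import pandas as pd

p = pd.Period("2024-03", freq="M")
assert p.days_in_month == 31
assert p + 1 == pd.Period("2024-04", freq="M")
assert p.asfreq("D", how="end") == pd.Period("2024-03-31", freq="D")
assert p.to_timestamp(how="start") == pd.Timestamp("2024-03-01")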

View File

@ -0,0 +1,14 @@
import numpy as np
from pandas._typing import npt
def array_strptime(
values: npt.NDArray[np.object_],
fmt: str | None,
exact: bool = ...,
errors: str = ...,
utc: bool = ...,
creso: int = ..., # NPY_DATETIMEUNIT
) -> tuple[np.ndarray, np.ndarray]: ...
# first ndarray is M8[ns], second is object ndarray of tzinfo | None
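# A minimal sketch of the public entry point, assuming current pandas:
# array_strptime backs pandas.to_datetime when an explicit format is given.
import pandas as pd

out = pd.to_datetime(["31-12-2024", "01-01-2025"], format="%d-%m-%Y")
assert list(out.year) == [2024, 2025]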

View File

@ -0,0 +1,174 @@
from datetime import timedelta
from typing import (
ClassVar,
Literal,
TypeAlias,
TypeVar,
overload,
)
import numpy as np
from pandas._libs.tslibs import (
NaTType,
Tick,
)
from pandas._typing import (
Frequency,
Self,
npt,
)
# This should be kept consistent with the keys in the dict timedelta_abbrevs
# in pandas/_libs/tslibs/timedeltas.pyx
UnitChoices: TypeAlias = Literal[
"Y",
"y",
"M",
"W",
"w",
"D",
"d",
"days",
"day",
"hours",
"hour",
"hr",
"h",
"m",
"minute",
"min",
"minutes",
"T",
"t",
"s",
"seconds",
"sec",
"second",
"ms",
"milliseconds",
"millisecond",
"milli",
"millis",
"L",
"l",
"us",
"microseconds",
"microsecond",
"µs",
"micro",
"micros",
"u",
"ns",
"nanoseconds",
"nano",
"nanos",
"nanosecond",
"n",
]
_S = TypeVar("_S", bound=timedelta)
def get_unit_for_round(freq, creso: int) -> int: ...
def disallow_ambiguous_unit(unit: str | None) -> None: ...
def ints_to_pytimedelta(
m8values: npt.NDArray[np.timedelta64],
box: bool = ...,
) -> npt.NDArray[np.object_]: ...
def array_to_timedelta64(
values: npt.NDArray[np.object_],
unit: str | None = ...,
errors: str = ...,
) -> np.ndarray: ... # np.ndarray[m8ns]
def parse_timedelta_unit(unit: str | None) -> UnitChoices: ...
def delta_to_nanoseconds(
delta: np.timedelta64 | timedelta | Tick,
reso: int = ..., # NPY_DATETIMEUNIT
round_ok: bool = ...,
) -> int: ...
def floordiv_object_array(
left: np.ndarray, right: npt.NDArray[np.object_]
) -> np.ndarray: ...
def truediv_object_array(
left: np.ndarray, right: npt.NDArray[np.object_]
) -> np.ndarray: ...
class Timedelta(timedelta):
_creso: int
min: ClassVar[Timedelta]
max: ClassVar[Timedelta]
resolution: ClassVar[Timedelta]
value: int # np.int64
_value: int # np.int64
# error: "__new__" must return a class instance (got "Union[Timestamp, NaTType]")
def __new__( # type: ignore[misc]
cls: type[_S],
value=...,
unit: str | None = ...,
**kwargs: float | np.integer | np.floating,
) -> _S | NaTType: ...
@classmethod
def _from_value_and_reso(cls, value: np.int64, reso: int) -> Timedelta: ...
@property
def days(self) -> int: ...
@property
def seconds(self) -> int: ...
@property
def microseconds(self) -> int: ...
def total_seconds(self) -> float: ...
def to_pytimedelta(self) -> timedelta: ...
def to_timedelta64(self) -> np.timedelta64: ...
@property
def asm8(self) -> np.timedelta64: ...
# TODO: round/floor/ceil could return NaT?
def round(self, freq: Frequency) -> Self: ...
def floor(self, freq: Frequency) -> Self: ...
def ceil(self, freq: Frequency) -> Self: ...
@property
def resolution_string(self) -> str: ...
def __add__(self, other: timedelta) -> Timedelta: ...
def __radd__(self, other: timedelta) -> Timedelta: ...
def __sub__(self, other: timedelta) -> Timedelta: ...
def __rsub__(self, other: timedelta) -> Timedelta: ...
def __neg__(self) -> Timedelta: ...
def __pos__(self) -> Timedelta: ...
def __abs__(self) -> Timedelta: ...
def __mul__(self, other: float) -> Timedelta: ...
def __rmul__(self, other: float) -> Timedelta: ...
# error: Signature of "__floordiv__" incompatible with supertype "timedelta"
@overload # type: ignore[override]
def __floordiv__(self, other: timedelta) -> int: ...
@overload
def __floordiv__(self, other: float) -> Timedelta: ...
@overload
def __floordiv__(
self, other: npt.NDArray[np.timedelta64]
) -> npt.NDArray[np.intp]: ...
@overload
def __floordiv__(
self, other: npt.NDArray[np.number]
) -> npt.NDArray[np.timedelta64] | Timedelta: ...
@overload
def __rfloordiv__(self, other: timedelta | str) -> int: ...
@overload
def __rfloordiv__(self, other: None | NaTType) -> NaTType: ...
@overload
def __rfloordiv__(self, other: np.ndarray) -> npt.NDArray[np.timedelta64]: ...
@overload
def __truediv__(self, other: timedelta) -> float: ...
@overload
def __truediv__(self, other: float) -> Timedelta: ...
def __mod__(self, other: timedelta) -> Timedelta: ...
def __divmod__(self, other: timedelta) -> tuple[int, Timedelta]: ...
def __le__(self, other: timedelta) -> bool: ...
def __lt__(self, other: timedelta) -> bool: ...
def __ge__(self, other: timedelta) -> bool: ...
def __gt__(self, other: timedelta) -> bool: ...
def __hash__(self) -> int: ...
def isoformat(self) -> str: ...
def to_numpy(
self, dtype: npt.DTypeLike = ..., copy: bool = False
) -> np.timedelta64: ...
def view(self, dtype: npt.DTypeLike) -> object: ...
@property
def unit(self) -> str: ...
def as_unit(self, unit: str, round_ok: bool = ...) -> Timedelta: ...
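# A minimal sketch of the arithmetic overloads declared above, assuming
# current pandas (timedelta // timedelta -> int, Timedelta / number -> Timedelta):
import pandas as pd

td = pd.Timedelta("1 day 2h 30min")
assert td.total_seconds() == 95400.0
assert td // pd.Timedelta("1h") == 26
assert td / 2 == pd.Timedelta("13h 15min")
assert td.as_unit("s").unit == "s"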

View File

@ -0,0 +1,241 @@
from datetime import (
date as _date,
datetime,
time as _time,
timedelta,
tzinfo as _tzinfo,
)
from time import struct_time
from typing import (
ClassVar,
Literal,
TypeAlias,
overload,
)
import numpy as np
from pandas._libs.tslibs import (
BaseOffset,
NaTType,
Period,
Tick,
Timedelta,
)
from pandas._typing import (
Self,
TimestampNonexistent,
)
_TimeZones: TypeAlias = str | _tzinfo | None | int
def integer_op_not_supported(obj: object) -> TypeError: ...
class Timestamp(datetime):
_creso: int
min: ClassVar[Timestamp]
max: ClassVar[Timestamp]
resolution: ClassVar[Timedelta]
_value: int # np.int64
# error: "__new__" must return a class instance (got "Union[Timestamp, NaTType]")
def __new__( # type: ignore[misc]
cls: type[Self],
ts_input: np.integer | float | str | _date | datetime | np.datetime64 = ...,
year: int | None = ...,
month: int | None = ...,
day: int | None = ...,
hour: int | None = ...,
minute: int | None = ...,
second: int | None = ...,
microsecond: int | None = ...,
tzinfo: _tzinfo | None = ...,
*,
nanosecond: int | None = ...,
tz: _TimeZones = ...,
unit: str | int | None = ...,
fold: int | None = ...,
) -> Self | NaTType: ...
@classmethod
def _from_value_and_reso(
cls, value: int, reso: int, tz: _TimeZones
) -> Timestamp: ...
@property
def value(self) -> int: ... # np.int64
@property
def year(self) -> int: ...
@property
def month(self) -> int: ...
@property
def day(self) -> int: ...
@property
def hour(self) -> int: ...
@property
def minute(self) -> int: ...
@property
def second(self) -> int: ...
@property
def microsecond(self) -> int: ...
@property
def nanosecond(self) -> int: ...
@property
def tzinfo(self) -> _tzinfo | None: ...
@property
def tz(self) -> _tzinfo | None: ...
@property
def fold(self) -> int: ...
@classmethod
def fromtimestamp(cls, ts: float, tz: _TimeZones = ...) -> Self: ...
@classmethod
def utcfromtimestamp(cls, ts: float) -> Self: ...
@classmethod
def today(cls, tz: _TimeZones = ...) -> Self: ...
@classmethod
def fromordinal(
cls,
ordinal: int,
tz: _TimeZones = ...,
) -> Self: ...
@classmethod
def now(cls, tz: _TimeZones = ...) -> Self: ...
@classmethod
def utcnow(cls) -> Self: ...
# error: Signature of "combine" incompatible with supertype "datetime"
@classmethod
def combine( # type: ignore[override]
cls, date: _date, time: _time
) -> datetime: ...
@classmethod
def fromisoformat(cls, date_string: str) -> Self: ...
def strftime(self, format: str) -> str: ...
def __format__(self, fmt: str) -> str: ...
def toordinal(self) -> int: ...
def timetuple(self) -> struct_time: ...
def timestamp(self) -> float: ...
def utctimetuple(self) -> struct_time: ...
def date(self) -> _date: ...
def time(self) -> _time: ...
def timetz(self) -> _time: ...
# LSP violation: nanosecond is not present in datetime.datetime.replace
# and has positional args following it
def replace( # type: ignore[override]
self,
year: int | None = ...,
month: int | None = ...,
day: int | None = ...,
hour: int | None = ...,
minute: int | None = ...,
second: int | None = ...,
microsecond: int | None = ...,
nanosecond: int | None = ...,
tzinfo: _tzinfo | type[object] | None = ...,
fold: int | None = ...,
) -> Self: ...
# LSP violation: datetime.datetime.astimezone has a default value for tz
def astimezone(self, tz: _TimeZones) -> Self: ... # type: ignore[override]
def ctime(self) -> str: ...
def isoformat(self, sep: str = ..., timespec: str = ...) -> str: ...
@classmethod
def strptime(
# Note: strptime is actually disabled and raises NotImplementedError
cls,
date_string: str,
format: str,
) -> Self: ...
def utcoffset(self) -> timedelta | None: ...
def tzname(self) -> str | None: ...
def dst(self) -> timedelta | None: ...
def __le__(self, other: datetime) -> bool: ... # type: ignore[override]
def __lt__(self, other: datetime) -> bool: ... # type: ignore[override]
def __ge__(self, other: datetime) -> bool: ... # type: ignore[override]
def __gt__(self, other: datetime) -> bool: ... # type: ignore[override]
# error: Signature of "__add__" incompatible with supertype "date"/"datetime"
@overload # type: ignore[override]
def __add__(self, other: np.ndarray) -> np.ndarray: ...
@overload
def __add__(self, other: timedelta | np.timedelta64 | Tick) -> Self: ...
def __radd__(self, other: timedelta) -> Self: ...
@overload # type: ignore[override]
def __sub__(self, other: datetime) -> Timedelta: ...
@overload
def __sub__(self, other: timedelta | np.timedelta64 | Tick) -> Self: ...
def __hash__(self) -> int: ...
def weekday(self) -> int: ...
def isoweekday(self) -> int: ...
# Return type "Tuple[int, int, int]" of "isocalendar" incompatible with return
# type "_IsoCalendarDate" in supertype "date"
def isocalendar(self) -> tuple[int, int, int]: ... # type: ignore[override]
@property
def is_leap_year(self) -> bool: ...
@property
def is_month_start(self) -> bool: ...
@property
def is_quarter_start(self) -> bool: ...
@property
def is_year_start(self) -> bool: ...
@property
def is_month_end(self) -> bool: ...
@property
def is_quarter_end(self) -> bool: ...
@property
def is_year_end(self) -> bool: ...
def to_pydatetime(self, warn: bool = ...) -> datetime: ...
def to_datetime64(self) -> np.datetime64: ...
def to_period(self, freq: BaseOffset | str | None = None) -> Period: ...
def to_julian_date(self) -> np.float64: ...
@property
def asm8(self) -> np.datetime64: ...
def tz_convert(self, tz: _TimeZones) -> Self: ...
# TODO: could return NaT?
def tz_localize(
self,
tz: _TimeZones,
ambiguous: bool | Literal["raise", "NaT"] = ...,
nonexistent: TimestampNonexistent = ...,
) -> Self: ...
def normalize(self) -> Self: ...
# TODO: round/floor/ceil could return NaT?
def round(
self,
freq: str,
ambiguous: bool | Literal["raise", "NaT"] = ...,
nonexistent: TimestampNonexistent = ...,
) -> Self: ...
def floor(
self,
freq: str,
ambiguous: bool | Literal["raise", "NaT"] = ...,
nonexistent: TimestampNonexistent = ...,
) -> Self: ...
def ceil(
self,
freq: str,
ambiguous: bool | Literal["raise", "NaT"] = ...,
nonexistent: TimestampNonexistent = ...,
) -> Self: ...
def day_name(self, locale: str | None = ...) -> str: ...
def month_name(self, locale: str | None = ...) -> str: ...
@property
def day_of_week(self) -> int: ...
@property
def dayofweek(self) -> int: ...
@property
def day_of_year(self) -> int: ...
@property
def dayofyear(self) -> int: ...
@property
def quarter(self) -> int: ...
@property
def week(self) -> int: ...
def to_numpy(
self, dtype: np.dtype | None = ..., copy: bool = ...
) -> np.datetime64: ...
@property
def _date_repr(self) -> str: ...
@property
def days_in_month(self) -> int: ...
@property
def daysinmonth(self) -> int: ...
@property
def unit(self) -> str: ...
def as_unit(self, unit: str, round_ok: bool = ...) -> Timestamp: ...
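# A minimal sketch of a few Timestamp methods declared above, assuming
# current pandas and an available IANA timezone database:
import pandas as pd

ts = pd.Timestamp("2024-03-31 23:45", tz="UTC")
assert ts.day_name() == "Sunday"
assert ts.floor("h") == pd.Timestamp("2024-03-31 23:00", tz="UTC")
assert ts.tz_convert("Europe/Berlin").hour == 1   # CEST (UTC+2) after the DST switch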

View File

@ -0,0 +1,21 @@
from datetime import (
datetime,
tzinfo,
)
from typing import Callable
import numpy as np
# imported from dateutil.tz
dateutil_gettz: Callable[[str], tzinfo]
def tz_standardize(tz: tzinfo) -> tzinfo: ...
def tz_compare(start: tzinfo | None, end: tzinfo | None) -> bool: ...
def infer_tzinfo(
start: datetime | None,
end: datetime | None,
) -> tzinfo | None: ...
def maybe_get_tz(tz: str | int | np.int64 | tzinfo | None) -> tzinfo | None: ...
def get_timezone(tz: tzinfo) -> tzinfo | str: ...
def is_utc(tz: tzinfo | None) -> bool: ...
def is_fixed_offset(tz: tzinfo) -> bool: ...
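# A minimal sketch, assuming current pandas: timezone handling surfaces
# publicly on tz-aware Timestamps, and converting zones preserves the instant.
import pandas as pd

a = pd.Timestamp("2024-01-01 12:00", tz="UTC")
b = a.tz_convert("Asia/Tokyo")
assert a == b                              # same instant, different tzinfo
assert b.utcoffset() == pd.Timedelta("9h")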

View File

@ -0,0 +1,21 @@
from datetime import (
timedelta,
tzinfo,
)
from typing import Iterable
import numpy as np
from pandas._typing import npt
# tz_convert_from_utc_single exposed for testing
def tz_convert_from_utc_single(
utc_val: np.int64, tz: tzinfo, creso: int = ...
) -> np.int64: ...
def tz_localize_to_utc(
vals: npt.NDArray[np.int64],
tz: tzinfo | None,
ambiguous: str | bool | Iterable[bool] | None = ...,
nonexistent: str | timedelta | np.timedelta64 | None = ...,
creso: int = ..., # NPY_DATETIMEUNIT
) -> npt.NDArray[np.int64]: ...
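# A minimal sketch, assuming current pandas: tz_localize_to_utc backs the
# public tz_localize, including the ``nonexistent`` handling for DST gaps.
import pandas as pd

idx = pd.date_range("2024-03-10 01:30", periods=2, freq="h")       # naive wall times
local = idx.tz_localize("US/Eastern", nonexistent="shift_forward")
assert [t.hour for t in local] == [1, 3]   # 02:30 does not exist; shifted to 03:00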

View File

@ -0,0 +1,43 @@
"""
For cython types that cannot be represented precisely, closest-available
python equivalents are used, and the precise types kept as adjacent comments.
"""
from datetime import tzinfo
import numpy as np
from pandas._libs.tslibs.dtypes import Resolution
from pandas._typing import npt
def dt64arr_to_periodarr(
stamps: npt.NDArray[np.int64],
freq: int,
tz: tzinfo | None,
reso: int = ..., # NPY_DATETIMEUNIT
) -> npt.NDArray[np.int64]: ...
def is_date_array_normalized(
stamps: npt.NDArray[np.int64],
tz: tzinfo | None,
reso: int, # NPY_DATETIMEUNIT
) -> bool: ...
def normalize_i8_timestamps(
stamps: npt.NDArray[np.int64],
tz: tzinfo | None,
reso: int, # NPY_DATETIMEUNIT
) -> npt.NDArray[np.int64]: ...
def get_resolution(
stamps: npt.NDArray[np.int64],
tz: tzinfo | None = ...,
reso: int = ..., # NPY_DATETIMEUNIT
) -> Resolution: ...
def ints_to_pydatetime(
stamps: npt.NDArray[np.int64],
tz: tzinfo | None = ...,
box: str = ...,
reso: int = ..., # NPY_DATETIMEUNIT
) -> npt.NDArray[np.object_]: ...
def tz_convert_from_utc(
stamps: npt.NDArray[np.int64],
tz: tzinfo | None,
reso: int = ..., # NPY_DATETIMEUNIT
) -> npt.NDArray[np.int64]: ...
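# A minimal sketch, assuming current pandas: these vectorized kernels back
# DatetimeIndex.normalize() and the index-level ``resolution`` report.
import pandas as pd

idx = pd.DatetimeIndex(["2024-05-01 09:30", "2024-05-02 18:45"], tz="UTC")
assert list(idx.normalize().hour) == [0, 0]
assert idx.resolution == "minute"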

View File

@ -0,0 +1,127 @@
from typing import (
Any,
Callable,
Literal,
)
import numpy as np
from pandas._typing import (
WindowingRankType,
npt,
)
def roll_sum(
values: np.ndarray, # const float64_t[:]
start: np.ndarray, # np.ndarray[np.int64]
end: np.ndarray, # np.ndarray[np.int64]
minp: int, # int64_t
) -> np.ndarray: ... # np.ndarray[float]
def roll_mean(
values: np.ndarray, # const float64_t[:]
start: np.ndarray, # np.ndarray[np.int64]
end: np.ndarray, # np.ndarray[np.int64]
minp: int, # int64_t
) -> np.ndarray: ... # np.ndarray[float]
def roll_var(
values: np.ndarray, # const float64_t[:]
start: np.ndarray, # np.ndarray[np.int64]
end: np.ndarray, # np.ndarray[np.int64]
minp: int, # int64_t
ddof: int = ...,
) -> np.ndarray: ... # np.ndarray[float]
def roll_skew(
values: np.ndarray, # np.ndarray[np.float64]
start: np.ndarray, # np.ndarray[np.int64]
end: np.ndarray, # np.ndarray[np.int64]
minp: int, # int64_t
) -> np.ndarray: ... # np.ndarray[float]
def roll_kurt(
values: np.ndarray, # np.ndarray[np.float64]
start: np.ndarray, # np.ndarray[np.int64]
end: np.ndarray, # np.ndarray[np.int64]
minp: int, # int64_t
) -> np.ndarray: ... # np.ndarray[float]
def roll_median_c(
values: np.ndarray, # np.ndarray[np.float64]
start: np.ndarray, # np.ndarray[np.int64]
end: np.ndarray, # np.ndarray[np.int64]
minp: int, # int64_t
) -> np.ndarray: ... # np.ndarray[float]
def roll_max(
values: np.ndarray, # np.ndarray[np.float64]
start: np.ndarray, # np.ndarray[np.int64]
end: np.ndarray, # np.ndarray[np.int64]
minp: int, # int64_t
) -> np.ndarray: ... # np.ndarray[float]
def roll_min(
values: np.ndarray, # np.ndarray[np.float64]
start: np.ndarray, # np.ndarray[np.int64]
end: np.ndarray, # np.ndarray[np.int64]
minp: int, # int64_t
) -> np.ndarray: ... # np.ndarray[float]
def roll_quantile(
values: np.ndarray, # const float64_t[:]
start: np.ndarray, # np.ndarray[np.int64]
end: np.ndarray, # np.ndarray[np.int64]
minp: int, # int64_t
quantile: float, # float64_t
interpolation: Literal["linear", "lower", "higher", "nearest", "midpoint"],
) -> np.ndarray: ... # np.ndarray[float]
def roll_rank(
values: np.ndarray,
start: np.ndarray,
end: np.ndarray,
minp: int,
percentile: bool,
method: WindowingRankType,
ascending: bool,
) -> np.ndarray: ... # np.ndarray[float]
def roll_apply(
obj: object,
start: np.ndarray, # np.ndarray[np.int64]
end: np.ndarray, # np.ndarray[np.int64]
minp: int, # int64_t
function: Callable[..., Any],
raw: bool,
args: tuple[Any, ...],
kwargs: dict[str, Any],
) -> npt.NDArray[np.float64]: ...
def roll_weighted_sum(
values: np.ndarray, # const float64_t[:]
weights: np.ndarray, # const float64_t[:]
minp: int,
) -> np.ndarray: ... # np.ndarray[np.float64]
def roll_weighted_mean(
values: np.ndarray, # const float64_t[:]
weights: np.ndarray, # const float64_t[:]
minp: int,
) -> np.ndarray: ... # np.ndarray[np.float64]
def roll_weighted_var(
values: np.ndarray, # const float64_t[:]
weights: np.ndarray, # const float64_t[:]
minp: int, # int64_t
ddof: int, # unsigned int
) -> np.ndarray: ... # np.ndarray[np.float64]
def ewm(
vals: np.ndarray, # const float64_t[:]
start: np.ndarray, # const int64_t[:]
end: np.ndarray, # const int64_t[:]
minp: int,
com: float, # float64_t
adjust: bool,
ignore_na: bool,
deltas: np.ndarray | None = None, # const float64_t[:]
normalize: bool = True,
) -> np.ndarray: ... # np.ndarray[np.float64]
def ewmcov(
input_x: np.ndarray, # const float64_t[:]
start: np.ndarray, # const int64_t[:]
end: np.ndarray, # const int64_t[:]
minp: int,
input_y: np.ndarray, # const float64_t[:]
com: float, # float64_t
adjust: bool,
ignore_na: bool,
bias: bool,
) -> np.ndarray: ... # np.ndarray[np.float64]
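# A minimal sketch, assuming current pandas: these kernels are reached through
# the public .rolling() / .ewm() APIs.
import pandas as pd

s = pd.Series([1.0, 2.0, 3.0, 4.0])
assert list(s.rolling(window=2).sum().iloc[1:]) == [3.0, 5.0, 7.0]
assert list(s.rolling(window=2, min_periods=1).mean()) == [1.0, 1.5, 2.5, 3.5]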

View File

@ -0,0 +1,12 @@
import numpy as np
from pandas._typing import npt
def calculate_variable_window_bounds(
num_values: int, # int64_t
window_size: int, # int64_t
min_periods,
center: bool,
closed: str | None,
index: np.ndarray, # const int64_t[:]
) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]: ...
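# A minimal sketch, assuming current pandas: variable window bounds are what an
# offset-based (time-based) rolling window computes per row against the index.
import pandas as pd

s = pd.Series(
    [1.0, 2.0, 3.0],
    index=pd.to_datetime(["2024-01-01", "2024-01-02", "2024-01-05"]),
)
assert list(s.rolling("2D").sum()) == [1.0, 3.0, 3.0]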

View File

@ -0,0 +1,20 @@
import numpy as np
from pandas._typing import ArrayLike
def write_csv_rows(
data: list[ArrayLike],
data_index: np.ndarray,
nlevels: int,
cols: np.ndarray,
writer: object, # _csv.writer
) -> None: ...
def convert_json_to_lines(arr: str) -> str: ...
def max_len_string_array(
arr: np.ndarray, # pandas_string[:]
) -> int: ...
def word_len(val: object) -> int: ...
def string_array_replace_from_nan_rep(
arr: np.ndarray, # np.ndarray[object, ndim=1]
nan_rep: object,
) -> None: ...
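# A minimal sketch, assuming current pandas: write_csv_rows is the fast path
# under DataFrame.to_csv.
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": ["x", float("nan")]})
assert df.to_csv(index=False, na_rep="NULL").splitlines() == ["a,b", "1,x", "2,NULL"]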