This commit is contained in:
2024-12-04 13:35:57 +05:00
parent d346bf4b2a
commit 73ce681a55
7059 changed files with 1196501 additions and 0 deletions

View File

@ -0,0 +1,92 @@
"""
``numpy.lib`` is mostly a space for implementing functions that don't
belong in core or in another NumPy submodule with a clear purpose
(e.g. ``random``, ``fft``, ``linalg``, ``ma``).
``numpy.lib``'s private submodules contain basic functions that are used by
other public modules and are useful to have in the main name-space.
"""
# Public submodules
# Note: recfunctions and (maybe) format are public too, but not imported
from . import array_utils
from . import introspect
from . import mixins
from . import npyio
from . import scimath
from . import stride_tricks
# Private submodules
# load module names. See https://github.com/networkx/networkx/issues/5838
from . import _type_check_impl
from . import _index_tricks_impl
from . import _nanfunctions_impl
from . import _function_base_impl
from . import _stride_tricks_impl
from . import _shape_base_impl
from . import _twodim_base_impl
from . import _ufunclike_impl
from . import _histograms_impl
from . import _utils_impl
from . import _arraysetops_impl
from . import _polynomial_impl
from . import _npyio_impl
from . import _arrayterator_impl
from . import _arraypad_impl
from . import _version
# numpy.lib namespace members
from ._arrayterator_impl import Arrayterator
from ._version import NumpyVersion
from numpy._core._multiarray_umath import add_docstring, tracemalloc_domain
from numpy._core.function_base import add_newdoc
__all__ = [
"Arrayterator", "add_docstring", "add_newdoc", "array_utils",
"introspect", "mixins", "NumpyVersion", "npyio", "scimath",
"stride_tricks", "tracemalloc_domain"
]
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
def __getattr__(attr):
# Warn for reprecated attributes
import math
import warnings
if attr == "math":
warnings.warn(
"`np.lib.math` is a deprecated alias for the standard library "
"`math` module (Deprecated Numpy 1.25). Replace usages of "
"`numpy.lib.math` with `math`", DeprecationWarning, stacklevel=2)
return math
elif attr == "emath":
raise AttributeError(
"numpy.lib.emath was an alias for emath module that was removed "
"in NumPy 2.0. Replace usages of numpy.lib.emath with "
"numpy.emath.",
name=None
)
elif attr in (
"histograms", "type_check", "nanfunctions", "function_base",
"arraypad", "arraysetops", "ufunclike", "utils", "twodim_base",
"shape_base", "polynomial", "index_tricks",
):
raise AttributeError(
f"numpy.lib.{attr} is now private. If you are using a public "
"function, it should be available in the main numpy namespace, "
"otherwise check the NumPy 2.0 migration guide.",
name=None
)
elif attr == "arrayterator":
raise AttributeError(
"numpy.lib.arrayterator submodule is now private. To access "
"Arrayterator class use numpy.lib.Arrayterator.",
name=None
)
else:
raise AttributeError("module {!r} has no attribute "
"{!r}".format(__name__, attr))

View File

@ -0,0 +1,41 @@
import math as math
from numpy._pytesttester import PytestTester
from numpy import (
ndenumerate as ndenumerate,
ndindex as ndindex,
)
from numpy.version import version
from numpy.lib import (
format as format,
mixins as mixins,
scimath as scimath,
stride_tricks as stride_tricks,
npyio as npyio,
array_utils as array_utils,
)
from numpy.lib._version import (
NumpyVersion as NumpyVersion,
)
from numpy.lib._arrayterator_impl import (
Arrayterator as Arrayterator,
)
from numpy._core.multiarray import (
add_docstring as add_docstring,
tracemalloc_domain as tracemalloc_domain,
)
from numpy._core.function_base import (
add_newdoc as add_newdoc,
)
__all__: list[str]
test: PytestTester
__version__ = version

View File

@ -0,0 +1,62 @@
"""
Miscellaneous utils.
"""
from numpy._core import asarray
from numpy._core.numeric import normalize_axis_tuple, normalize_axis_index
from numpy._utils import set_module
__all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"]
@set_module("numpy.lib.array_utils")
def byte_bounds(a):
"""
Returns pointers to the end-points of an array.
Parameters
----------
a : ndarray
Input array. It must conform to the Python-side of the array
interface.
Returns
-------
(low, high) : tuple of 2 integers
The first integer is the first byte of the array, the second
integer is just past the last byte of the array. If `a` is not
contiguous it will not use every byte between the (`low`, `high`)
values.
Examples
--------
>>> import numpy as np
>>> I = np.eye(2, dtype='f'); I.dtype
dtype('float32')
>>> low, high = np.lib.array_utils.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
>>> I = np.eye(2); I.dtype
dtype('float64')
>>> low, high = np.lib.array_utils.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
"""
ai = a.__array_interface__
a_data = ai['data'][0]
astrides = ai['strides']
ashape = ai['shape']
bytes_a = asarray(a).dtype.itemsize
a_low = a_high = a_data
if astrides is None:
# contiguous case
a_high += a.size * bytes_a
else:
for shape, stride in zip(ashape, astrides):
if stride < 0:
a_low += (shape-1)*stride
else:
a_high += (shape-1)*stride
a_high += bytes_a
return a_low, a_high

View File

@ -0,0 +1,25 @@
from typing import Any, Iterable, Tuple
from numpy import generic
from numpy.typing import NDArray
__all__: list[str]
# NOTE: In practice `byte_bounds` can (potentially) take any object
# implementing the `__array_interface__` protocol. The caveat is
# that certain keys, marked as optional in the spec, must be present for
# `byte_bounds`. This concerns `"strides"` and `"data"`.
# Returns the (low, high) byte addresses spanning the array's data buffer.
def byte_bounds(a: generic | NDArray[Any]) -> tuple[int, int]: ...
def normalize_axis_tuple(
axis: int | Iterable[int],
ndim: int = ...,
argname: None | str = ...,
allow_duplicate: None | bool = ...,
) -> Tuple[int, int]: ...
# Normalizes a possibly-negative axis index against `ndim`; returns the
# non-negative equivalent.
def normalize_axis_index(
    axis: int = ...,
    ndim: int = ...,
    msg_prefix: None | str = ...,
) -> int: ...

View File

@ -0,0 +1,895 @@
"""
The arraypad module contains a group of functions to pad values onto the edges
of an n-dimensional array.
"""
import numpy as np
from numpy._core.overrides import array_function_dispatch
from numpy.lib._index_tricks_impl import ndindex
__all__ = ['pad']
###############################################################################
# Private utility functions.
def _round_if_needed(arr, dtype):
"""
Rounds arr inplace if destination dtype is integer.
Parameters
----------
arr : ndarray
Input array.
dtype : dtype
The dtype of the destination array.
"""
if np.issubdtype(dtype, np.integer):
arr.round(out=arr)
def _slice_at_axis(sl, axis):
"""
Construct tuple of slices to slice an array in the given dimension.
Parameters
----------
sl : slice
The slice for the given dimension.
axis : int
The axis to which `sl` is applied. All other dimensions are left
"unsliced".
Returns
-------
sl : tuple of slices
A tuple with slices matching `shape` in length.
Examples
--------
>>> np._slice_at_axis(slice(None, 3, -1), 1)
(slice(None, None, None), slice(None, 3, -1), (...,))
"""
return (slice(None),) * axis + (sl,) + (...,)
def _view_roi(array, original_area_slice, axis):
"""
Get a view of the current region of interest during iterative padding.
When padding multiple dimensions iteratively corner values are
unnecessarily overwritten multiple times. This function reduces the
working area for the first dimensions so that corners are excluded.
Parameters
----------
array : ndarray
The array with the region of interest.
original_area_slice : tuple of slices
Denotes the area with original values of the unpadded array.
axis : int
The currently padded dimension assuming that `axis` is padded before
`axis` + 1.
Returns
-------
roi : ndarray
The region of interest of the original `array`.
"""
axis += 1
sl = (slice(None),) * axis + original_area_slice[axis:]
return array[sl]
def _pad_simple(array, pad_width, fill_value=None):
"""
Pad array on all sides with either a single value or undefined values.
Parameters
----------
array : ndarray
Array to grow.
pad_width : sequence of tuple[int, int]
Pad width on both sides for each dimension in `arr`.
fill_value : scalar, optional
If provided the padded area is filled with this value, otherwise
the pad area left undefined.
Returns
-------
padded : ndarray
The padded array with the same dtype as`array`. Its order will default
to C-style if `array` is not F-contiguous.
original_area_slice : tuple
A tuple of slices pointing to the area of the original array.
"""
# Allocate grown array
new_shape = tuple(
left + size + right
for size, (left, right) in zip(array.shape, pad_width)
)
order = 'F' if array.flags.fnc else 'C' # Fortran and not also C-order
padded = np.empty(new_shape, dtype=array.dtype, order=order)
if fill_value is not None:
padded.fill(fill_value)
# Copy old array into correct space
original_area_slice = tuple(
slice(left, left + size)
for size, (left, right) in zip(array.shape, pad_width)
)
padded[original_area_slice] = array
return padded, original_area_slice
def _set_pad_area(padded, axis, width_pair, value_pair):
    """
    Set empty-padded area in given dimension.

    Parameters
    ----------
    padded : ndarray
        Array with the pad area which is modified inplace.
    axis : int
        Dimension with the pad area to set.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.
    value_pair : tuple of scalars or ndarrays
        Values inserted into the pad area on each side. It must match or be
        broadcastable to the shape of `arr`.
    """
    left_width, right_width = width_pair
    left_value, right_value = value_pair
    # Fill the leading pad area, then the trailing one.
    padded[_slice_at_axis(slice(None, left_width), axis)] = left_value
    trailing_start = padded.shape[axis] - right_width
    padded[_slice_at_axis(slice(trailing_start, None), axis)] = right_value
def _get_edges(padded, axis, width_pair):
    """
    Retrieve edge values from empty-padded array in given dimension.

    Parameters
    ----------
    padded : ndarray
        Empty-padded array.
    axis : int
        Dimension in which the edges are considered.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.

    Returns
    -------
    left_edge, right_edge : ndarray
        Edge values of the valid area in `padded` in the given dimension.
        Their shape matches `padded` except along `axis`, where the length
        is 1.
    """
    # First valid element sits just after the left pad area; last valid
    # element sits just before the right pad area.
    left_index = width_pair[0]
    right_index = padded.shape[axis] - width_pair[1]
    left_edge = padded[
        _slice_at_axis(slice(left_index, left_index + 1), axis)]
    right_edge = padded[
        _slice_at_axis(slice(right_index - 1, right_index), axis)]
    return left_edge, right_edge
def _get_linear_ramps(padded, axis, width_pair, end_value_pair):
    """
    Construct linear ramps for empty-padded array in given dimension.

    Parameters
    ----------
    padded : ndarray
        Empty-padded array.
    axis : int
        Dimension in which the ramps are constructed.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.
    end_value_pair : (scalar, scalar)
        End values for the linear ramps which form the edge of the fully
        padded array. These values are included in the linear ramps.

    Returns
    -------
    left_ramp, right_ramp : ndarray
        Linear ramps to set on both sides of `padded`.
    """
    edge_pair = _get_edges(padded, axis, width_pair)
    # Build both ramps with one generator: each runs from the outer end
    # value toward (but excluding, endpoint=False) the edge value.
    left_ramp, right_ramp = (
        np.linspace(
            start=end_value,
            stop=edge.squeeze(axis),  # Dimension is replaced by linspace
            num=width,
            endpoint=False,
            dtype=padded.dtype,
            axis=axis
        )
        for end_value, edge, width in zip(
            end_value_pair, edge_pair, width_pair
        )
    )
    # Reverse linear space in appropriate dimension so the right ramp
    # also runs from the edge outward to the end value.
    right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)]
    return left_ramp, right_ramp
def _get_stats(padded, axis, width_pair, length_pair, stat_func):
    """
    Calculate statistic for the empty-padded array in given dimension.

    Parameters
    ----------
    padded : ndarray
        Empty-padded array.
    axis : int
        Dimension in which the statistic is calculated.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.
    length_pair : 2-element sequence of None or int
        Gives the number of values in valid area from each side that is
        taken into account when calculating the statistic. If None the
        entire valid area in `padded` is considered.
    stat_func : function
        Function to compute statistic. The expected signature is
        ``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``.

    Returns
    -------
    left_stat, right_stat : ndarray
        Calculated statistic for both sides of `padded`.
    """
    # Calculate indices of the edges of the area with original values
    left_index = width_pair[0]
    right_index = padded.shape[axis] - width_pair[1]
    # as well as its length
    max_length = right_index - left_index
    # Limit stat_lengths to max_length
    left_length, right_length = length_pair
    if left_length is None or max_length < left_length:
        left_length = max_length
    if right_length is None or max_length < right_length:
        right_length = max_length
    if (left_length == 0 or right_length == 0) \
            and stat_func in {np.amax, np.amin}:
        # amax and amin can't operate on an empty array,
        # raise a more descriptive warning here instead of the default one
        raise ValueError("stat_length of 0 yields no value for padding")
    # Calculate statistic for the left side
    left_slice = _slice_at_axis(
        slice(left_index, left_index + left_length), axis)
    left_chunk = padded[left_slice]
    left_stat = stat_func(left_chunk, axis=axis, keepdims=True)
    # Integer targets need the float statistic (e.g. mean) rounded first.
    _round_if_needed(left_stat, padded.dtype)
    if left_length == right_length == max_length:
        # return early as right_stat must be identical to left_stat
        return left_stat, left_stat
    # Calculate statistic for the right side
    right_slice = _slice_at_axis(
        slice(right_index - right_length, right_index), axis)
    right_chunk = padded[right_slice]
    right_stat = stat_func(right_chunk, axis=axis, keepdims=True)
    _round_if_needed(right_stat, padded.dtype)
    return left_stat, right_stat
def _set_reflect_both(padded, axis, width_pair, method,
                      original_period, include_edge=False):
    """
    Pad `axis` of `arr` with reflection.

    Parameters
    ----------
    padded : ndarray
        Input array of arbitrary shape.
    axis : int
        Axis along which to pad `arr`.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.
    method : str
        Controls method of reflection; options are 'even' or 'odd'.
    original_period : int
        Original length of data on `axis` of `arr`.
    include_edge : bool
        If true, edge value is included in reflection, otherwise the edge
        value forms the symmetric axis to the reflection.

    Returns
    -------
    pad_amt : tuple of ints, length 2
        New index positions of padding to do along the `axis`. If these
        are both 0, padding is done in this dimension.
    """
    left_pad, right_pad = width_pair
    # Length of the data that has already been placed (original values
    # plus any pad filled by earlier iterations of the caller's loop).
    old_length = padded.shape[axis] - right_pad - left_pad
    if include_edge:
        # Avoid wrapping with only a subset of the original area
        # by ensuring period can only be a multiple of the original
        # area's length.
        old_length = old_length // original_period * original_period
        # Edge is included, we need to offset the pad amount by 1
        edge_offset = 1
    else:
        # Avoid wrapping with only a subset of the original area
        # by ensuring period can only be a multiple of the original
        # area's length.
        old_length = ((old_length - 1) // (original_period - 1)
                      * (original_period - 1) + 1)
        edge_offset = 0  # Edge is not included, no need to offset pad amount
        old_length -= 1  # but must be omitted from the chunk
    if left_pad > 0:
        # Pad with reflected values on left side:
        # First limit chunk size which can't be larger than pad area
        chunk_length = min(old_length, left_pad)
        # Slice right to left, stop on or next to edge, start relative to stop
        stop = left_pad - edge_offset
        start = stop + chunk_length
        left_slice = _slice_at_axis(slice(start, stop, -1), axis)
        left_chunk = padded[left_slice]
        if method == "odd":
            # Negate chunk and align with edge
            edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis)
            left_chunk = 2 * padded[edge_slice] - left_chunk
        # Insert chunk into padded area
        start = left_pad - chunk_length
        stop = left_pad
        pad_area = _slice_at_axis(slice(start, stop), axis)
        padded[pad_area] = left_chunk
        # Adjust pointer to left edge for next iteration
        left_pad -= chunk_length
    if right_pad > 0:
        # Pad with reflected values on right side:
        # First limit chunk size which can't be larger than pad area
        chunk_length = min(old_length, right_pad)
        # Slice right to left, start on or next to edge, stop relative to start
        start = -right_pad + edge_offset - 2
        stop = start - chunk_length
        right_slice = _slice_at_axis(slice(start, stop, -1), axis)
        right_chunk = padded[right_slice]
        if method == "odd":
            # Negate chunk and align with edge
            edge_slice = _slice_at_axis(
                slice(-right_pad - 1, -right_pad), axis)
            right_chunk = 2 * padded[edge_slice] - right_chunk
        # Insert chunk into padded area
        start = padded.shape[axis] - right_pad
        stop = start + chunk_length
        pad_area = _slice_at_axis(slice(start, stop), axis)
        padded[pad_area] = right_chunk
        # Adjust pointer to right edge for next iteration
        right_pad -= chunk_length
    return left_pad, right_pad
def _set_wrap_both(padded, axis, width_pair, original_period):
    """
    Pad `axis` of `arr` with wrapped values.

    Parameters
    ----------
    padded : ndarray
        Input array of arbitrary shape.
    axis : int
        Axis along which to pad `arr`.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.
    original_period : int
        Original length of data on `axis` of `arr`.

    Returns
    -------
    pad_amt : tuple of ints, length 2
        New index positions of padding to do along the `axis`. If these
        are both 0, padding is done in this dimension.
    """
    left_pad, right_pad = width_pair
    period = padded.shape[axis] - right_pad - left_pad
    # Avoid wrapping with only a subset of the original area by ensuring period
    # can only be a multiple of the original area's length.
    period = period // original_period * original_period
    # If the current dimension of `arr` doesn't contain enough valid values
    # (not part of the undefined pad area) we need to pad multiple times.
    # Each time the pad area shrinks on both sides which is communicated with
    # these variables.
    new_left_pad = 0
    new_right_pad = 0
    if left_pad > 0:
        # Pad with wrapped values on left side
        # First slice chunk from left side of the non-pad area.
        # Use min(period, left_pad) to ensure that chunk is not larger than
        # pad area.
        slice_end = left_pad + period
        slice_start = slice_end - min(period, left_pad)
        right_slice = _slice_at_axis(slice(slice_start, slice_end), axis)
        right_chunk = padded[right_slice]
        if left_pad > period:
            # Chunk is smaller than pad area
            pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis)
            new_left_pad = left_pad - period
        else:
            # Chunk matches pad area
            pad_area = _slice_at_axis(slice(None, left_pad), axis)
        padded[pad_area] = right_chunk
    if right_pad > 0:
        # Pad with wrapped values on right side
        # First slice chunk from right side of the non-pad area.
        # Use min(period, right_pad) to ensure that chunk is not larger than
        # pad area.
        slice_start = -right_pad - period
        slice_end = slice_start + min(period, right_pad)
        left_slice = _slice_at_axis(slice(slice_start, slice_end), axis)
        left_chunk = padded[left_slice]
        if right_pad > period:
            # Chunk is smaller than pad area
            pad_area = _slice_at_axis(
                slice(-right_pad, -right_pad + period), axis)
            new_right_pad = right_pad - period
        else:
            # Chunk matches pad area
            pad_area = _slice_at_axis(slice(-right_pad, None), axis)
        padded[pad_area] = left_chunk
    return new_left_pad, new_right_pad
def _as_pairs(x, ndim, as_index=False):
"""
Broadcast `x` to an array with the shape (`ndim`, 2).
A helper function for `pad` that prepares and validates arguments like
`pad_width` for iteration in pairs.
Parameters
----------
x : {None, scalar, array-like}
The object to broadcast to the shape (`ndim`, 2).
ndim : int
Number of pairs the broadcasted `x` will have.
as_index : bool, optional
If `x` is not None, try to round each element of `x` to an integer
(dtype `np.intp`) and ensure every element is positive.
Returns
-------
pairs : nested iterables, shape (`ndim`, 2)
The broadcasted version of `x`.
Raises
------
ValueError
If `as_index` is True and `x` contains negative elements.
Or if `x` is not broadcastable to the shape (`ndim`, 2).
"""
if x is None:
# Pass through None as a special case, otherwise np.round(x) fails
# with an AttributeError
return ((None, None),) * ndim
x = np.array(x)
if as_index:
x = np.round(x).astype(np.intp, copy=False)
if x.ndim < 3:
# Optimization: Possibly use faster paths for cases where `x` has
# only 1 or 2 elements. `np.broadcast_to` could handle these as well
# but is currently slower
if x.size == 1:
# x was supplied as a single value
x = x.ravel() # Ensure x[0] works for x.ndim == 0, 1, 2
if as_index and x < 0:
raise ValueError("index can't contain negative values")
return ((x[0], x[0]),) * ndim
if x.size == 2 and x.shape != (2, 1):
# x was supplied with a single value for each side
# but except case when each dimension has a single value
# which should be broadcasted to a pair,
# e.g. [[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]]
x = x.ravel() # Ensure x[0], x[1] works
if as_index and (x[0] < 0 or x[1] < 0):
raise ValueError("index can't contain negative values")
return ((x[0], x[1]),) * ndim
if as_index and x.min() < 0:
raise ValueError("index can't contain negative values")
# Converting the array with `tolist` seems to improve performance
# when iterating and indexing the result (see usage in `pad`)
return np.broadcast_to(x, (ndim, 2)).tolist()
def _pad_dispatcher(array, pad_width, mode=None, **kwargs):
    # Dispatcher for `pad`'s __array_function__ protocol: only `array`
    # participates in dispatch; the signature must mirror `pad`'s.
    return (array,)
###############################################################################
# Public functions
@array_function_dispatch(_pad_dispatcher, module='numpy')
def pad(array, pad_width, mode='constant', **kwargs):
    """
    Pad an array.

    Parameters
    ----------
    array : array_like of rank N
        The array to pad.
    pad_width : {sequence, array_like, int}
        Number of values padded to the edges of each axis.
        ``((before_1, after_1), ... (before_N, after_N))`` unique pad widths
        for each axis.
        ``(before, after)`` or ``((before, after),)`` yields same before
        and after pad for each axis.
        ``(pad,)`` or ``int`` is a shortcut for before = after = pad width
        for all axes.
    mode : str or function, optional
        One of the following string values or a user supplied function.

        'constant' (default)
            Pads with a constant value.
        'edge'
            Pads with the edge values of array.
        'linear_ramp'
            Pads with the linear ramp between end_value and the
            array edge value.
        'maximum'
            Pads with the maximum value of all or part of the
            vector along each axis.
        'mean'
            Pads with the mean value of all or part of the
            vector along each axis.
        'median'
            Pads with the median value of all or part of the
            vector along each axis.
        'minimum'
            Pads with the minimum value of all or part of the
            vector along each axis.
        'reflect'
            Pads with the reflection of the vector mirrored on
            the first and last values of the vector along each
            axis.
        'symmetric'
            Pads with the reflection of the vector mirrored
            along the edge of the array.
        'wrap'
            Pads with the wrap of the vector along the axis.
            The first values are used to pad the end and the
            end values are used to pad the beginning.
        'empty'
            Pads with undefined values.

            .. versionadded:: 1.17

        <function>
            Padding function, see Notes.
    stat_length : sequence or int, optional
        Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
        values at edge of each axis used to calculate the statistic value.

        ``((before_1, after_1), ... (before_N, after_N))`` unique statistic
        lengths for each axis.

        ``(before, after)`` or ``((before, after),)`` yields same before
        and after statistic lengths for each axis.

        ``(stat_length,)`` or ``int`` is a shortcut for
        ``before = after = statistic`` length for all axes.

        Default is ``None``, to use the entire axis.
    constant_values : sequence or scalar, optional
        Used in 'constant'. The values to set the padded values for each
        axis.

        ``((before_1, after_1), ... (before_N, after_N))`` unique pad
        constants for each axis.

        ``(before, after)`` or ``((before, after),)`` yields same before
        and after constants for each axis.

        ``(constant,)`` or ``constant`` is a shortcut for
        ``before = after = constant`` for all axes.

        Default is 0.
    end_values : sequence or scalar, optional
        Used in 'linear_ramp'. The values used for the ending value of the
        linear_ramp and that will form the edge of the padded array.

        ``((before_1, after_1), ... (before_N, after_N))`` unique end values
        for each axis.

        ``(before, after)`` or ``((before, after),)`` yields same before
        and after end values for each axis.

        ``(constant,)`` or ``constant`` is a shortcut for
        ``before = after = constant`` for all axes.

        Default is 0.
    reflect_type : {'even', 'odd'}, optional
        Used in 'reflect', and 'symmetric'. The 'even' style is the
        default with an unaltered reflection around the edge value. For
        the 'odd' style, the extended part of the array is created by
        subtracting the reflected values from two times the edge value.

    Returns
    -------
    pad : ndarray
        Padded array of rank equal to `array` with shape increased
        according to `pad_width`.

    Notes
    -----
    .. versionadded:: 1.7.0

    For an array with rank greater than 1, some of the padding of later
    axes is calculated from padding of previous axes. This is easiest to
    think about with a rank 2 array where the corners of the padded array
    are calculated by using padded values from the first axis.

    The padding function, if used, should modify a rank 1 array in-place.
    It has the following signature::

        padding_func(vector, iaxis_pad_width, iaxis, kwargs)

    where

        vector : ndarray
            A rank 1 array already padded with zeros. Padded values are
            vector[:iaxis_pad_width[0]] and vector[-iaxis_pad_width[1]:].
        iaxis_pad_width : tuple
            A 2-tuple of ints, iaxis_pad_width[0] represents the number of
            values padded at the beginning of vector where
            iaxis_pad_width[1] represents the number of values padded at
            the end of vector.
        iaxis : int
            The axis currently being calculated.
        kwargs : dict
            Any keyword arguments the function requires.

    Examples
    --------
    >>> import numpy as np
    >>> a = [1, 2, 3, 4, 5]
    >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6))
    array([4, 4, 1, ..., 6, 6, 6])
    >>> np.pad(a, (2, 3), 'edge')
    array([1, 1, 1, ..., 5, 5, 5])
    >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4))
    array([ 5,  3,  1,  2,  3,  4,  5,  2, -1, -4])
    >>> np.pad(a, (2,), 'maximum')
    array([5, 5, 1, 2, 3, 4, 5, 5, 5])
    >>> np.pad(a, (2,), 'mean')
    array([3, 3, 1, 2, 3, 4, 5, 3, 3])
    >>> np.pad(a, (2,), 'median')
    array([3, 3, 1, 2, 3, 4, 5, 3, 3])
    >>> a = [[1, 2], [3, 4]]
    >>> np.pad(a, ((3, 2), (2, 3)), 'minimum')
    array([[1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [3, 3, 3, 4, 3, 3, 3],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1]])
    >>> a = [1, 2, 3, 4, 5]
    >>> np.pad(a, (2, 3), 'reflect')
    array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])
    >>> np.pad(a, (2, 3), 'reflect', reflect_type='odd')
    array([-1,  0,  1,  2,  3,  4,  5,  6,  7,  8])
    >>> np.pad(a, (2, 3), 'symmetric')
    array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])
    >>> np.pad(a, (2, 3), 'symmetric', reflect_type='odd')
    array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7])
    >>> np.pad(a, (2, 3), 'wrap')
    array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])
    >>> def pad_with(vector, pad_width, iaxis, kwargs):
    ...     pad_value = kwargs.get('padder', 10)
    ...     vector[:pad_width[0]] = pad_value
    ...     vector[-pad_width[1]:] = pad_value
    >>> a = np.arange(6)
    >>> a = a.reshape((2, 3))
    >>> np.pad(a, 2, pad_with)
    array([[10, 10, 10, 10, 10, 10, 10],
           [10, 10, 10, 10, 10, 10, 10],
           [10, 10,  0,  1,  2, 10, 10],
           [10, 10,  3,  4,  5, 10, 10],
           [10, 10, 10, 10, 10, 10, 10],
           [10, 10, 10, 10, 10, 10, 10]])
    >>> np.pad(a, 2, pad_with, padder=100)
    array([[100, 100, 100, 100, 100, 100, 100],
           [100, 100, 100, 100, 100, 100, 100],
           [100, 100,   0,   1,   2, 100, 100],
           [100, 100,   3,   4,   5, 100, 100],
           [100, 100, 100, 100, 100, 100, 100],
           [100, 100, 100, 100, 100, 100, 100]])
    """
    array = np.asarray(array)
    pad_width = np.asarray(pad_width)
    if not pad_width.dtype.kind == 'i':
        raise TypeError('`pad_width` must be of integral type.')
    # Broadcast to shape (array.ndim, 2)
    pad_width = _as_pairs(pad_width, array.ndim, as_index=True)
    if callable(mode):
        # Old behavior: Use user-supplied function with np.apply_along_axis
        function = mode
        # Create a new zero padded array
        padded, _ = _pad_simple(array, pad_width, fill_value=0)
        # And apply along each axis
        for axis in range(padded.ndim):
            # Iterate using ndindex as in apply_along_axis, but assuming that
            # function operates inplace on the padded array.
            # view with the iteration axis at the end
            view = np.moveaxis(padded, axis, -1)
            # compute indices for the iteration axes, and append a trailing
            # ellipsis to prevent 0d arrays decaying to scalars (gh-8642)
            inds = ndindex(view.shape[:-1])
            inds = (ind + (Ellipsis,) for ind in inds)
            for ind in inds:
                function(view[ind], pad_width[axis], axis, kwargs)
        return padded
    # Make sure that no unsupported keywords were passed for the current mode
    allowed_kwargs = {
        'empty': [], 'edge': [], 'wrap': [],
        'constant': ['constant_values'],
        'linear_ramp': ['end_values'],
        'maximum': ['stat_length'],
        'mean': ['stat_length'],
        'median': ['stat_length'],
        'minimum': ['stat_length'],
        'reflect': ['reflect_type'],
        'symmetric': ['reflect_type'],
    }
    try:
        unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode])
    except KeyError:
        raise ValueError("mode '{}' is not supported".format(mode)) from None
    if unsupported_kwargs:
        raise ValueError("unsupported keyword arguments for mode '{}': {}"
                         .format(mode, unsupported_kwargs))
    stat_functions = {"maximum": np.amax, "minimum": np.amin,
                      "mean": np.mean, "median": np.median}
    # Create array with final shape and original values
    # (padded area is undefined)
    padded, original_area_slice = _pad_simple(array, pad_width)
    # And prepare iteration over all dimensions
    # (zipping may be more readable than using enumerate)
    axes = range(padded.ndim)
    if mode == "constant":
        values = kwargs.get("constant_values", 0)
        values = _as_pairs(values, padded.ndim)
        for axis, width_pair, value_pair in zip(axes, pad_width, values):
            roi = _view_roi(padded, original_area_slice, axis)
            _set_pad_area(roi, axis, width_pair, value_pair)
    elif mode == "empty":
        pass  # Do nothing as _pad_simple already returned the correct result
    elif array.size == 0:
        # Only modes "constant" and "empty" can extend empty axes, all other
        # modes depend on `array` not being empty
        # -> ensure every empty axis is only "padded with 0"
        for axis, width_pair in zip(axes, pad_width):
            if array.shape[axis] == 0 and any(width_pair):
                raise ValueError(
                    "can't extend empty axis {} using modes other than "
                    "'constant' or 'empty'".format(axis)
                )
        # passed, don't need to do anything more as _pad_simple already
        # returned the correct result
    elif mode == "edge":
        for axis, width_pair in zip(axes, pad_width):
            roi = _view_roi(padded, original_area_slice, axis)
            edge_pair = _get_edges(roi, axis, width_pair)
            _set_pad_area(roi, axis, width_pair, edge_pair)
    elif mode == "linear_ramp":
        end_values = kwargs.get("end_values", 0)
        end_values = _as_pairs(end_values, padded.ndim)
        for axis, width_pair, value_pair in zip(axes, pad_width, end_values):
            roi = _view_roi(padded, original_area_slice, axis)
            ramp_pair = _get_linear_ramps(roi, axis, width_pair, value_pair)
            _set_pad_area(roi, axis, width_pair, ramp_pair)
    elif mode in stat_functions:
        func = stat_functions[mode]
        length = kwargs.get("stat_length", None)
        length = _as_pairs(length, padded.ndim, as_index=True)
        for axis, width_pair, length_pair in zip(axes, pad_width, length):
            roi = _view_roi(padded, original_area_slice, axis)
            stat_pair = _get_stats(roi, axis, width_pair, length_pair, func)
            _set_pad_area(roi, axis, width_pair, stat_pair)
    elif mode in {"reflect", "symmetric"}:
        method = kwargs.get("reflect_type", "even")
        include_edge = mode == "symmetric"
        for axis, (left_index, right_index) in zip(axes, pad_width):
            if array.shape[axis] == 1 and (left_index > 0 or right_index > 0):
                # Extending singleton dimension for 'reflect' is legacy
                # behavior; it really should raise an error.
                edge_pair = _get_edges(padded, axis, (left_index, right_index))
                _set_pad_area(
                    padded, axis, (left_index, right_index), edge_pair)
                continue
            roi = _view_roi(padded, original_area_slice, axis)
            while left_index > 0 or right_index > 0:
                # Iteratively pad until dimension is filled with reflected
                # values. This is necessary if the pad area is larger than
                # the length of the original values in the current dimension.
                left_index, right_index = _set_reflect_both(
                    roi, axis, (left_index, right_index),
                    method, array.shape[axis], include_edge
                )
    elif mode == "wrap":
        for axis, (left_index, right_index) in zip(axes, pad_width):
            roi = _view_roi(padded, original_area_slice, axis)
            original_period = padded.shape[axis] - right_index - left_index
            while left_index > 0 or right_index > 0:
                # Iteratively pad until dimension is filled with wrapped
                # values. This is necessary if the pad area is larger than
                # the length of the original values in the current dimension.
                left_index, right_index = _set_wrap_both(
                    roi, axis, (left_index, right_index), original_period)
    return padded

View File

@ -0,0 +1,85 @@
from typing import (
Literal as L,
Any,
overload,
TypeVar,
Protocol,
)
from numpy import generic
from numpy._typing import (
ArrayLike,
NDArray,
_ArrayLikeInt,
_ArrayLike,
)
_SCT = TypeVar("_SCT", bound=generic)
class _ModeFunc(Protocol):
    # Callback signature accepted by `np.pad` when `mode` is a callable.
    # The function receives a rank-1 view (`vector`) of the padded array for
    # one axis at a time, must fill the pad regions in place, and returns None.
    def __call__(
        self,
        vector: NDArray[Any],
        iaxis_pad_width: tuple[int, int],
        iaxis: int,
        kwargs: dict[str, Any],
        /,
    ) -> None: ...
_ModeKind = L[
"constant",
"edge",
"linear_ramp",
"maximum",
"mean",
"median",
"minimum",
"reflect",
"symmetric",
"wrap",
"empty",
]
__all__: list[str]
# TODO: In practice each keyword argument is exclusive to one or more
# specific modes. Consider adding more overloads to express this in the future.
# Expand `**kwargs` into explicit keyword-only arguments
# Overload 1: known scalar type + string mode -> array of the same scalar type.
@overload
def pad(
    array: _ArrayLike[_SCT],
    pad_width: _ArrayLikeInt,
    mode: _ModeKind = ...,
    *,
    stat_length: None | _ArrayLikeInt = ...,
    constant_values: ArrayLike = ...,
    end_values: ArrayLike = ...,
    reflect_type: L["odd", "even"] = ...,
) -> NDArray[_SCT]: ...
# Overload 2: arbitrary array-like + string mode -> array of unknown dtype.
@overload
def pad(
    array: ArrayLike,
    pad_width: _ArrayLikeInt,
    mode: _ModeKind = ...,
    *,
    stat_length: None | _ArrayLikeInt = ...,
    constant_values: ArrayLike = ...,
    end_values: ArrayLike = ...,
    reflect_type: L["odd", "even"] = ...,
) -> NDArray[Any]: ...
# Overloads 3 & 4: callable mode — extra keyword arguments are forwarded to
# the callback verbatim, so `**kwargs` stays untyped here.
@overload
def pad(
    array: _ArrayLike[_SCT],
    pad_width: _ArrayLikeInt,
    mode: _ModeFunc,
    **kwargs: Any,
) -> NDArray[_SCT]: ...
@overload
def pad(
    array: ArrayLike,
    pad_width: _ArrayLikeInt,
    mode: _ModeFunc,
    **kwargs: Any,
) -> NDArray[Any]: ...

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,399 @@
from typing import (
Any,
Generic,
Literal as L,
NamedTuple,
overload,
SupportsIndex,
TypeVar,
)
import numpy as np
from numpy import (
generic,
number,
ushort,
ubyte,
uintc,
uint,
ulonglong,
short,
int8,
byte,
intc,
int_,
intp,
longlong,
half,
single,
double,
longdouble,
csingle,
cdouble,
clongdouble,
timedelta64,
datetime64,
object_,
str_,
bytes_,
void,
)
from numpy._typing import (
ArrayLike,
NDArray,
_ArrayLike,
_ArrayLikeBool_co,
_ArrayLikeDT64_co,
_ArrayLikeTD64_co,
_ArrayLikeObject_co,
_ArrayLikeNumber_co,
)
_SCT = TypeVar("_SCT", bound=generic)
_NumberType = TypeVar("_NumberType", bound=number[Any])
# Explicitly set all allowed values to prevent accidental castings to
# abstract dtypes (their common super-type).
#
# Only relevant if two or more arguments are parametrized, (e.g. `setdiff1d`)
# which could result in, for example, `int64` and `float64`producing a
# `number[_64Bit]` array
_SCTNoCast = TypeVar(
"_SCTNoCast",
np.bool,
ushort,
ubyte,
uintc,
uint,
ulonglong,
short,
byte,
intc,
int_,
longlong,
half,
single,
double,
longdouble,
csingle,
cdouble,
clongdouble,
timedelta64,
datetime64,
object_,
str_,
bytes_,
void,
)
class UniqueAllResult(NamedTuple, Generic[_SCT]):
    # Result type of `unique_all`: unique values plus first-occurrence
    # indices, reconstruction (inverse) indices, and per-value counts.
    values: NDArray[_SCT]
    indices: NDArray[intp]
    inverse_indices: NDArray[intp]
    counts: NDArray[intp]
class UniqueCountsResult(NamedTuple, Generic[_SCT]):
    # Result type of `unique_counts`: unique values and per-value counts.
    values: NDArray[_SCT]
    counts: NDArray[intp]
class UniqueInverseResult(NamedTuple, Generic[_SCT]):
    # Result type of `unique_inverse`: unique values and the indices that
    # reconstruct the original array from them.
    values: NDArray[_SCT]
    inverse_indices: NDArray[intp]
__all__: list[str]
# `ediff1d` overloads: the result dtype follows the input dtype, with two
# special cases — boolean input yields int8 differences, and datetime64 /
# timedelta64 input yields timedelta64 differences.
@overload
def ediff1d(
    ary: _ArrayLikeBool_co,
    to_end: None | ArrayLike = ...,
    to_begin: None | ArrayLike = ...,
) -> NDArray[int8]: ...
@overload
def ediff1d(
    ary: _ArrayLike[_NumberType],
    to_end: None | ArrayLike = ...,
    to_begin: None | ArrayLike = ...,
) -> NDArray[_NumberType]: ...
@overload
def ediff1d(
    ary: _ArrayLikeNumber_co,
    to_end: None | ArrayLike = ...,
    to_begin: None | ArrayLike = ...,
) -> NDArray[Any]: ...
@overload
def ediff1d(
    ary: _ArrayLikeDT64_co | _ArrayLikeTD64_co,
    to_end: None | ArrayLike = ...,
    to_begin: None | ArrayLike = ...,
) -> NDArray[timedelta64]: ...
@overload
def ediff1d(
    ary: _ArrayLikeObject_co,
    to_end: None | ArrayLike = ...,
    to_begin: None | ArrayLike = ...,
) -> NDArray[object_]: ...
# `unique` overloads: one pair (known scalar type / arbitrary array-like) per
# combination of the three boolean flags `return_index`, `return_inverse` and
# `return_counts`.  Each enabled flag appends one intp index array to the
# returned tuple; with no flags enabled only the value array is returned.
# flags: (False, False, False) -> values only
@overload
def unique(
    ar: _ArrayLike[_SCT],
    return_index: L[False] = ...,
    return_inverse: L[False] = ...,
    return_counts: L[False] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> NDArray[_SCT]: ...
@overload
def unique(
    ar: ArrayLike,
    return_index: L[False] = ...,
    return_inverse: L[False] = ...,
    return_counts: L[False] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> NDArray[Any]: ...
# flags: (True, False, False) -> (values, index)
@overload
def unique(
    ar: _ArrayLike[_SCT],
    return_index: L[True] = ...,
    return_inverse: L[False] = ...,
    return_counts: L[False] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[_SCT], NDArray[intp]]: ...
@overload
def unique(
    ar: ArrayLike,
    return_index: L[True] = ...,
    return_inverse: L[False] = ...,
    return_counts: L[False] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[Any], NDArray[intp]]: ...
# flags: (False, True, False) -> (values, inverse)
@overload
def unique(
    ar: _ArrayLike[_SCT],
    return_index: L[False] = ...,
    return_inverse: L[True] = ...,
    return_counts: L[False] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[_SCT], NDArray[intp]]: ...
@overload
def unique(
    ar: ArrayLike,
    return_index: L[False] = ...,
    return_inverse: L[True] = ...,
    return_counts: L[False] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[Any], NDArray[intp]]: ...
# flags: (False, False, True) -> (values, counts)
@overload
def unique(
    ar: _ArrayLike[_SCT],
    return_index: L[False] = ...,
    return_inverse: L[False] = ...,
    return_counts: L[True] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[_SCT], NDArray[intp]]: ...
@overload
def unique(
    ar: ArrayLike,
    return_index: L[False] = ...,
    return_inverse: L[False] = ...,
    return_counts: L[True] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[Any], NDArray[intp]]: ...
# flags: (True, True, False) -> (values, index, inverse)
@overload
def unique(
    ar: _ArrayLike[_SCT],
    return_index: L[True] = ...,
    return_inverse: L[True] = ...,
    return_counts: L[False] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ...
@overload
def unique(
    ar: ArrayLike,
    return_index: L[True] = ...,
    return_inverse: L[True] = ...,
    return_counts: L[False] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
# flags: (True, False, True) -> (values, index, counts)
@overload
def unique(
    ar: _ArrayLike[_SCT],
    return_index: L[True] = ...,
    return_inverse: L[False] = ...,
    return_counts: L[True] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ...
@overload
def unique(
    ar: ArrayLike,
    return_index: L[True] = ...,
    return_inverse: L[False] = ...,
    return_counts: L[True] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
# flags: (False, True, True) -> (values, inverse, counts)
@overload
def unique(
    ar: _ArrayLike[_SCT],
    return_index: L[False] = ...,
    return_inverse: L[True] = ...,
    return_counts: L[True] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ...
@overload
def unique(
    ar: ArrayLike,
    return_index: L[False] = ...,
    return_inverse: L[True] = ...,
    return_counts: L[True] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
# flags: (True, True, True) -> (values, index, inverse, counts)
@overload
def unique(
    ar: _ArrayLike[_SCT],
    return_index: L[True] = ...,
    return_inverse: L[True] = ...,
    return_counts: L[True] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp], NDArray[intp]]: ...
@overload
def unique(
    ar: ArrayLike,
    return_index: L[True] = ...,
    return_inverse: L[True] = ...,
    return_counts: L[True] = ...,
    axis: None | SupportsIndex = ...,
    *,
    equal_nan: bool = ...,
) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp], NDArray[intp]]: ...
# Array-API style helper: `unique` with all three index/count outputs bundled
# into a named tuple.
@overload
def unique_all(
    x: _ArrayLike[_SCT], /
) -> UniqueAllResult[_SCT]: ...
@overload
def unique_all(
    x: ArrayLike, /
) -> UniqueAllResult[Any]: ...
# Array-API style helper: unique values together with their counts.
@overload
def unique_counts(
    x: _ArrayLike[_SCT], /
) -> UniqueCountsResult[_SCT]: ...
@overload
def unique_counts(
    x: ArrayLike, /
) -> UniqueCountsResult[Any]: ...
# Array-API style helper: unique values together with reconstruction indices.
@overload
def unique_inverse(x: _ArrayLike[_SCT], /) -> UniqueInverseResult[_SCT]: ...
@overload
def unique_inverse(x: ArrayLike, /) -> UniqueInverseResult[Any]: ...
# Array-API style helper: unique values only.
@overload
def unique_values(x: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
@overload
def unique_values(x: ArrayLike, /) -> NDArray[Any]: ...
# `intersect1d` overloads: `_SCTNoCast` requires both inputs to share one
# concrete scalar type (preventing silent promotion to a common super-type);
# with `return_indices=True` the indices into both inputs are also returned.
@overload
def intersect1d(
    ar1: _ArrayLike[_SCTNoCast],
    ar2: _ArrayLike[_SCTNoCast],
    assume_unique: bool = ...,
    return_indices: L[False] = ...,
) -> NDArray[_SCTNoCast]: ...
@overload
def intersect1d(
    ar1: ArrayLike,
    ar2: ArrayLike,
    assume_unique: bool = ...,
    return_indices: L[False] = ...,
) -> NDArray[Any]: ...
@overload
def intersect1d(
    ar1: _ArrayLike[_SCTNoCast],
    ar2: _ArrayLike[_SCTNoCast],
    assume_unique: bool = ...,
    return_indices: L[True] = ...,
) -> tuple[NDArray[_SCTNoCast], NDArray[intp], NDArray[intp]]: ...
@overload
def intersect1d(
    ar1: ArrayLike,
    ar2: ArrayLike,
    assume_unique: bool = ...,
    return_indices: L[True] = ...,
) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
# `setxor1d`: elements present in exactly one of the two inputs.
@overload
def setxor1d(
    ar1: _ArrayLike[_SCTNoCast],
    ar2: _ArrayLike[_SCTNoCast],
    assume_unique: bool = ...,
) -> NDArray[_SCTNoCast]: ...
@overload
def setxor1d(
    ar1: ArrayLike,
    ar2: ArrayLike,
    assume_unique: bool = ...,
) -> NDArray[Any]: ...
# `isin` always returns a boolean mask with the shape of `element`, so a
# single signature suffices (no overloads needed).
def isin(
    element: ArrayLike,
    test_elements: ArrayLike,
    assume_unique: bool = ...,
    invert: bool = ...,
    *,
    kind: None | str = ...,
) -> NDArray[np.bool]: ...
# `union1d`: sorted union of the two inputs.
@overload
def union1d(
    ar1: _ArrayLike[_SCTNoCast],
    ar2: _ArrayLike[_SCTNoCast],
) -> NDArray[_SCTNoCast]: ...
@overload
def union1d(
    ar1: ArrayLike,
    ar2: ArrayLike,
) -> NDArray[Any]: ...
# `setdiff1d`: elements of `ar1` that are not in `ar2`.
@overload
def setdiff1d(
    ar1: _ArrayLike[_SCTNoCast],
    ar2: _ArrayLike[_SCTNoCast],
    assume_unique: bool = ...,
) -> NDArray[_SCTNoCast]: ...
@overload
def setdiff1d(
    ar1: ArrayLike,
    ar2: ArrayLike,
    assume_unique: bool = ...,
) -> NDArray[Any]: ...

View File

@ -0,0 +1,222 @@
"""
A buffered iterator for big arrays.
This module solves the problem of iterating over a big file-based array
without having to read it into memory. The `Arrayterator` class wraps
an array object, and when iterated it will return sub-arrays with at most
a user-specified number of elements.
"""
from operator import mul
from functools import reduce
__all__ = ['Arrayterator']
class Arrayterator:
    """
    Buffered iterator for big arrays.
    `Arrayterator` creates a buffered iterator for reading big arrays in small
    contiguous blocks. The class is useful for objects stored in the
    file system. It allows iteration over the object *without* reading
    everything in memory; instead, small blocks are read and iterated over.
    `Arrayterator` can be used with any object that supports multidimensional
    slices. This includes NumPy arrays, but also variables from
    Scientific.IO.NetCDF or pynetcdf for example.
    Parameters
    ----------
    var : array_like
        The object to iterate over.
    buf_size : int, optional
        The buffer size. If `buf_size` is supplied, the maximum amount of
        data that will be read into memory is `buf_size` elements.
        Default is None, which will read as many element as possible
        into memory.
    Attributes
    ----------
    var
    buf_size
    start
    stop
    step
    shape
    flat
    See Also
    --------
    numpy.ndenumerate : Multidimensional array iterator.
    numpy.flatiter : Flat array iterator.
    numpy.memmap : Create a memory-map to an array stored
                   in a binary file on disk.
    Notes
    -----
    The algorithm works by first finding a "running dimension", along which
    the blocks will be extracted. Given an array of dimensions
    ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the
    first dimension will be used. If, on the other hand,
    ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on.
    Blocks are extracted along this dimension, and when the last block is
    returned the process continues from the next dimension, until all
    elements have been read.
    Examples
    --------
    >>> import numpy as np
    >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
    >>> a_itor = np.lib.Arrayterator(a, 2)
    >>> a_itor.shape
    (3, 4, 5, 6)
    Now we can iterate over ``a_itor``, and it will return arrays of size
    two. Since `buf_size` was smaller than any dimension, the first
    dimension will be iterated over first:
    >>> for subarr in a_itor:
    ...     if not subarr.all():
    ...         print(subarr, subarr.shape) # doctest: +SKIP
    >>> # [[[[0 1]]]] (1, 1, 1, 2)
    """
    def __init__(self, var, buf_size=None):
        # Wrapped array-like object; all attribute access not defined here is
        # delegated to it via __getattr__.
        self.var = var
        # Maximum number of elements per yielded block (None = no limit).
        self.buf_size = buf_size
        # Per-dimension selection: the iterator only visits
        # var[start[0]:stop[0]:step[0], ...]; __getitem__ narrows these.
        self.start = [0 for dim in var.shape]
        self.stop = [dim for dim in var.shape]
        self.step = [1 for dim in var.shape]
    def __getattr__(self, attr):
        # Delegate unknown attributes (ndim, dtype, ...) to the wrapped array.
        return getattr(self.var, attr)
    def __getitem__(self, index):
        """
        Return a new arrayterator.
        """
        # Fix index, handling ellipsis and incomplete slices.
        if not isinstance(index, tuple):
            index = (index,)
        fixed = []
        length, dims = len(index), self.ndim
        for slice_ in index:
            if slice_ is Ellipsis:
                # Expand Ellipsis to as many full slices as needed so the
                # final index has exactly `dims` entries.
                fixed.extend([slice(None)] * (dims-length+1))
                length = len(fixed)
            elif isinstance(slice_, int):
                # An integer index becomes a length-1 slice; the dimension is
                # kept (not dropped) so start/stop/step stay per-dimension.
                fixed.append(slice(slice_, slice_+1, 1))
            else:
                fixed.append(slice_)
        index = tuple(fixed)
        if len(index) < dims:
            index += (slice(None),) * (dims-len(index))
        # Return a new arrayterator object.
        # NOTE(review): slices are composed relative to the current selection;
        # negative starts/stops/steps appear unsupported here — confirm.
        out = self.__class__(self.var, self.buf_size)
        for i, (start, stop, step, slice_) in enumerate(
                zip(self.start, self.stop, self.step, index)):
            out.start[i] = start + (slice_.start or 0)
            out.step[i] = step * (slice_.step or 1)
            out.stop[i] = start + (slice_.stop or stop-start)
            out.stop[i] = min(stop, out.stop[i])
        return out
    def __array__(self, dtype=None, copy=None):
        """
        Return corresponding data.
        """
        # Materialize the whole current selection in one read.
        slice_ = tuple(slice(*t) for t in zip(
                self.start, self.stop, self.step))
        return self.var[slice_]
    @property
    def flat(self):
        """
        A 1-D flat iterator for Arrayterator objects.
        This iterator returns elements of the array to be iterated over in
        `~lib.Arrayterator` one by one.
        It is similar to `flatiter`.
        See Also
        --------
        lib.Arrayterator
        flatiter
        Examples
        --------
        >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
        >>> a_itor = np.lib.Arrayterator(a, 2)
        >>> for subarr in a_itor.flat:
        ...     if not subarr:
        ...         print(subarr, type(subarr))
        ...
        0 <class 'numpy.int64'>
        """
        # Yield scalars by flattening each buffered block in turn.
        for block in self:
            yield from block.flat
    @property
    def shape(self):
        """
        The shape of the array to be iterated over.
        For an example, see `Arrayterator`.
        """
        # Number of elements selected per dimension by start/stop/step.
        return tuple(((stop-start-1)//step+1) for start, stop, step in
                     zip(self.start, self.stop, self.step))
    def __iter__(self):
        # Skip arrays with degenerate dimensions
        if [dim for dim in self.shape if dim <= 0]:
            return
        # Work on copies so iteration does not mutate the selection.
        start = self.start[:]
        stop = self.stop[:]
        step = self.step[:]
        ndims = self.var.ndim
        while True:
            # `count` is the element budget for the next block.
            count = self.buf_size or reduce(mul, self.shape)
            # iterate over each dimension, looking for the
            # running dimension (ie, the dimension along which
            # the blocks will be built from)
            rundim = 0
            for i in range(ndims-1, -1, -1):
                # if count is zero we ran out of elements to read
                # along higher dimensions, so we read only a single position
                if count == 0:
                    stop[i] = start[i]+1
                elif count <= self.shape[i]:
                    # limit along this dimension
                    stop[i] = start[i] + count*step[i]
                    rundim = i
                else:
                    # read everything along this dimension
                    stop[i] = self.stop[i]
                # Clamp, then shrink the budget by this dimension's extent.
                stop[i] = min(self.stop[i], stop[i])
                count = count//self.shape[i]
            # yield a block
            slice_ = tuple(slice(*t) for t in zip(start, stop, step))
            yield self.var[slice_]
            # Update start position, taking care of overflow to
            # other dimensions
            start[rundim] = stop[rundim]  # start where we stopped
            for i in range(ndims-1, 0, -1):
                if start[i] >= self.stop[i]:
                    # Odometer-style carry into the next-slower dimension.
                    start[i] = self.start[i]
                    start[i-1] += self.step[i-1]
            if start[0] >= self.stop[0]:
                return

View File

@ -0,0 +1,48 @@
from collections.abc import Generator
from typing import (
Any,
TypeVar,
overload,
)
from numpy import ndarray, dtype, generic
from numpy._typing import DTypeLike, NDArray
# TODO: Set a shape bound once we've got proper shape support
_Shape = TypeVar("_Shape", bound=Any)
_DType = TypeVar("_DType", bound=dtype[Any])
_ScalarType = TypeVar("_ScalarType", bound=generic)
_Index = (
ellipsis
| int
| slice
| tuple[ellipsis | int | slice, ...]
)
__all__: list[str]
# NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`,
# but its ``__getattr__` method does wrap around the former and thus has
# access to all its methods
class Arrayterator(ndarray[_Shape, _DType]):
    # The wrapped array; the ignore is needed because `ndarray` itself has no
    # such attribute.
    var: ndarray[_Shape, _DType] # type: ignore[assignment]
    # Maximum number of elements per yielded block (None = unlimited).
    buf_size: None | int
    # Per-dimension selection bounds used by iteration.
    start: list[int]
    stop: list[int]
    step: list[int]
    @property  # type: ignore[misc]
    def shape(self) -> tuple[int, ...]: ...
    @property
    def flat(self: NDArray[_ScalarType]) -> Generator[_ScalarType, None, None]: ...
    def __init__(
        self, var: ndarray[_Shape, _DType], buf_size: None | int = ...
    ) -> None: ...
    @overload
    def __array__(self, dtype: None = ..., copy: None | bool = ...) -> ndarray[Any, _DType]: ...
    @overload
    def __array__(self, dtype: DTypeLike, copy: None | bool = ...) -> NDArray[Any]: ...
    def __getitem__(self, index: _Index) -> Arrayterator[Any, _DType]: ...
    def __iter__(self) -> Generator[ndarray[Any, _DType], None, None]: ...

View File

@ -0,0 +1,700 @@
"""A file interface for handling local and remote data files.
The goal of datasource is to abstract some of the file system operations
when dealing with data files so the researcher doesn't have to know all the
low-level details. Through datasource, a researcher can obtain and use a
file with one function call, regardless of location of the file.
DataSource is meant to augment standard python libraries, not replace them.
It should work seamlessly with standard file IO operations and the os
module.
DataSource files can originate locally or remotely:
- local files : '/home/guido/src/local/data.txt'
- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt'
DataSource files can also be compressed or uncompressed. Currently only
gzip, bz2 and xz are supported.
Example::
>>> # Create a DataSource, use os.curdir (default) for local storage.
>>> from numpy import DataSource
>>> ds = DataSource()
>>>
>>> # Open a remote file.
>>> # DataSource downloads the file, stores it locally in:
>>> # './www.google.com/index.html'
>>> # opens the file and returns a file object.
>>> fp = ds.open('http://www.google.com/') # doctest: +SKIP
>>>
>>> # Use the file as you normally would
>>> fp.read() # doctest: +SKIP
>>> fp.close() # doctest: +SKIP
"""
import os
from .._utils import set_module
_open = open
def _check_mode(mode, encoding, newline):
"""Check mode and that encoding and newline are compatible.
Parameters
----------
mode : str
File open mode.
encoding : str
File encoding.
newline : str
Newline for text files.
"""
if "t" in mode:
if "b" in mode:
raise ValueError("Invalid mode: %r" % (mode,))
else:
if encoding is not None:
raise ValueError("Argument 'encoding' not supported in binary mode")
if newline is not None:
raise ValueError("Argument 'newline' not supported in binary mode")
# Using a class instead of a module-level dictionary
# to reduce the initial 'import numpy' overhead by
# deferring the import of lzma, bz2 and gzip until needed
# TODO: .zip support, .tar support?
class _FileOpeners:
"""
Container for different methods to open (un-)compressed files.
`_FileOpeners` contains a dictionary that holds one method for each
supported file format. Attribute lookup is implemented in such a way
that an instance of `_FileOpeners` itself can be indexed with the keys
of that dictionary. Currently uncompressed files as well as files
compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported.
Notes
-----
`_file_openers`, an instance of `_FileOpeners`, is made available for
use in the `_datasource` module.
Examples
--------
>>> import gzip
>>> np.lib._datasource._file_openers.keys()
[None, '.bz2', '.gz', '.xz', '.lzma']
>>> np.lib._datasource._file_openers['.gz'] is gzip.open
True
"""
def __init__(self):
self._loaded = False
self._file_openers = {None: open}
def _load(self):
if self._loaded:
return
try:
import bz2
self._file_openers[".bz2"] = bz2.open
except ImportError:
pass
try:
import gzip
self._file_openers[".gz"] = gzip.open
except ImportError:
pass
try:
import lzma
self._file_openers[".xz"] = lzma.open
self._file_openers[".lzma"] = lzma.open
except (ImportError, AttributeError):
# There are incompatible backports of lzma that do not have the
# lzma.open attribute, so catch that as well as ImportError.
pass
self._loaded = True
def keys(self):
"""
Return the keys of currently supported file openers.
Parameters
----------
None
Returns
-------
keys : list
The keys are None for uncompressed files and the file extension
strings (i.e. ``'.gz'``, ``'.xz'``) for supported compression
methods.
"""
self._load()
return list(self._file_openers.keys())
def __getitem__(self, key):
self._load()
return self._file_openers[key]
_file_openers = _FileOpeners()
def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None):
    """
    Open `path` with `mode` and return the file object.

    Convenience wrapper: a `DataSource` rooted at `destpath` is created and
    the file object from ``DataSource.open(path)`` is returned.  If ``path``
    is an URL, it will be downloaded, stored in the `DataSource` `destpath`
    directory and opened from there.

    Parameters
    ----------
    path : str or pathlib.Path
        Local file path or URL to open.
    mode : str, optional
        Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
        append. Available modes depend on the type of object specified by
        path. Default is 'r'.
    destpath : str, optional
        Path to the directory where the source file gets downloaded to for
        use. If `destpath` is None, a temporary directory will be created.
        The default path is the current directory.
    encoding : {None, str}, optional
        Open text file with given encoding. The default encoding will be
        what `open` uses.
    newline : {None, str}, optional
        Newline to use when reading text file.

    Returns
    -------
    out : file object
        The opened file.
    """
    data_source = DataSource(destpath)
    return data_source.open(path, mode, encoding=encoding, newline=newline)
@set_module('numpy.lib.npyio')
class DataSource:
    """
    DataSource(destpath='.')
    A generic data source file (file, http, ftp, ...).
    DataSources can be local files or remote files/URLs. The files may
    also be compressed or uncompressed. DataSource hides some of the
    low-level details of downloading the file, allowing you to simply pass
    in a valid file path (or URL) and obtain a file object.
    Parameters
    ----------
    destpath : str or None, optional
        Path to the directory where the source file gets downloaded to for
        use. If `destpath` is None, a temporary directory will be created.
        The default path is the current directory.
    Notes
    -----
    URLs require a scheme string (``http://``) to be used, without it they
    will fail::
        >>> repos = np.lib.npyio.DataSource()
        >>> repos.exists('www.google.com/index.html')
        False
        >>> repos.exists('http://www.google.com/index.html')
        True
    Temporary directories are deleted when the DataSource is deleted.
    Examples
    --------
    ::
        >>> ds = np.lib.npyio.DataSource('/home/guido')
        >>> urlname = 'http://www.google.com/'
        >>> gfile = ds.open('http://www.google.com/')
        >>> ds.abspath(urlname)
        '/home/guido/www.google.com/index.html'
        >>> ds = np.lib.npyio.DataSource(None)  # use with temporary file
        >>> ds.open('/home/guido/foobar.txt')
        <open file '/home/guido.foobar.txt', mode 'r' at 0x91d4430>
        >>> ds.abspath('/home/guido/foobar.txt')
        '/tmp/.../home/guido/foobar.txt'
    """
    def __init__(self, destpath=os.curdir):
        """Create a DataSource with a local path at destpath."""
        if destpath:
            self._destpath = os.path.abspath(destpath)
            self._istmpdest = False
        else:
            import tempfile  # deferring import to improve startup time
            self._destpath = tempfile.mkdtemp()
            self._istmpdest = True

    def __del__(self):
        # Remove temp directories.  The hasattr guard protects against
        # partially-initialized instances (e.g. __init__ raised early).
        if hasattr(self, '_istmpdest') and self._istmpdest:
            import shutil
            shutil.rmtree(self._destpath)

    def _iszip(self, filename):
        """Test if the filename is a zip file by looking at the file extension.
        """
        _, ext = os.path.splitext(filename)
        # Note: the extension keys include the leading dot (e.g. '.gz').
        return ext in _file_openers.keys()

    def _iswritemode(self, mode):
        """Test if the given mode will open a file for writing."""
        # Currently only used to test the bz2 files.
        _writemodes = ("w", "+")
        return any(c in _writemodes for c in mode)

    def _splitzipext(self, filename):
        """Split zip extension from filename and return filename.
        Returns
        -------
        base, zip_ext : {tuple}
            `zip_ext` keeps its leading dot (e.g. ``'.bz2'``) and is None
            for filenames without a recognized compression extension.
        """
        if self._iszip(filename):
            return os.path.splitext(filename)
        else:
            return filename, None

    def _possible_names(self, filename):
        """Return a list containing compressed filename variations."""
        names = [filename]
        if not self._iszip(filename):
            for zipext in _file_openers.keys():
                if zipext:
                    names.append(filename+zipext)
        return names

    def _isurl(self, path):
        """Test if path is a net location. Tests the scheme and netloc."""
        # We do this here to reduce the 'import numpy' initial import time.
        from urllib.parse import urlparse
        # BUG : URLs require a scheme string ('http://') to be used.
        #       www.google.com will fail.
        #       Should we prepend the scheme for those that don't have it and
        #       test that also?  Similar to the way we append .gz and test for
        #       for compressed versions of files.
        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
        return bool(scheme and netloc)

    def _cache(self, path):
        """Cache the file specified by path.
        Creates a copy of the file in the datasource cache.
        """
        # We import these here because importing them is slow and
        # a significant fraction of numpy's total import time.
        import shutil
        from urllib.request import urlopen

        upath = self.abspath(path)

        # ensure directory exists; exist_ok avoids a race between the
        # existence check and the creation.
        os.makedirs(os.path.dirname(upath), exist_ok=True)

        # TODO: Doesn't handle compressed files!
        if self._isurl(path):
            with urlopen(path) as openedurl:
                with _open(upath, 'wb') as f:
                    shutil.copyfileobj(openedurl, f)
        else:
            shutil.copyfile(path, upath)
        return upath

    def _findfile(self, path):
        """Searches for ``path`` and returns full path if found.
        If path is an URL, _findfile will cache a local copy and return the
        path to the cached file.  If path is a local file, _findfile will
        return a path to that local file.
        The search will include possible compressed versions of the file
        and return the first occurrence found.
        """
        # Build list of possible local file paths
        if not self._isurl(path):
            # Valid local paths
            filelist = self._possible_names(path)
            # Paths in self._destpath
            filelist += self._possible_names(self.abspath(path))
        else:
            # Cached URLs in self._destpath
            filelist = self._possible_names(self.abspath(path))
            # Remote URLs
            filelist = filelist + self._possible_names(path)

        for name in filelist:
            if self.exists(name):
                if self._isurl(name):
                    name = self._cache(name)
                return name
        return None

    def abspath(self, path):
        """
        Return absolute path of file in the DataSource directory.
        If `path` is an URL, then `abspath` will return either the location
        the file exists locally or the location it would exist when opened
        using the `open` method.
        Parameters
        ----------
        path : str or pathlib.Path
            Can be a local file or a remote URL.
        Returns
        -------
        out : str
            Complete path, including the `DataSource` destination directory.
        Notes
        -----
        The functionality is based on `os.path.abspath`.
        """
        # We do this here to reduce the 'import numpy' initial import time.
        from urllib.parse import urlparse

        # TODO:  This should be more robust.  Handles case where path includes
        #        the destpath, but not other sub-paths. Failing case:
        #        path = /home/guido/datafile.txt
        #        destpath = /home/alex/
        #        upath = self.abspath(path)
        #        upath == '/home/alex/home/guido/datafile.txt'

        # handle case where path includes self._destpath
        splitpath = path.split(self._destpath, 2)
        if len(splitpath) > 1:
            path = splitpath[1]
        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
        netloc = self._sanitize_relative_path(netloc)
        upath = self._sanitize_relative_path(upath)
        return os.path.join(self._destpath, netloc, upath)

    def _sanitize_relative_path(self, path):
        """Return a sanitised relative path for which
        os.path.abspath(os.path.join(base, path)).startswith(base)
        """
        last = None
        path = os.path.normpath(path)
        # Repeatedly strip leading separators and parent references until
        # the path stops changing (a single pass is not enough for e.g.
        # '/../../x').
        while path != last:
            last = path
            # Note: os.path.join treats '/' as os.sep on Windows
            path = path.lstrip(os.sep).lstrip('/')
            path = path.lstrip(os.pardir).removeprefix('..')
            drive, path = os.path.splitdrive(path)  # for Windows
        return path

    def exists(self, path):
        """
        Test if path exists.
        Test if `path` exists as (and in this order):
        - a local file.
        - a remote URL that has been downloaded and stored locally in the
          `DataSource` directory.
        - a remote URL that has not been downloaded, but is valid and
          accessible.
        Parameters
        ----------
        path : str or pathlib.Path
            Can be a local file or a remote URL.
        Returns
        -------
        out : bool
            True if `path` exists.
        Notes
        -----
        When `path` is an URL, `exists` will return True if it's either
        stored locally in the `DataSource` directory, or is a valid remote
        URL.  `DataSource` does not discriminate between the two, the file
        is accessible if it exists in either location.
        """
        # First test for local path
        if os.path.exists(path):
            return True

        # We import this here because importing urllib is slow and
        # a significant fraction of numpy's total import time.
        from urllib.request import urlopen
        from urllib.error import URLError

        # Test cached url
        upath = self.abspath(path)
        if os.path.exists(upath):
            return True

        # Test remote url
        if self._isurl(path):
            try:
                netfile = urlopen(path)
                netfile.close()
                return True
            except URLError:
                return False
        return False

    def open(self, path, mode='r', encoding=None, newline=None):
        """
        Open and return file-like object.
        If `path` is an URL, it will be downloaded, stored in the
        `DataSource` directory and opened from there.
        Parameters
        ----------
        path : str or pathlib.Path
            Local file path or URL to open.
        mode : {'r', 'w', 'a'}, optional
            Mode to open `path`.  Mode 'r' for reading, 'w' for writing,
            'a' to append. Available modes depend on the type of object
            specified by `path`. Default is 'r'.
        encoding : {None, str}, optional
            Open text file with given encoding. The default encoding will be
            what `open` uses.
        newline : {None, str}, optional
            Newline to use when reading text file.
        Returns
        -------
        out : file object
            File object.
        """
        # TODO: There is no support for opening a file for writing which
        #       doesn't exist yet (creating a file).  Should there be?

        # TODO: Add a ``subdir`` parameter for specifying the subdirectory
        #       used to store URLs in self._destpath.

        if self._isurl(path) and self._iswritemode(mode):
            raise ValueError("URLs are not writeable")

        # NOTE: _findfile will fail on a new file opened for writing.
        found = self._findfile(path)
        if found:
            _fname, ext = self._splitzipext(found)
            if ext == '.bz2':
                # bz2 does not support update ('+') modes; strip the flag.
                # (Previously this compared against 'bz2' without the dot and
                # discarded the str.replace result, so it never took effect.)
                mode = mode.replace("+", "")
            return _file_openers[ext](found, mode=mode,
                                      encoding=encoding, newline=newline)
        else:
            raise FileNotFoundError(f"{path} not found.")
class Repository (DataSource):
    """
    Repository(baseurl, destpath='.')

    A data repository where multiple DataSource's share a base
    URL/directory.

    `Repository` extends `DataSource` by prepending a base URL (or
    directory) to all the files it handles. Use `Repository` when you will
    be working with multiple files from one base URL. Initialize
    `Repository` with the base URL, then refer to each file by its filename
    only.

    Parameters
    ----------
    baseurl : str
        Path to the local directory or remote location that contains the
        data files.
    destpath : str or None, optional
        Path to the directory where the source file gets downloaded to for
        use. If `destpath` is None, a temporary directory will be created.
        The default path is the current directory.

    Examples
    --------
    To analyze all files in the repository, do something like this
    (note: this is not self-contained code)::

        >>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
        >>> for filename in filelist:
        ...     fp = repos.open(filename)
        ...     fp.analyze()
        ...     fp.close()

    Similarly you could use a URL for a repository::

        >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')

    """

    def __init__(self, baseurl, destpath=os.curdir):
        """Create a Repository with a shared url or directory of baseurl."""
        super().__init__(destpath=destpath)
        self._baseurl = baseurl

    def __del__(self):
        super().__del__()

    def _fullpath(self, path):
        """Return complete path for path. Prepends baseurl if necessary."""
        # Splitting on the base URL tells us whether ``path`` already
        # contains it: a single-element result means it does not.
        pieces = path.split(self._baseurl, 2)
        if len(pieces) > 1:
            # ``path`` already carries the base URL; use it unchanged.
            return path
        return os.path.join(self._baseurl, path)

    def _findfile(self, path):
        """Extend DataSource method to prepend baseurl to ``path``."""
        return super()._findfile(self._fullpath(path))

    def abspath(self, path):
        """
        Return absolute path of file in the Repository directory.

        If `path` is an URL, then `abspath` will return either the location
        the file exists locally or the location it would exist when opened
        using the `open` method.

        Parameters
        ----------
        path : str or pathlib.Path
            Can be a local file or a remote URL. This may, but does not
            have to, include the `baseurl` with which the `Repository` was
            initialized.

        Returns
        -------
        out : str
            Complete path, including the `DataSource` destination directory.

        """
        return super().abspath(self._fullpath(path))

    def exists(self, path):
        """
        Test if path exists prepending Repository base URL to path.

        Test if `path` exists as (and in this order):

        - a local file.
        - a remote URL that has been downloaded and stored locally in the
          `DataSource` directory.
        - a remote URL that has not been downloaded, but is valid and
          accessible.

        Parameters
        ----------
        path : str or pathlib.Path
            Can be a local file or a remote URL. This may, but does not
            have to, include the `baseurl` with which the `Repository` was
            initialized.

        Returns
        -------
        out : bool
            True if `path` exists.

        Notes
        -----
        When `path` is an URL, `exists` will return True if it's either
        stored locally in the `DataSource` directory, or is a valid remote
        URL. `DataSource` does not discriminate between the two, the file
        is accessible if it exists in either location.

        """
        return super().exists(self._fullpath(path))

    def open(self, path, mode='r', encoding=None, newline=None):
        """
        Open and return file-like object prepending Repository base URL.

        If `path` is an URL, it will be downloaded, stored in the
        DataSource directory and opened from there.

        Parameters
        ----------
        path : str or pathlib.Path
            Local file path or URL to open. This may, but does not have to,
            include the `baseurl` with which the `Repository` was
            initialized.
        mode : {'r', 'w', 'a'}, optional
            Mode to open `path`. Mode 'r' for reading, 'w' for writing,
            'a' to append. Available modes depend on the type of object
            specified by `path`. Default is 'r'.
        encoding : {None, str}, optional
            Open text file with given encoding. The default encoding will be
            what `open` uses.
        newline : {None, str}, optional
            Newline to use when reading text file.

        Returns
        -------
        out : file object
            File object.

        """
        return super().open(self._fullpath(path), mode,
                            encoding=encoding, newline=newline)

    def listdir(self):
        """
        List files in the source Repository.

        Returns
        -------
        files : list of str or pathlib.Path
            List of file names (not containing a directory part).

        Notes
        -----
        Does not currently work for remote repositories.

        """
        if self._isurl(self._baseurl):
            raise NotImplementedError(
                "Directory listing of URLs, not supported yet.")
        return os.listdir(self._baseurl)

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,775 @@
from collections.abc import Sequence, Iterator, Callable, Iterable
from typing import (
Literal as L,
Any,
TypeVar,
overload,
Protocol,
SupportsIndex,
SupportsInt,
TypeGuard
)
from numpy import (
vectorize as vectorize,
generic,
integer,
floating,
complexfloating,
intp,
float64,
complex128,
timedelta64,
datetime64,
object_,
bool as bool_,
_OrderKACF,
)
from numpy._typing import (
NDArray,
ArrayLike,
DTypeLike,
_ShapeLike,
_ScalarLike_co,
_DTypeLike,
_ArrayLike,
_ArrayLikeInt_co,
_ArrayLikeFloat_co,
_ArrayLikeComplex_co,
_ArrayLikeTD64_co,
_ArrayLikeDT64_co,
_ArrayLikeObject_co,
_FloatLike_co,
_ComplexLike_co,
)
from numpy._core.multiarray import (
bincount as bincount,
)
# Type variables shared by the annotations in this stub.
_T = TypeVar("_T")
_T_co = TypeVar("_T_co", covariant=True)
_SCT = TypeVar("_SCT", bound=generic)  # any numpy scalar type
_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])  # preserves the exact array type
# A homogeneous pair (used e.g. for `average(..., returned=True)`).
_2Tuple = tuple[_T, _T]
class _TrimZerosSequence(Protocol[_T_co]):
    # Minimal interface `trim_zeros` needs: sized, slice-indexable, iterable.
    def __len__(self) -> int: ...
    def __getitem__(self, key: slice, /) -> _T_co: ...
    def __iter__(self) -> Iterator[Any]: ...
class _SupportsWriteFlush(Protocol):
    # Duck-typed writable stream (``write`` + ``flush``).
    def write(self, s: str, /) -> object: ...
    def flush(self) -> object: ...
__all__: list[str]
# `rot90`: a known scalar type is propagated; otherwise the result is untyped.
@overload
def rot90(
    m: _ArrayLike[_SCT],
    k: int = ...,
    axes: tuple[int, int] = ...,
) -> NDArray[_SCT]: ...
@overload
def rot90(
    m: ArrayLike,
    k: int = ...,
    axes: tuple[int, int] = ...,
) -> NDArray[Any]: ...
# `flip`: scalar input with `axis=None` keeps its type; arrays keep their dtype.
@overload
def flip(m: _SCT, axis: None = ...) -> _SCT: ...
@overload
def flip(m: _ScalarLike_co, axis: None = ...) -> Any: ...
@overload
def flip(m: _ArrayLike[_SCT], axis: None | _ShapeLike = ...) -> NDArray[_SCT]: ...
@overload
def flip(m: ArrayLike, axis: None | _ShapeLike = ...) -> NDArray[Any]: ...
# Declared as a `TypeGuard` so a True result narrows `y` to `Iterable[Any]`.
def iterable(y: object) -> TypeGuard[Iterable[Any]]: ...
# `average` overloads: the scalar result type follows the input kind
# (floating / complexfloating / object), and `returned=True` switches the
# result to a 2-tuple of (average, sum_of_weights).
@overload
def average(
    a: _ArrayLikeFloat_co,
    axis: None = ...,
    weights: None | _ArrayLikeFloat_co= ...,
    returned: L[False] = ...,
    keepdims: L[False] = ...,
) -> floating[Any]: ...
@overload
def average(
    a: _ArrayLikeComplex_co,
    axis: None = ...,
    weights: None | _ArrayLikeComplex_co = ...,
    returned: L[False] = ...,
    keepdims: L[False] = ...,
) -> complexfloating[Any, Any]: ...
@overload
def average(
    a: _ArrayLikeObject_co,
    axis: None = ...,
    weights: None | Any = ...,
    returned: L[False] = ...,
    keepdims: L[False] = ...,
) -> Any: ...
# `returned=True` variants: 2-tuple results.
@overload
def average(
    a: _ArrayLikeFloat_co,
    axis: None = ...,
    weights: None | _ArrayLikeFloat_co= ...,
    returned: L[True] = ...,
    keepdims: L[False] = ...,
) -> _2Tuple[floating[Any]]: ...
@overload
def average(
    a: _ArrayLikeComplex_co,
    axis: None = ...,
    weights: None | _ArrayLikeComplex_co = ...,
    returned: L[True] = ...,
    keepdims: L[False] = ...,
) -> _2Tuple[complexfloating[Any, Any]]: ...
@overload
def average(
    a: _ArrayLikeObject_co,
    axis: None = ...,
    weights: None | Any = ...,
    returned: L[True] = ...,
    keepdims: L[False] = ...,
) -> _2Tuple[Any]: ...
# With an explicit `axis` or `keepdims` the result may be an array, so the
# return type degrades to `Any`.
@overload
def average(
    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    axis: None | _ShapeLike = ...,
    weights: None | Any = ...,
    returned: L[False] = ...,
    keepdims: bool = ...,
) -> Any: ...
@overload
def average(
    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    axis: None | _ShapeLike = ...,
    weights: None | Any = ...,
    returned: L[True] = ...,
    keepdims: bool = ...,
) -> _2Tuple[Any]: ...
# `asarray_chkfinite`: dtype propagation mirrors `np.asarray` -- a known
# input scalar type or an explicit `dtype` determines the result dtype.
@overload
def asarray_chkfinite(
    a: _ArrayLike[_SCT],
    dtype: None = ...,
    order: _OrderKACF = ...,
) -> NDArray[_SCT]: ...
@overload
def asarray_chkfinite(
    a: object,
    dtype: None = ...,
    order: _OrderKACF = ...,
) -> NDArray[Any]: ...
@overload
def asarray_chkfinite(
    a: Any,
    dtype: _DTypeLike[_SCT],
    order: _OrderKACF = ...,
) -> NDArray[_SCT]: ...
@overload
def asarray_chkfinite(
    a: Any,
    dtype: DTypeLike,
    order: _OrderKACF = ...,
) -> NDArray[Any]: ...
# TODO: Use PEP 612 `ParamSpec` once mypy supports `Concatenate`
# xref python/mypy#8645
# `piecewise`: result dtype follows `x`; extra *args/**kw are forwarded to
# the callables in `funclist` and cannot be typed without ParamSpec (above).
@overload
def piecewise(
    x: _ArrayLike[_SCT],
    condlist: ArrayLike,
    funclist: Sequence[Any | Callable[..., Any]],
    *args: Any,
    **kw: Any,
) -> NDArray[_SCT]: ...
@overload
def piecewise(
    x: ArrayLike,
    condlist: ArrayLike,
    funclist: Sequence[Any | Callable[..., Any]],
    *args: Any,
    **kw: Any,
) -> NDArray[Any]: ...
def select(
    condlist: Sequence[ArrayLike],
    choicelist: Sequence[ArrayLike],
    default: ArrayLike = ...,
) -> NDArray[Any]: ...
# `copy`: with `subok=True` the exact array subclass is preserved
# (`_ArrayType`); otherwise the result is a plain `ndarray`.
@overload
def copy(
    a: _ArrayType,
    order: _OrderKACF,
    subok: L[True],
) -> _ArrayType: ...
@overload
def copy(
    a: _ArrayType,
    order: _OrderKACF = ...,
    *,
    subok: L[True],
) -> _ArrayType: ...
@overload
def copy(
    a: _ArrayLike[_SCT],
    order: _OrderKACF = ...,
    subok: L[False] = ...,
) -> NDArray[_SCT]: ...
@overload
def copy(
    a: ArrayLike,
    order: _OrderKACF = ...,
    subok: L[False] = ...,
) -> NDArray[Any]: ...
# `gradient` may return a single array or a list of arrays (one per axis),
# hence the untyped `Any` result.
def gradient(
    f: ArrayLike,
    *varargs: ArrayLike,
    axis: None | _ShapeLike = ...,
    edge_order: L[1, 2] = ...,
) -> Any: ...
# `diff`: with `n=0` the input is returned unchanged (same static type).
@overload
def diff(
    a: _T,
    n: L[0],
    axis: SupportsIndex = ...,
    prepend: ArrayLike = ...,
    append: ArrayLike = ...,
) -> _T: ...
@overload
def diff(
    a: ArrayLike,
    n: int = ...,
    axis: SupportsIndex = ...,
    prepend: ArrayLike = ...,
    append: ArrayLike = ...,
) -> NDArray[Any]: ...
# `interp`: real sample values give float64, complex give complex128.
@overload
def interp(
    x: _ArrayLikeFloat_co,
    xp: _ArrayLikeFloat_co,
    fp: _ArrayLikeFloat_co,
    left: None | _FloatLike_co = ...,
    right: None | _FloatLike_co = ...,
    period: None | _FloatLike_co = ...,
) -> NDArray[float64]: ...
@overload
def interp(
    x: _ArrayLikeFloat_co,
    xp: _ArrayLikeFloat_co,
    fp: _ArrayLikeComplex_co,
    left: None | _ComplexLike_co = ...,
    right: None | _ComplexLike_co = ...,
    period: None | _FloatLike_co = ...,
) -> NDArray[complex128]: ...
# `angle`: scalar in -> scalar out, array in -> array out.
@overload
def angle(z: _ComplexLike_co, deg: bool = ...) -> floating[Any]: ...
@overload
def angle(z: object_, deg: bool = ...) -> Any: ...
@overload
def angle(z: _ArrayLikeComplex_co, deg: bool = ...) -> NDArray[floating[Any]]: ...
@overload
def angle(z: _ArrayLikeObject_co, deg: bool = ...) -> NDArray[object_]: ...
@overload
def unwrap(
    p: _ArrayLikeFloat_co,
    discont: None | float = ...,
    axis: int = ...,
    *,
    period: float = ...,
) -> NDArray[floating[Any]]: ...
@overload
def unwrap(
    p: _ArrayLikeObject_co,
    discont: None | float = ...,
    axis: int = ...,
    *,
    period: float = ...,
) -> NDArray[object_]: ...
def sort_complex(a: ArrayLike) -> NDArray[complexfloating[Any, Any]]: ...
# `trim_zeros` returns a slice of the input, so the input's own type is kept.
def trim_zeros(
    filt: _TrimZerosSequence[_T],
    trim: L["f", "b", "fb", "bf"] = ...,
) -> _T: ...
@overload
def extract(condition: ArrayLike, arr: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
@overload
def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ...
# `place` mutates `arr` in place and returns nothing.
def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ...
# `cov`: float inputs -> floating result, complex inputs -> complexfloating,
# and an explicit `dtype` overrides the inferred result type.
@overload
def cov(
    m: _ArrayLikeFloat_co,
    y: None | _ArrayLikeFloat_co = ...,
    rowvar: bool = ...,
    bias: bool = ...,
    ddof: None | SupportsIndex | SupportsInt = ...,
    fweights: None | ArrayLike = ...,
    aweights: None | ArrayLike = ...,
    *,
    dtype: None = ...,
) -> NDArray[floating[Any]]: ...
@overload
def cov(
    m: _ArrayLikeComplex_co,
    y: None | _ArrayLikeComplex_co = ...,
    rowvar: bool = ...,
    bias: bool = ...,
    ddof: None | SupportsIndex | SupportsInt = ...,
    fweights: None | ArrayLike = ...,
    aweights: None | ArrayLike = ...,
    *,
    dtype: None = ...,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def cov(
    m: _ArrayLikeComplex_co,
    y: None | _ArrayLikeComplex_co = ...,
    rowvar: bool = ...,
    bias: bool = ...,
    ddof: None | SupportsIndex | SupportsInt = ...,
    fweights: None | ArrayLike = ...,
    aweights: None | ArrayLike = ...,
    *,
    dtype: _DTypeLike[_SCT],
) -> NDArray[_SCT]: ...
@overload
def cov(
    m: _ArrayLikeComplex_co,
    y: None | _ArrayLikeComplex_co = ...,
    rowvar: bool = ...,
    bias: bool = ...,
    ddof: None | SupportsIndex | SupportsInt = ...,
    fweights: None | ArrayLike = ...,
    aweights: None | ArrayLike = ...,
    *,
    dtype: DTypeLike,
) -> NDArray[Any]: ...
# NOTE `bias` and `ddof` have been deprecated
# `corrcoef`: same dtype-propagation scheme as `cov`, minus the weights.
@overload
def corrcoef(
    m: _ArrayLikeFloat_co,
    y: None | _ArrayLikeFloat_co = ...,
    rowvar: bool = ...,
    *,
    dtype: None = ...,
) -> NDArray[floating[Any]]: ...
@overload
def corrcoef(
    m: _ArrayLikeComplex_co,
    y: None | _ArrayLikeComplex_co = ...,
    rowvar: bool = ...,
    *,
    dtype: None = ...,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def corrcoef(
    m: _ArrayLikeComplex_co,
    y: None | _ArrayLikeComplex_co = ...,
    rowvar: bool = ...,
    *,
    dtype: _DTypeLike[_SCT],
) -> NDArray[_SCT]: ...
@overload
def corrcoef(
    m: _ArrayLikeComplex_co,
    y: None | _ArrayLikeComplex_co = ...,
    rowvar: bool = ...,
    *,
    dtype: DTypeLike,
) -> NDArray[Any]: ...
# Window functions: each takes the window length and returns a float array.
def blackman(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
def bartlett(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
def hanning(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
def hamming(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
def i0(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...
def kaiser(
    M: _FloatLike_co,
    beta: _FloatLike_co,
) -> NDArray[floating[Any]]: ...
# `sinc`: scalar in -> scalar out; real vs. complex kind is preserved.
@overload
def sinc(x: _FloatLike_co) -> floating[Any]: ...
@overload
def sinc(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def sinc(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...
@overload
def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
# `median`: with the default `axis=None`/`keepdims=False` the scalar result
# type follows the input kind; `out=` variants return the `out` array itself.
@overload
def median(
    a: _ArrayLikeFloat_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    keepdims: L[False] = ...,
) -> floating[Any]: ...
@overload
def median(
    a: _ArrayLikeComplex_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    keepdims: L[False] = ...,
) -> complexfloating[Any, Any]: ...
@overload
def median(
    a: _ArrayLikeTD64_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    keepdims: L[False] = ...,
) -> timedelta64: ...
@overload
def median(
    a: _ArrayLikeObject_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    keepdims: L[False] = ...,
) -> Any: ...
@overload
def median(
    a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
    axis: None | _ShapeLike = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    keepdims: bool = ...,
) -> Any: ...
@overload
def median(
    a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
    axis: None | _ShapeLike,
    out: _ArrayType,
    /,
    overwrite_input: bool = ...,
    keepdims: bool = ...,
) -> _ArrayType: ...
@overload
def median(
    a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
    axis: None | _ShapeLike = ...,
    *,
    out: _ArrayType,
    overwrite_input: bool = ...,
    keepdims: bool = ...,
) -> _ArrayType: ...
# Names accepted by the `method=` parameter of `percentile`/`quantile`.
_MethodKind = L[
    "inverted_cdf",
    "averaged_inverted_cdf",
    "closest_observation",
    "interpolated_inverted_cdf",
    "hazen",
    "weibull",
    "linear",
    "median_unbiased",
    "normal_unbiased",
    "lower",
    "higher",
    "midpoint",
    "nearest",
]
# `percentile`: a scalar `q` yields a scalar whose type follows the input
# kind (floating / complex / timedelta64 / datetime64 / object); an
# array-like `q` yields an array of the matching dtype; `out=` variants
# return the `out` array itself.
@overload
def percentile(
    a: _ArrayLikeFloat_co,
    q: _FloatLike_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
    *,
    weights: None | _ArrayLikeFloat_co = ...,
) -> floating[Any]: ...
@overload
def percentile(
    a: _ArrayLikeComplex_co,
    q: _FloatLike_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
    *,
    weights: None | _ArrayLikeFloat_co = ...,
) -> complexfloating[Any, Any]: ...
@overload
def percentile(
    a: _ArrayLikeTD64_co,
    q: _FloatLike_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
    *,
    weights: None | _ArrayLikeFloat_co = ...,
) -> timedelta64: ...
@overload
def percentile(
    a: _ArrayLikeDT64_co,
    q: _FloatLike_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
    *,
    weights: None | _ArrayLikeFloat_co = ...,
) -> datetime64: ...
@overload
def percentile(
    a: _ArrayLikeObject_co,
    q: _FloatLike_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
    *,
    weights: None | _ArrayLikeFloat_co = ...,
) -> Any: ...
# Array-like `q` variants: one result element per percentile.
@overload
def percentile(
    a: _ArrayLikeFloat_co,
    q: _ArrayLikeFloat_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
    *,
    weights: None | _ArrayLikeFloat_co = ...,
) -> NDArray[floating[Any]]: ...
@overload
def percentile(
    a: _ArrayLikeComplex_co,
    q: _ArrayLikeFloat_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
    *,
    weights: None | _ArrayLikeFloat_co = ...,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def percentile(
    a: _ArrayLikeTD64_co,
    q: _ArrayLikeFloat_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
    *,
    weights: None | _ArrayLikeFloat_co = ...,
) -> NDArray[timedelta64]: ...
@overload
def percentile(
    a: _ArrayLikeDT64_co,
    q: _ArrayLikeFloat_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
    *,
    weights: None | _ArrayLikeFloat_co = ...,
) -> NDArray[datetime64]: ...
@overload
def percentile(
    a: _ArrayLikeObject_co,
    q: _ArrayLikeFloat_co,
    axis: None = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: L[False] = ...,
    *,
    weights: None | _ArrayLikeFloat_co = ...,
) -> NDArray[object_]: ...
# With an explicit `axis`/`keepdims` the shape is not statically known.
@overload
def percentile(
    a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
    q: _ArrayLikeFloat_co,
    axis: None | _ShapeLike = ...,
    out: None = ...,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: bool = ...,
    *,
    weights: None | _ArrayLikeFloat_co = ...,
) -> Any: ...
@overload
def percentile(
    a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
    q: _ArrayLikeFloat_co,
    axis: None | _ShapeLike,
    out: _ArrayType,
    /,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: bool = ...,
    *,
    weights: None | _ArrayLikeFloat_co = ...,
) -> _ArrayType: ...
@overload
def percentile(
    a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
    q: _ArrayLikeFloat_co,
    axis: None | _ShapeLike = ...,
    *,
    out: _ArrayType,
    overwrite_input: bool = ...,
    method: _MethodKind = ...,
    keepdims: bool = ...,
    weights: None | _ArrayLikeFloat_co = ...,
) -> _ArrayType: ...
# NOTE: Not an alias, but they do have identical signatures
# (that we can reuse)
quantile = percentile
# Scalar kinds `trapezoid` can produce besides plain floats.
_SCT_fm = TypeVar(
    "_SCT_fm",
    bound=floating[Any] | complexfloating[Any, Any] | timedelta64,
)
class _SupportsRMulFloat(Protocol[_T_co]):
    # Anything that can be right-multiplied by a float (used by `trapezoid`
    # for objects implementing arithmetic, e.g. fractions).
    def __rmul__(self, other: float, /) -> _T_co: ...
# `trapezoid`: the result type follows the sample values `y`; 1-D input
# gives a scalar, higher dimensions give an array.
@overload
def trapezoid(  # type: ignore[overload-overlap]
    y: Sequence[_FloatLike_co],
    x: Sequence[_FloatLike_co] | None = ...,
    dx: float = ...,
    axis: SupportsIndex = ...,
) -> float64: ...
@overload
def trapezoid(
    y: Sequence[_ComplexLike_co],
    x: Sequence[_ComplexLike_co] | None = ...,
    dx: float = ...,
    axis: SupportsIndex = ...,
) -> complex128: ...
@overload
def trapezoid(
    y: _ArrayLike[bool_ | integer[Any]],
    x: _ArrayLike[bool_ | integer[Any]] | None = ...,
    dx: float = ...,
    axis: SupportsIndex = ...,
) -> float64 | NDArray[float64]: ...
@overload
def trapezoid(  # type: ignore[overload-overlap]
    y: _ArrayLikeObject_co,
    x: _ArrayLikeFloat_co | _ArrayLikeObject_co | None = ...,
    dx: float = ...,
    axis: SupportsIndex = ...,
) -> float | NDArray[object_]: ...
@overload
def trapezoid(
    y: _ArrayLike[_SCT_fm],
    x: _ArrayLike[_SCT_fm] | _ArrayLikeInt_co | None = ...,
    dx: float = ...,
    axis: SupportsIndex = ...,
) -> _SCT_fm | NDArray[_SCT_fm]: ...
@overload
def trapezoid(
    y: Sequence[_SupportsRMulFloat[_T]],
    x: Sequence[_SupportsRMulFloat[_T] | _T] | None = ...,
    dx: float = ...,
    axis: SupportsIndex = ...,
) -> _T: ...
@overload
def trapezoid(
    y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
    x: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co | None = ...,
    dx: float = ...,
    axis: SupportsIndex = ...,
) -> (
    floating[Any] | complexfloating[Any, Any] | timedelta64
    | NDArray[floating[Any] | complexfloating[Any, Any] | timedelta64 | object_]
): ...
# `meshgrid` returns one coordinate array per input sequence.
def meshgrid(
    *xi: ArrayLike,
    copy: bool = ...,
    sparse: bool = ...,
    indexing: L["xy", "ij"] = ...,
) -> tuple[NDArray[Any], ...]: ...
# `delete`/`insert`: a known input scalar type is preserved in the result.
@overload
def delete(
    arr: _ArrayLike[_SCT],
    obj: slice | _ArrayLikeInt_co,
    axis: None | SupportsIndex = ...,
) -> NDArray[_SCT]: ...
@overload
def delete(
    arr: ArrayLike,
    obj: slice | _ArrayLikeInt_co,
    axis: None | SupportsIndex = ...,
) -> NDArray[Any]: ...
@overload
def insert(
    arr: _ArrayLike[_SCT],
    obj: slice | _ArrayLikeInt_co,
    values: ArrayLike,
    axis: None | SupportsIndex = ...,
) -> NDArray[_SCT]: ...
@overload
def insert(
    arr: ArrayLike,
    obj: slice | _ArrayLikeInt_co,
    values: ArrayLike,
    axis: None | SupportsIndex = ...,
) -> NDArray[Any]: ...
def append(
    arr: ArrayLike,
    values: ArrayLike,
    axis: None | SupportsIndex = ...,
) -> NDArray[Any]: ...
# `digitize`: scalar in -> scalar index, array in -> index array.
@overload
def digitize(
    x: _FloatLike_co,
    bins: _ArrayLikeFloat_co,
    right: bool = ...,
) -> intp: ...
@overload
def digitize(
    x: _ArrayLikeFloat_co,
    bins: _ArrayLikeFloat_co,
    right: bool = ...,
) -> NDArray[intp]: ...

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,47 @@
from collections.abc import Sequence
from typing import (
Literal as L,
Any,
SupportsIndex,
)
from numpy._typing import (
NDArray,
ArrayLike,
)
# Bin-estimation method names accepted by the `bins=` parameter below.
_BinKind = L[
    "stone",
    "auto",
    "doane",
    "fd",
    "rice",
    "scott",
    "sqrt",
    "sturges",
]
__all__: list[str]
def histogram_bin_edges(
    a: ArrayLike,
    bins: _BinKind | SupportsIndex | ArrayLike = ...,
    range: None | tuple[float, float] = ...,
    weights: None | ArrayLike = ...,
) -> NDArray[Any]: ...
# Returns (counts, bin_edges).
def histogram(
    a: ArrayLike,
    bins: _BinKind | SupportsIndex | ArrayLike = ...,
    range: None | tuple[float, float] = ...,
    density: bool = ...,
    weights: None | ArrayLike = ...,
) -> tuple[NDArray[Any], NDArray[Any]]: ...
# Returns (counts, per-dimension bin edges).
def histogramdd(
    sample: ArrayLike,
    bins: SupportsIndex | ArrayLike = ...,
    range: Sequence[tuple[float, float]] = ...,
    density: None | bool = ...,
    weights: None | ArrayLike = ...,
) -> tuple[NDArray[Any], tuple[NDArray[Any], ...]]: ...

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,154 @@
from collections.abc import Sequence
from typing import (
Any,
TypeVar,
Generic,
overload,
Literal,
SupportsIndex,
)
import numpy as np
from numpy import (
# Circumvent a naming conflict with `AxisConcatenator.matrix`
matrix as _Matrix,
ndenumerate as ndenumerate,
ndindex as ndindex,
ndarray,
dtype,
str_,
bytes_,
int_,
float64,
complex128,
)
from numpy._typing import (
# Arrays
ArrayLike,
_NestedSequence,
_FiniteNestedSequence,
NDArray,
# DTypes
DTypeLike,
_SupportsDType,
)
from numpy._core.multiarray import (
unravel_index as unravel_index,
ravel_multi_index as ravel_multi_index,
)
# Type variables used throughout this stub.
_T = TypeVar("_T")
_DType = TypeVar("_DType", bound=dtype[Any])
_BoolType = TypeVar("_BoolType", Literal[True], Literal[False])  # sparse flag
_TupType = TypeVar("_TupType", bound=tuple[Any, ...])
_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
__all__: list[str]
# `ix_`: each overload maps a Python element type to the dtype of the
# resulting open-mesh index arrays.
@overload
def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DType]]) -> tuple[ndarray[Any, _DType], ...]: ...
@overload
def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[str_], ...]: ...
@overload
def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[bytes_], ...]: ...
@overload
def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[np.bool], ...]: ...
@overload
def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[int_], ...]: ...
@overload
def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[float64], ...]: ...
@overload
def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[complex128], ...]: ...
class nd_grid(Generic[_BoolType]):
    # `sparse` selects the return shape of `__getitem__`: a dense array
    # (False) or a tuple of open-mesh arrays (True).
    sparse: _BoolType
    def __init__(self, sparse: _BoolType = ...) -> None: ...
    @overload
    def __getitem__(
        self: nd_grid[Literal[False]],
        key: slice | Sequence[slice],
    ) -> NDArray[Any]: ...
    @overload
    def __getitem__(
        self: nd_grid[Literal[True]],
        key: slice | Sequence[slice],
    ) -> tuple[NDArray[Any], ...]: ...
class MGridClass(nd_grid[Literal[False]]):
    # Dense (fleshed-out) mesh-grid variant, exposed as `mgrid`.
    def __init__(self) -> None: ...
mgrid: MGridClass
class OGridClass(nd_grid[Literal[True]]):
    # Open (sparse) mesh-grid variant, exposed as `ogrid`.
    def __init__(self) -> None: ...
ogrid: OGridClass
class AxisConcatenator:
    # Configuration shared by the `r_`/`c_` singletons below.
    axis: int
    matrix: bool
    ndmin: int
    trans1d: int
    def __init__(
        self,
        axis: int = ...,
        matrix: bool = ...,
        ndmin: int = ...,
        trans1d: int = ...,
    ) -> None: ...
    @staticmethod
    @overload
    def concatenate(  # type: ignore[misc]
        *a: ArrayLike, axis: SupportsIndex = ..., out: None = ...
    ) -> NDArray[Any]: ...
    @staticmethod
    @overload
    def concatenate(
        *a: ArrayLike, axis: SupportsIndex = ..., out: _ArrayType = ...
    ) -> _ArrayType: ...
    @staticmethod
    def makemat(
        data: ArrayLike, dtype: DTypeLike = ..., copy: bool = ...
    ) -> _Matrix[Any, Any]: ...
    # TODO: Sort out this `__getitem__` method
    def __getitem__(self, key: Any) -> Any: ...
class RClass(AxisConcatenator):
    # Row-wise concatenator, exposed as `r_`.
    axis: Literal[0]
    matrix: Literal[False]
    ndmin: Literal[1]
    trans1d: Literal[-1]
    def __init__(self) -> None: ...
r_: RClass
class CClass(AxisConcatenator):
    # Column-wise concatenator, exposed as `c_`.
    axis: Literal[-1]
    matrix: Literal[False]
    ndmin: Literal[2]
    trans1d: Literal[0]
    def __init__(self) -> None: ...
c_: CClass
class IndexExpression(Generic[_BoolType]):
    # `maketuple` controls whether a bare (non-tuple) index is wrapped in a
    # 1-tuple (`index_exp`) or returned as-is (`s_`).
    maketuple: _BoolType
    def __init__(self, maketuple: _BoolType) -> None: ...
    @overload
    def __getitem__(self, item: _TupType) -> _TupType: ...  # type: ignore[misc]
    @overload
    def __getitem__(self: IndexExpression[Literal[True]], item: _T) -> tuple[_T]: ...
    @overload
    def __getitem__(self: IndexExpression[Literal[False]], item: _T) -> _T: ...
index_exp: IndexExpression[Literal[True]]
s_: IndexExpression[Literal[False]]
def fill_diagonal(a: NDArray[Any], val: Any, wrap: bool = ...) -> None: ...
def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[int_], ...]: ...
def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[int_], ...]: ...
# NOTE: see `numpy/__init__.pyi` for `ndenumerate` and `ndindex`

View File

@ -0,0 +1,899 @@
"""A collection of functions designed to help I/O with ascii files.
"""
__docformat__ = "restructuredtext en"
import numpy as np
import numpy._core.numeric as nx
from numpy._utils import asbytes, asunicode
def _decode_line(line, encoding=None):
"""Decode bytes from binary input streams.
Defaults to decoding from 'latin1'. That differs from the behavior of
np.compat.asunicode that decodes from 'ascii'.
Parameters
----------
line : str or bytes
Line to be decoded.
encoding : str
Encoding used to decode `line`.
Returns
-------
decoded_line : str
"""
if type(line) is bytes:
if encoding is None:
encoding = "latin1"
line = line.decode(encoding)
return line
def _is_string_like(obj):
"""
Check whether obj behaves like a string.
"""
try:
obj + ''
except (TypeError, ValueError):
return False
return True
def _is_bytes_like(obj):
"""
Check whether obj behaves like a bytes object.
"""
try:
obj + b''
except (TypeError, ValueError):
return False
return True
def has_nested_fields(ndtype):
    """
    Returns whether one or several fields of a dtype are nested.

    Parameters
    ----------
    ndtype : dtype
        Data-type of a structured array.

    Raises
    ------
    AttributeError
        If `ndtype` does not have a `names` attribute.

    Examples
    --------
    >>> import numpy as np
    >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)])
    >>> np.lib._iotools.has_nested_fields(dt)
    False

    """
    # An unstructured dtype has ``names is None``; treat it as no fields.
    for fieldname in ndtype.names or ():
        if ndtype[fieldname].names is not None:
            return True
    return False
def flatten_dtype(ndtype, flatten_base=False):
    """
    Unpack a structured data-type by collapsing nested fields and/or fields
    with a shape.

    Note that the field names are lost.

    Parameters
    ----------
    ndtype : dtype
        The datatype to collapse
    flatten_base : bool, optional
        If True, transform a field with a shape into several fields. Default is
        False.

    Examples
    --------
    >>> import numpy as np
    >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
    ...                ('block', int, (2, 3))])
    >>> np.lib._iotools.flatten_dtype(dt)
    [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')]
    >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True)
    [dtype('S4'),
     dtype('float64'),
     dtype('float64'),
     dtype('int64'),
     dtype('int64'),
     dtype('int64'),
     dtype('int64'),
     dtype('int64'),
     dtype('int64')]

    """
    names = ndtype.names
    if names is None:
        # Leaf dtype: optionally repeat the base once per element of the
        # field's shape (np.prod of an empty shape is 1).
        repeat = int(np.prod(ndtype.shape)) if flatten_base else 1
        return [ndtype.base] * repeat
    # Structured dtype: recurse into each field in declaration order.
    flattened = []
    for fieldname in names:
        field_dtype = ndtype.fields[fieldname][0]
        flattened.extend(flatten_dtype(field_dtype, flatten_base))
    return flattened
class LineSplitter:
    """
    Object to split a string at a given delimiter or at given places.

    Parameters
    ----------
    delimiter : str, int, or sequence of ints, optional
        If a string, character used to delimit consecutive fields.
        If an integer or a sequence of integers, width(s) of each field.
    comments : str, optional
        Character used to mark the beginning of a comment. Default is '#'.
    autostrip : bool, optional
        Whether to strip each individual field. Default is True.

    """

    def autostrip(self, method):
        """
        Wrapper to strip each member of the output of `method`.

        Parameters
        ----------
        method : function
            Function that takes a single argument and returns a sequence of
            strings.

        Returns
        -------
        wrapped : function
            The result of wrapping `method`. `wrapped` takes a single input
            argument and returns a list of strings that are stripped of
            white-space.

        """
        return lambda input: [token.strip() for token in method(input)]

    def __init__(self, delimiter=None, comments='#', autostrip=True,
                 encoding=None):
        delimiter = _decode_line(delimiter)
        comments = _decode_line(comments)
        self.comments = comments
        # Pick the splitting strategy from the type of ``delimiter``.
        if delimiter is None or isinstance(delimiter, str):
            # Split on a character (or any whitespace when empty/None).
            delimiter = delimiter or None
            splitter = self._delimited_splitter
        elif hasattr(delimiter, '__iter__'):
            # Sequence of field widths: precompute one slice per field.
            splitter = self._variablewidth_splitter
            offsets = np.cumsum([0] + list(delimiter))
            delimiter = [slice(start, stop)
                         for (start, stop) in zip(offsets[:-1], offsets[1:])]
        elif int(delimiter):
            # Single non-zero integer: fixed field width.
            splitter, delimiter = self._fixedwidth_splitter, int(delimiter)
        else:
            # Zero width falls back to whitespace splitting.
            splitter, delimiter = self._delimited_splitter, None
        self.delimiter = delimiter
        self._handyman = self.autostrip(splitter) if autostrip else splitter
        self.encoding = encoding

    def _delimited_splitter(self, line):
        """Chop off comments, strip, and split at delimiter. """
        if self.comments is not None:
            line = line.split(self.comments)[0]
        line = line.strip(" \r\n")
        return line.split(self.delimiter) if line else []

    def _fixedwidth_splitter(self, line):
        # Cut the (comment-free) line into consecutive chunks of equal width.
        if self.comments is not None:
            line = line.split(self.comments)[0]
        line = line.strip("\r\n")
        if not line:
            return []
        width = self.delimiter
        return [line[start:start + width]
                for start in range(0, len(line), width)]

    def _variablewidth_splitter(self, line):
        # Apply the precomputed per-field slices to the (comment-free) line.
        if self.comments is not None:
            line = line.split(self.comments)[0]
        if not line:
            return []
        return [line[chunk] for chunk in self.delimiter]

    def __call__(self, line):
        return self._handyman(_decode_line(line, self.encoding))
class NameValidator:
    """
    Object to validate a list of strings to use as field names.

    The strings are stripped of any non alphanumeric character, and spaces
    are replaced by '_'. During instantiation, the user can define a list
    of names to exclude, as well as a list of invalid characters. Names in
    the exclusion list are appended a '_' character.

    Once an instance has been created, it can be called with a list of
    names, and a list of valid names will be created.  The `__call__`
    method accepts an optional keyword "default" that sets the default name
    in case of ambiguity. By default this is 'f', so that names will
    default to `f0`, `f1`, etc.

    Parameters
    ----------
    excludelist : sequence, optional
        A list of names to exclude. This list is appended to the default
        list ['return', 'file', 'print']. Excluded names are appended an
        underscore: for example, `file` becomes `file_` if supplied.
    deletechars : str, optional
        A string combining invalid characters that must be deleted from the
        names.
    case_sensitive : {True, False, 'upper', 'lower'}, optional
        * If True, field names are case-sensitive.
        * If False or 'upper', field names are converted to upper case.
        * If 'lower', field names are converted to lower case.

        The default value is True.
    replace_space : '_', optional
        Character(s) used in replacement of white spaces.

    Notes
    -----
    Calling an instance of `NameValidator` is the same as calling its
    method `validate`.

    Examples
    --------
    >>> import numpy as np
    >>> validator = np.lib._iotools.NameValidator()
    >>> validator(['file', 'field2', 'with space', 'CaSe'])
    ('file_', 'field2', 'with_space', 'CaSe')

    >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'],
    ...                                           deletechars='q',
    ...                                           case_sensitive=False)
    >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe'])
    ('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE')

    """
    defaultexcludelist = ['return', 'file', 'print']
    defaultdeletechars = set(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")

    def __init__(self, excludelist=None, deletechars=None,
                 case_sensitive=None, replace_space='_'):
        # Exclusion list: user names plus the defaults (extends in place,
        # matching the historical behavior when a list is passed in).
        excludelist = [] if excludelist is None else excludelist
        excludelist.extend(self.defaultexcludelist)
        self.excludelist = excludelist
        # Characters to delete; the double quote is always invalid.
        delete = (self.defaultdeletechars if deletechars is None
                  else set(deletechars))
        delete.add('"')
        self.deletechars = delete
        # Case-normalization callable, selected once up front.
        if (case_sensitive is None) or (case_sensitive is True):
            converter = lambda x: x
        elif (case_sensitive is False) or case_sensitive.startswith('u'):
            converter = lambda x: x.upper()
        elif case_sensitive.startswith('l'):
            converter = lambda x: x.lower()
        else:
            raise ValueError(
                'unrecognized case_sensitive value %s.' % case_sensitive)
        self.case_converter = converter
        self.replace_space = replace_space

    def validate(self, names, defaultfmt="f%i", nbfields=None):
        """
        Validate a list of strings as field names for a structured array.

        Parameters
        ----------
        names : sequence of str
            Strings to be validated.
        defaultfmt : str, optional
            Default format string, used if validating a given string
            reduces its length to zero.
        nbfields : integer, optional
            Final number of validated names, used to expand or shrink the
            initial list of names.

        Returns
        -------
        validatednames : list of str
            The list of validated field names.

        Notes
        -----
        A `NameValidator` instance can be called directly, which is the
        same as calling `validate`. For examples, see `NameValidator`.
        """
        # No names at all: only meaningful if a field count was requested.
        if names is None:
            if nbfields is None:
                return None
            names = []
        if isinstance(names, str):
            names = [names, ]
        # Pad with empty names, or truncate, to the requested field count.
        if nbfields is not None:
            nbnames = len(names)
            if nbnames < nbfields:
                names = list(names) + [''] * (nbfields - nbnames)
            elif nbnames > nbfields:
                names = names[:nbfields]
        # Local aliases.
        to_delete = self.deletechars
        excluded = self.excludelist
        fix_case = self.case_converter
        space_repl = self.replace_space
        validatednames = []
        repeat_counts = {}
        nb_unnamed = 0
        for raw in names:
            name = fix_case(raw).strip()
            if space_repl:
                name = name.replace(' ', space_repl)
            name = ''.join(c for c in name if c not in to_delete)
            if name == '':
                # Cleanup emptied the name: synthesize one from `defaultfmt`
                # that does not collide with any of the incoming names.
                name = defaultfmt % nb_unnamed
                while name in names:
                    nb_unnamed += 1
                    name = defaultfmt % nb_unnamed
                nb_unnamed += 1
            elif name in excluded:
                name += '_'
            # Deduplicate repeated names with a numeric suffix.
            seen = repeat_counts.get(name, 0)
            validatednames.append(name if seen == 0 else name + '_%d' % seen)
            repeat_counts[name] = seen + 1
        return tuple(validatednames)

    def __call__(self, names, defaultfmt="f%i", nbfields=None):
        """Alias for `validate`."""
        return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields)
def str2bool(value):
    """
    Tries to transform a string supposed to represent a boolean to a boolean.

    Parameters
    ----------
    value : str
        The string that is transformed to a boolean.

    Returns
    -------
    boolval : bool
        The boolean representation of `value`.

    Raises
    ------
    ValueError
        If the string is not 'True' or 'False' (case independent)

    Examples
    --------
    >>> import numpy as np
    >>> np.lib._iotools.str2bool('TRUE')
    True
    >>> np.lib._iotools.str2bool('false')
    False

    """
    normalized = value.upper()
    if normalized == 'TRUE':
        return True
    if normalized == 'FALSE':
        return False
    raise ValueError("Invalid boolean")
class ConverterError(Exception):
    """
    Exception raised when an error occurs in a converter for string values.

    In particular, raised during an upgrade when no valid conversion
    function can be found in the mapper.
    """
    pass
class ConverterLockError(ConverterError):
    """
    Exception raised when an attempt is made to upgrade a locked converter.

    A converter created with ``locked=True`` refuses any automatic upgrade.
    """
    pass
class ConversionWarning(UserWarning):
    """
    Warning issued when a string converter has a problem.

    Notes
    -----
    In `genfromtxt` a `ConversionWarning` is issued if raising exceptions
    is explicitly suppressed with the "invalid_raise" keyword.
    """
    pass
class StringConverter:
    """
    Factory class for function transforming a string into another object
    (int, float).
    After initialization, an instance can be called to transform a string
    into another object. If the string is recognized as representing a
    missing value, a default value is returned.
    Attributes
    ----------
    func : function
        Function used for the conversion.
    default : any
        Default value to return when the input corresponds to a missing
        value.
    type : type
        Type of the output.
    _status : int
        Integer representing the order of the conversion.
    _mapper : sequence of tuples
        Sequence of tuples (dtype, function, default value) to evaluate in
        order.
    _locked : bool
        Holds `locked` parameter.
    Parameters
    ----------
    dtype_or_func : {None, dtype, function}, optional
        If a `dtype`, specifies the input data type, used to define a basic
        function and a default value for missing data. For example, when
        `dtype` is float, the `func` attribute is set to `float` and the
        default value to `np.nan`. If a function, this function is used to
        convert a string to another object. In this case, it is recommended
        to give an associated default value as input.
    default : any, optional
        Value to return by default, that is, when the string to be
        converted is flagged as missing. If not given, `StringConverter`
        tries to supply a reasonable default value.
    missing_values : {None, sequence of str}, optional
        ``None`` or sequence of strings indicating a missing value. If ``None``
        then missing values are indicated by empty entries. The default is
        ``None``.
    locked : bool, optional
        Whether the StringConverter should be locked to prevent automatic
        upgrade or not. Default is False.
    """
    # Ordered candidate converters: (dtype, conversion function, default).
    # Upgrades walk down this list, so it runs from most to least specific.
    _mapper = [(nx.bool, str2bool, False),
               (nx.int_, int, -1),]
    # On 32-bit systems, we need to make sure that we explicitly include
    # nx.int64 since nx.int_ is nx.int32.
    if nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize:
        _mapper.append((nx.int64, int, -1))
    _mapper.extend([(nx.float64, float, nx.nan),
                    (nx.complex128, complex, nx.nan + 0j),
                    (nx.longdouble, nx.longdouble, nx.nan),
                    # If a non-default dtype is passed, fall back to generic
                    # ones (should only be used for the converter)
                    (nx.integer, int, -1),
                    (nx.floating, float, nx.nan),
                    (nx.complexfloating, complex, nx.nan + 0j),
                    # Last, try with the string types (must be last, because
                    # `_mapper[-1]` is used as default in some cases)
                    (nx.str_, asunicode, '???'),
                    (nx.bytes_, asbytes, '???'),
                    ])
    @classmethod
    def _getdtype(cls, val):
        """Returns the dtype of the input variable."""
        return np.array(val).dtype
    @classmethod
    def _getsubdtype(cls, val):
        """Returns the type of the dtype of the input variable."""
        return np.array(val).dtype.type
    @classmethod
    def _dtypeortype(cls, dtype):
        """Returns dtype for datetime64 and type of dtype otherwise."""
        # This is a bit annoying. We want to return the "general" type in most
        # cases (ie. "string" rather than "S10"), but we want to return the
        # specific type for datetime64 (ie. "datetime64[us]" rather than
        # "datetime64").
        if dtype.type == np.datetime64:
            return dtype
        return dtype.type
    @classmethod
    def upgrade_mapper(cls, func, default=None):
        """
        Upgrade the mapper of a StringConverter by adding a new function and
        its corresponding default.
        The input function (or sequence of functions) and its associated
        default value (if any) is inserted in penultimate position of the
        mapper. The corresponding type is estimated from the dtype of the
        default value.
        Parameters
        ----------
        func : var
            Function, or sequence of functions
        Examples
        --------
        >>> import dateutil.parser
        >>> import datetime
        >>> dateparser = dateutil.parser.parse
        >>> defaultdate = datetime.date(2000, 1, 1)
        >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
        """
        # Func is a single function
        if callable(func):
            cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))
            return
        elif hasattr(func, '__iter__'):
            # A sequence of ready-made (dtype, func, default) tuples.
            if isinstance(func[0], (tuple, list)):
                for _ in func:
                    cls._mapper.insert(-1, _)
                return
            # A sequence of functions: pair each with its default (padding
            # with None when fewer defaults than functions were given).
            if default is None:
                default = [None] * len(func)
            else:
                default = list(default)
                default.append([None] * (len(func) - len(default)))
            for fct, dft in zip(func, default):
                cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))
    @classmethod
    def _find_map_entry(cls, dtype):
        """Return ``(index, (type, func, default))`` for `dtype` in `_mapper`."""
        # if a converter for the specific dtype is available use that
        for i, (deftype, func, default_def) in enumerate(cls._mapper):
            if dtype.type == deftype:
                return i, (deftype, func, default_def)
        # otherwise find an inexact match
        for i, (deftype, func, default_def) in enumerate(cls._mapper):
            if np.issubdtype(dtype.type, deftype):
                return i, (deftype, func, default_def)
        raise LookupError
    def __init__(self, dtype_or_func=None, default=None, missing_values=None,
                 locked=False):
        # Defines a lock for upgrade
        self._locked = bool(locked)
        # No input dtype: minimal initialization
        if dtype_or_func is None:
            self.func = str2bool
            self._status = 0
            self.default = default or False
            dtype = np.dtype('bool')
        else:
            # Is the input a np.dtype ?
            try:
                self.func = None
                dtype = np.dtype(dtype_or_func)
            except TypeError:
                # dtype_or_func must be a function, then
                if not callable(dtype_or_func):
                    errmsg = ("The input argument `dtype` is neither a"
                              " function nor a dtype (got '%s' instead)")
                    raise TypeError(errmsg % type(dtype_or_func))
                # Set the function
                self.func = dtype_or_func
                # If we don't have a default, try to guess it or set it to
                # None
                if default is None:
                    try:
                        default = self.func('0')
                    except ValueError:
                        default = None
                dtype = self._getdtype(default)
            # find the best match in our mapper
            try:
                self._status, (_, func, default_def) = self._find_map_entry(dtype)
            except LookupError:
                # no match
                self.default = default
                _, func, _ = self._mapper[-1]
                self._status = 0
            else:
                # use the found default only if we did not already have one
                if default is None:
                    self.default = default_def
                else:
                    self.default = default
            # If the input was a dtype, set the function to the last we saw
            if self.func is None:
                self.func = func
            # If the status is 1 (int), change the function to
            # something more robust.
            if self.func == self._mapper[1][1]:
                if issubclass(dtype.type, np.uint64):
                    self.func = np.uint64
                elif issubclass(dtype.type, np.int64):
                    self.func = np.int64
                else:
                    self.func = lambda x: int(float(x))
        # Store the list of strings corresponding to missing values.
        if missing_values is None:
            self.missing_values = {''}
        else:
            if isinstance(missing_values, str):
                missing_values = missing_values.split(",")
            self.missing_values = set(list(missing_values) + [''])
        self._callingfunction = self._strict_call
        self.type = self._dtypeortype(dtype)
        self._checked = False
        self._initial_default = default
    def _loose_call(self, value):
        """Convert `value`, silently falling back to the default on failure."""
        try:
            return self.func(value)
        except ValueError:
            return self.default
    def _strict_call(self, value):
        """Convert `value` or raise ValueError; missing values yield the default."""
        try:
            # We check if we can convert the value using the current function
            new_value = self.func(value)
            # In addition to having to check whether func can convert the
            # value, we also have to make sure that we don't get overflow
            # errors for integers.
            if self.func is int:
                try:
                    np.array(value, dtype=self.type)
                except OverflowError:
                    raise ValueError
            # We're still here so we can now return the new value
            return new_value
        except ValueError:
            if value.strip() in self.missing_values:
                if not self._status:
                    self._checked = False
                return self.default
            raise ValueError("Cannot convert string '%s'" % value)
    def __call__(self, value):
        # Dispatch to either the strict or the loose conversion routine.
        return self._callingfunction(value)
    def _do_upgrade(self):
        """Move to the next converter in `_mapper`, if allowed and available."""
        # Raise an exception if we locked the converter...
        if self._locked:
            errmsg = "Converter is locked and cannot be upgraded"
            raise ConverterLockError(errmsg)
        _statusmax = len(self._mapper)
        # Complains if we try to upgrade by the maximum
        _status = self._status
        if _status == _statusmax:
            errmsg = "Could not find a valid conversion function"
            raise ConverterError(errmsg)
        elif _status < _statusmax - 1:
            _status += 1
        self.type, self.func, default = self._mapper[_status]
        self._status = _status
        if self._initial_default is not None:
            self.default = self._initial_default
        else:
            self.default = default
    def upgrade(self, value):
        """
        Find the best converter for a given string, and return the result.
        The supplied string `value` is converted by testing different
        converters in order. First the `func` method of the
        `StringConverter` instance is tried, if this fails other available
        converters are tried. The order in which these other converters
        are tried is determined by the `_status` attribute of the instance.
        Parameters
        ----------
        value : str
            The string to convert.
        Returns
        -------
        out : any
            The result of converting `value` with the appropriate converter.
        """
        self._checked = True
        try:
            return self._strict_call(value)
        except ValueError:
            # Current converter failed: step to the next one and retry.
            self._do_upgrade()
            return self.upgrade(value)
    def iterupgrade(self, value):
        """Upgrade the converter until every element of `value` converts."""
        self._checked = True
        if not hasattr(value, '__iter__'):
            value = (value,)
        _strict_call = self._strict_call
        try:
            for _m in value:
                _strict_call(_m)
        except ValueError:
            self._do_upgrade()
            self.iterupgrade(value)
    def update(self, func, default=None, testing_value=None,
               missing_values='', locked=False):
        """
        Set StringConverter attributes directly.
        Parameters
        ----------
        func : function
            Conversion function.
        default : any, optional
            Value to return by default, that is, when the string to be
            converted is flagged as missing. If not given,
            `StringConverter` tries to supply a reasonable default value.
        testing_value : str, optional
            A string representing a standard input value of the converter.
            This string is used to help defining a reasonable default
            value.
        missing_values : {sequence of str, None}, optional
            Sequence of strings indicating a missing value. If ``None``, then
            the existing `missing_values` are cleared. The default is ``''``.
        locked : bool, optional
            Whether the StringConverter should be locked to prevent
            automatic upgrade or not. Default is False.
        Notes
        -----
        `update` takes the same parameters as the constructor of
        `StringConverter`, except that `func` does not accept a `dtype`
        whereas `dtype_or_func` in the constructor does.
        """
        self.func = func
        self._locked = locked
        # Don't reset the default to None if we can avoid it
        if default is not None:
            self.default = default
            self.type = self._dtypeortype(self._getdtype(default))
        else:
            # Infer the output type by converting a sample value.
            try:
                tester = func(testing_value or '1')
            except (TypeError, ValueError):
                tester = None
            self.type = self._dtypeortype(self._getdtype(tester))
        # Add the missing values to the existing set or clear it.
        if missing_values is None:
            # Clear all missing values even though the ctor initializes it to
            # set(['']) when the argument is None.
            self.missing_values = set()
        else:
            if not np.iterable(missing_values):
                missing_values = [missing_values]
            if not all(isinstance(v, str) for v in missing_values):
                raise TypeError("missing_values must be strings or unicode")
            self.missing_values.update(missing_values)
def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
    """
    Convenience function to create a `np.dtype` object.
    The function processes the input `dtype` and matches it with the given
    names.
    Parameters
    ----------
    ndtype : var
        Definition of the dtype. Can be any string or dictionary recognized
        by the `np.dtype` function, or a sequence of types.
    names : str or sequence, optional
        Sequence of strings to use as field names for a structured dtype.
        For convenience, `names` can be a string of a comma-separated list
        of names.
    defaultfmt : str, optional
        Format string used to define missing names, such as ``"f%i"``
        (default) or ``"fields_%02i"``.
    validationargs : optional
        A series of optional arguments used to initialize a
        `NameValidator`.
    Examples
    --------
    >>> import numpy as np
    >>> np.lib._iotools.easy_dtype(float)
    dtype('float64')
    >>> np.lib._iotools.easy_dtype("i4, f8")
    dtype([('f0', '<i4'), ('f1', '<f8')])
    >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
    dtype([('field_000', '<i4'), ('field_001', '<f8')])
    >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
    dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
    >>> np.lib._iotools.easy_dtype(float, names="a,b,c")
    dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
    """
    try:
        ndtype = np.dtype(ndtype)
    except TypeError:
        # `ndtype` is a sequence of types: validate/complete the names and
        # build a structured dtype from the (names, formats) pair.
        validate = NameValidator(**validationargs)
        nbfields = len(ndtype)
        if names is None:
            names = [''] * len(ndtype)
        elif isinstance(names, str):
            names = names.split(",")
        names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
        ndtype = np.dtype(dict(formats=ndtype, names=names))
    else:
        # Explicit names
        if names is not None:
            validate = NameValidator(**validationargs)
            if isinstance(names, str):
                names = names.split(",")
            # Simple dtype: repeat to match the nb of names
            if ndtype.names is None:
                formats = tuple([ndtype.type] * len(names))
                names = validate(names, defaultfmt=defaultfmt)
                ndtype = np.dtype(list(zip(names, formats)))
            # Structured dtype: just validate the names as needed
            else:
                ndtype.names = validate(names, nbfields=len(ndtype.names),
                                        defaultfmt=defaultfmt)
        # No implicit names
        elif ndtype.names is not None:
            validate = NameValidator(**validationargs)
            # Default initial names : should we change the format ?
            numbered_names = tuple("f%i" % i for i in range(len(ndtype.names)))
            if ((ndtype.names == numbered_names) and (defaultfmt != "f%i")):
                # Names are still the auto-generated f0, f1, ...: regenerate
                # them with the requested format.
                ndtype.names = validate([''] * len(ndtype.names),
                                        defaultfmt=defaultfmt)
            # Explicit initial names : just validate
            else:
                ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
    return ndtype

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,38 @@
from numpy._core.fromnumeric import (
amin,
amax,
argmin,
argmax,
sum,
prod,
cumsum,
cumprod,
mean,
var,
std
)
from numpy.lib._function_base_impl import (
median,
percentile,
quantile,
)
__all__: list[str]
# NOTE: In reality these functions are not aliases but distinct functions
# with identical signatures.
nanmin = amin
nanmax = amax
nanargmin = argmin
nanargmax = argmax
nansum = sum
nanprod = prod
nancumsum = cumsum
nancumprod = cumprod
nanmean = mean
nanvar = var
nanstd = std
nanmedian = median
nanpercentile = percentile
nanquantile = quantile

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,348 @@
import os
import sys
import zipfile
import types
from re import Pattern
from collections.abc import Collection, Mapping, Iterator, Sequence, Callable, Iterable
from typing import (
Literal as L,
Any,
TypeVar,
Generic,
IO,
overload,
Protocol,
)
from numpy import (
ndarray,
recarray,
dtype,
generic,
float64,
void,
record,
)
from numpy.ma.mrecords import MaskedRecords
from numpy._typing import (
ArrayLike,
DTypeLike,
NDArray,
_DTypeLike,
_SupportsArrayFunc,
)
from numpy._core.multiarray import (
packbits as packbits,
unpackbits as unpackbits,
)
_T = TypeVar("_T")
_T_contra = TypeVar("_T_contra", contravariant=True)
_T_co = TypeVar("_T_co", covariant=True)
_SCT = TypeVar("_SCT", bound=generic)
_CharType_co = TypeVar("_CharType_co", str, bytes, covariant=True)
_CharType_contra = TypeVar("_CharType_contra", str, bytes, contravariant=True)
# Structural type: any object indexable with `_T_contra` yielding `_T_co`.
class _SupportsGetItem(Protocol[_T_contra, _T_co]):
    def __getitem__(self, key: _T_contra, /) -> _T_co: ...
# Structural type: any object with a no-argument ``read``.
class _SupportsRead(Protocol[_CharType_co]):
    def read(self) -> _CharType_co: ...
# Structural type: a seekable reader (sized ``read`` plus ``seek``).
class _SupportsReadSeek(Protocol[_CharType_co]):
    def read(self, n: int, /) -> _CharType_co: ...
    def seek(self, offset: int, whence: int, /) -> object: ...
# Structural type: any object with a ``write`` accepting str or bytes.
class _SupportsWrite(Protocol[_CharType_contra]):
    def write(self, s: _CharType_contra, /) -> object: ...
__all__: list[str]
# Exposes the string keys of a wrapped mapping-like object as attributes.
class BagObj(Generic[_T_co]):
    def __init__(self, obj: _SupportsGetItem[str, _T_co]) -> None: ...
    def __getattribute__(self, key: str) -> _T_co: ...
    def __dir__(self) -> list[str]: ...
# Mapping interface over the arrays stored in a zipped ``.npz`` archive;
# also usable as a context manager (``__enter__``/``__exit__``/``close``).
class NpzFile(Mapping[str, NDArray[Any]]):
    zip: zipfile.ZipFile
    fid: None | IO[str]
    files: list[str]
    allow_pickle: bool
    pickle_kwargs: None | Mapping[str, Any]
    _MAX_REPR_ARRAY_COUNT: int
    # Represent `f` as a mutable property so we can access the type of `self`
    @property
    def f(self: _T) -> BagObj[_T]: ...
    @f.setter
    def f(self: _T, value: BagObj[_T]) -> None: ...
    def __init__(
        self,
        fid: IO[str],
        own_fid: bool = ...,
        allow_pickle: bool = ...,
        pickle_kwargs: None | Mapping[str, Any] = ...,
    ) -> None: ...
    def __enter__(self: _T) -> _T: ...
    def __exit__(
        self,
        exc_type: None | type[BaseException],
        exc_value: None | BaseException,
        traceback: None | types.TracebackType,
        /,
    ) -> None: ...
    def close(self) -> None: ...
    def __del__(self) -> None: ...
    def __iter__(self) -> Iterator[str]: ...
    def __len__(self) -> int: ...
    def __getitem__(self, key: str) -> NDArray[Any]: ...
    def __contains__(self, key: str) -> bool: ...
    def __repr__(self) -> str: ...
# Path/URL abstraction rooted at `destpath`; provides existence checks and
# opening (presumably downloading remote files under `destpath` — see the
# numpy.lib.npyio.DataSource docs for the runtime behavior).
class DataSource:
    def __init__(
        self,
        destpath: None | str | os.PathLike[str] = ...,
    ) -> None: ...
    def __del__(self) -> None: ...
    def abspath(self, path: str) -> str: ...
    def exists(self, path: str) -> bool: ...
    # Whether the file-object is opened in string or bytes mode (by default)
    # depends on the file-extension of `path`
    def open(
        self,
        path: str,
        mode: str = ...,
        encoding: None | str = ...,
        newline: None | str = ...,
    ) -> IO[Any]: ...
# NOTE: Returns a `NpzFile` if file is a zip file;
# returns an `ndarray`/`memmap` otherwise
# (hence the `Any` return type: the result cannot be narrowed statically).
def load(
    file: str | bytes | os.PathLike[Any] | _SupportsReadSeek[bytes],
    mmap_mode: L[None, "r+", "r", "w+", "c"] = ...,
    allow_pickle: bool = ...,
    fix_imports: bool = ...,
    encoding: L["ASCII", "latin1", "bytes"] = ...,
) -> Any: ...
# Serializes a single array to a ``.npy`` destination (path or writable file).
def save(
    file: str | os.PathLike[str] | _SupportsWrite[bytes],
    arr: ArrayLike,
    allow_pickle: bool = ...,
    fix_imports: bool = ...,
) -> None: ...
# Saves several arrays into one archive; keyword names become archive keys.
def savez(
    file: str | os.PathLike[str] | _SupportsWrite[bytes],
    *args: ArrayLike,
    **kwds: ArrayLike,
) -> None: ...
# Same signature as `savez`; compressed variant.
def savez_compressed(
    file: str | os.PathLike[str] | _SupportsWrite[bytes],
    *args: ArrayLike,
    **kwds: ArrayLike,
) -> None: ...
# File-like objects only have to implement `__iter__` and,
# optionally, `encoding`
# Overload resolution: dtype omitted/None -> float64 array; a known scalar
# type -> array of that type; any other dtype-like -> NDArray[Any].
@overload
def loadtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    dtype: None = ...,
    comments: None | str | Sequence[str] = ...,
    delimiter: None | str = ...,
    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
    skiprows: int = ...,
    usecols: int | Sequence[int] | None = ...,
    unpack: bool = ...,
    ndmin: L[0, 1, 2] = ...,
    encoding: None | str = ...,
    max_rows: None | int = ...,
    *,
    quotechar: None | str = ...,
    like: None | _SupportsArrayFunc = ...
) -> NDArray[float64]: ...
@overload
def loadtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    dtype: _DTypeLike[_SCT],
    comments: None | str | Sequence[str] = ...,
    delimiter: None | str = ...,
    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
    skiprows: int = ...,
    usecols: int | Sequence[int] | None = ...,
    unpack: bool = ...,
    ndmin: L[0, 1, 2] = ...,
    encoding: None | str = ...,
    max_rows: None | int = ...,
    *,
    quotechar: None | str = ...,
    like: None | _SupportsArrayFunc = ...
) -> NDArray[_SCT]: ...
@overload
def loadtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    dtype: DTypeLike,
    comments: None | str | Sequence[str] = ...,
    delimiter: None | str = ...,
    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
    skiprows: int = ...,
    usecols: int | Sequence[int] | None = ...,
    unpack: bool = ...,
    ndmin: L[0, 1, 2] = ...,
    encoding: None | str = ...,
    max_rows: None | int = ...,
    *,
    quotechar: None | str = ...,
    like: None | _SupportsArrayFunc = ...
) -> NDArray[Any]: ...
# Text writer counterpart of `loadtxt`; accepts a path or a str/bytes writer.
def savetxt(
    fname: str | os.PathLike[str] | _SupportsWrite[str] | _SupportsWrite[bytes],
    X: ArrayLike,
    fmt: str | Sequence[str] = ...,
    delimiter: str = ...,
    newline: str = ...,
    header: str = ...,
    footer: str = ...,
    comments: str = ...,
    encoding: None | str = ...,
) -> None: ...
# Overloads mirror `loadtxt`: a known scalar dtype narrows the array type.
@overload
def fromregex(
    file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes],
    regexp: str | bytes | Pattern[Any],
    dtype: _DTypeLike[_SCT],
    encoding: None | str = ...
) -> NDArray[_SCT]: ...
@overload
def fromregex(
    file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes],
    regexp: str | bytes | Pattern[Any],
    dtype: DTypeLike,
    encoding: None | str = ...
) -> NDArray[Any]: ...
# Overload resolution follows `loadtxt`: dtype=None or a generic dtype-like
# -> NDArray[Any]; a known scalar type -> array of that type.
@overload
def genfromtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    dtype: None = ...,
    comments: str = ...,
    delimiter: None | str | int | Iterable[int] = ...,
    skip_header: int = ...,
    skip_footer: int = ...,
    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
    missing_values: Any = ...,
    filling_values: Any = ...,
    usecols: None | Sequence[int] = ...,
    names: L[None, True] | str | Collection[str] = ...,
    excludelist: None | Sequence[str] = ...,
    deletechars: str = ...,
    replace_space: str = ...,
    autostrip: bool = ...,
    case_sensitive: bool | L['upper', 'lower'] = ...,
    defaultfmt: str = ...,
    unpack: None | bool = ...,
    usemask: bool = ...,
    loose: bool = ...,
    invalid_raise: bool = ...,
    max_rows: None | int = ...,
    encoding: str = ...,
    *,
    ndmin: L[0, 1, 2] = ...,
    like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...
@overload
def genfromtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    dtype: _DTypeLike[_SCT],
    comments: str = ...,
    delimiter: None | str | int | Iterable[int] = ...,
    skip_header: int = ...,
    skip_footer: int = ...,
    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
    missing_values: Any = ...,
    filling_values: Any = ...,
    usecols: None | Sequence[int] = ...,
    names: L[None, True] | str | Collection[str] = ...,
    excludelist: None | Sequence[str] = ...,
    deletechars: str = ...,
    replace_space: str = ...,
    autostrip: bool = ...,
    case_sensitive: bool | L['upper', 'lower'] = ...,
    defaultfmt: str = ...,
    unpack: None | bool = ...,
    usemask: bool = ...,
    loose: bool = ...,
    invalid_raise: bool = ...,
    max_rows: None | int = ...,
    encoding: str = ...,
    *,
    ndmin: L[0, 1, 2] = ...,
    like: None | _SupportsArrayFunc = ...,
) -> NDArray[_SCT]: ...
@overload
def genfromtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    dtype: DTypeLike,
    comments: str = ...,
    delimiter: None | str | int | Iterable[int] = ...,
    skip_header: int = ...,
    skip_footer: int = ...,
    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
    missing_values: Any = ...,
    filling_values: Any = ...,
    usecols: None | Sequence[int] = ...,
    names: L[None, True] | str | Collection[str] = ...,
    excludelist: None | Sequence[str] = ...,
    deletechars: str = ...,
    replace_space: str = ...,
    autostrip: bool = ...,
    case_sensitive: bool | L['upper', 'lower'] = ...,
    defaultfmt: str = ...,
    unpack: None | bool = ...,
    usemask: bool = ...,
    loose: bool = ...,
    invalid_raise: bool = ...,
    max_rows: None | int = ...,
    encoding: str = ...,
    *,
    ndmin: L[0, 1, 2] = ...,
    like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...
# `usemask` selects the return type: plain recarray vs. MaskedRecords.
@overload
def recfromtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    *,
    usemask: L[False] = ...,
    **kwargs: Any,
) -> recarray[Any, dtype[record]]: ...
@overload
def recfromtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    *,
    usemask: L[True],
    **kwargs: Any,
) -> MaskedRecords[Any, dtype[void]]: ...
# Same overload pattern as `recfromtxt`: `usemask` picks the return type.
@overload
def recfromcsv(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    *,
    usemask: L[False] = ...,
    **kwargs: Any,
) -> recarray[Any, dtype[record]]: ...
@overload
def recfromcsv(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    *,
    usemask: L[True],
    **kwargs: Any,
) -> MaskedRecords[Any, dtype[void]]: ...

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,302 @@
from typing import (
Literal as L,
overload,
Any,
SupportsInt,
SupportsIndex,
TypeVar,
NoReturn,
)
import numpy as np
from numpy import (
poly1d as poly1d,
unsignedinteger,
signedinteger,
floating,
complexfloating,
int32,
int64,
float64,
complex128,
object_,
)
from numpy._typing import (
NDArray,
ArrayLike,
_ArrayLikeBool_co,
_ArrayLikeUInt_co,
_ArrayLikeInt_co,
_ArrayLikeFloat_co,
_ArrayLikeComplex_co,
_ArrayLikeObject_co,
)
_T = TypeVar("_T")
_2Tup = tuple[_T, _T]
_5Tup = tuple[
_T,
NDArray[float64],
NDArray[int32],
NDArray[float64],
NDArray[float64],
]
__all__: list[str]
# Coefficients from roots; annotated as always producing a floating array.
def poly(seq_of_zeros: ArrayLike) -> NDArray[floating[Any]]: ...
# Returns either a float or complex array depending on the input values.
# See `np.linalg.eigvals`.
def roots(p: ArrayLike) -> NDArray[complexfloating[Any, Any]] | NDArray[floating[Any]]: ...
# Overloads: poly1d input stays poly1d; otherwise the result dtype follows
# the widest of the coefficient/constant inputs (float -> complex -> object).
@overload
def polyint(
    p: poly1d,
    m: SupportsInt | SupportsIndex = ...,
    k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ...,
) -> poly1d: ...
@overload
def polyint(
    p: _ArrayLikeFloat_co,
    m: SupportsInt | SupportsIndex = ...,
    k: None | _ArrayLikeFloat_co = ...,
) -> NDArray[floating[Any]]: ...
@overload
def polyint(
    p: _ArrayLikeComplex_co,
    m: SupportsInt | SupportsIndex = ...,
    k: None | _ArrayLikeComplex_co = ...,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def polyint(
    p: _ArrayLikeObject_co,
    m: SupportsInt | SupportsIndex = ...,
    k: None | _ArrayLikeObject_co = ...,
) -> NDArray[object_]: ...
# Same overload ladder as `polyint`, without the integration constant `k`.
@overload
def polyder(
    p: poly1d,
    m: SupportsInt | SupportsIndex = ...,
) -> poly1d: ...
@overload
def polyder(
    p: _ArrayLikeFloat_co,
    m: SupportsInt | SupportsIndex = ...,
) -> NDArray[floating[Any]]: ...
@overload
def polyder(
    p: _ArrayLikeComplex_co,
    m: SupportsInt | SupportsIndex = ...,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def polyder(
    p: _ArrayLikeObject_co,
    m: SupportsInt | SupportsIndex = ...,
) -> NDArray[object_]: ...
# Overloads: plain fit -> coefficient array; cov=True/"unscaled" -> a
# (coefficients, covariance) pair; full=True -> the 5-tuple diagnostics.
# Complex inputs widen the coefficient dtype from float64 to complex128.
@overload
def polyfit(
    x: _ArrayLikeFloat_co,
    y: _ArrayLikeFloat_co,
    deg: SupportsIndex | SupportsInt,
    rcond: None | float = ...,
    full: L[False] = ...,
    w: None | _ArrayLikeFloat_co = ...,
    cov: L[False] = ...,
) -> NDArray[float64]: ...
@overload
def polyfit(
    x: _ArrayLikeComplex_co,
    y: _ArrayLikeComplex_co,
    deg: SupportsIndex | SupportsInt,
    rcond: None | float = ...,
    full: L[False] = ...,
    w: None | _ArrayLikeFloat_co = ...,
    cov: L[False] = ...,
) -> NDArray[complex128]: ...
@overload
def polyfit(
    x: _ArrayLikeFloat_co,
    y: _ArrayLikeFloat_co,
    deg: SupportsIndex | SupportsInt,
    rcond: None | float = ...,
    full: L[False] = ...,
    w: None | _ArrayLikeFloat_co = ...,
    cov: L[True, "unscaled"] = ...,
) -> _2Tup[NDArray[float64]]: ...
@overload
def polyfit(
    x: _ArrayLikeComplex_co,
    y: _ArrayLikeComplex_co,
    deg: SupportsIndex | SupportsInt,
    rcond: None | float = ...,
    full: L[False] = ...,
    w: None | _ArrayLikeFloat_co = ...,
    cov: L[True, "unscaled"] = ...,
) -> _2Tup[NDArray[complex128]]: ...
@overload
def polyfit(
    x: _ArrayLikeFloat_co,
    y: _ArrayLikeFloat_co,
    deg: SupportsIndex | SupportsInt,
    rcond: None | float = ...,
    full: L[True] = ...,
    w: None | _ArrayLikeFloat_co = ...,
    cov: bool | L["unscaled"] = ...,
) -> _5Tup[NDArray[float64]]: ...
@overload
def polyfit(
    x: _ArrayLikeComplex_co,
    y: _ArrayLikeComplex_co,
    deg: SupportsIndex | SupportsInt,
    rcond: None | float = ...,
    full: L[True] = ...,
    w: None | _ArrayLikeFloat_co = ...,
    cov: bool | L["unscaled"] = ...,
) -> _5Tup[NDArray[complex128]]: ...
@overload
def polyval(
p: _ArrayLikeBool_co,
x: _ArrayLikeBool_co,
) -> NDArray[int64]: ...
@overload
def polyval(
p: _ArrayLikeUInt_co,
x: _ArrayLikeUInt_co,
) -> NDArray[unsignedinteger[Any]]: ...
@overload
def polyval(
p: _ArrayLikeInt_co,
x: _ArrayLikeInt_co,
) -> NDArray[signedinteger[Any]]: ...
@overload
def polyval(
p: _ArrayLikeFloat_co,
x: _ArrayLikeFloat_co,
) -> NDArray[floating[Any]]: ...
@overload
def polyval(
p: _ArrayLikeComplex_co,
x: _ArrayLikeComplex_co,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def polyval(
p: _ArrayLikeObject_co,
x: _ArrayLikeObject_co,
) -> NDArray[object_]: ...
# polyadd: a poly1d in either operand yields a poly1d; otherwise the result
# follows the usual array dtype-promotion ladder.
@overload
def polyadd(
    a1: poly1d,
    a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,
) -> poly1d: ...
@overload
def polyadd(
    a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    a2: poly1d,
) -> poly1d: ...
@overload
def polyadd(
    a1: _ArrayLikeBool_co,
    a2: _ArrayLikeBool_co,
) -> NDArray[np.bool]: ...
@overload
def polyadd(
    a1: _ArrayLikeUInt_co,
    a2: _ArrayLikeUInt_co,
) -> NDArray[unsignedinteger[Any]]: ...
@overload
def polyadd(
    a1: _ArrayLikeInt_co,
    a2: _ArrayLikeInt_co,
) -> NDArray[signedinteger[Any]]: ...
@overload
def polyadd(
    a1: _ArrayLikeFloat_co,
    a2: _ArrayLikeFloat_co,
) -> NDArray[floating[Any]]: ...
@overload
def polyadd(
    a1: _ArrayLikeComplex_co,
    a2: _ArrayLikeComplex_co,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def polyadd(
    a1: _ArrayLikeObject_co,
    a2: _ArrayLikeObject_co,
) -> NDArray[object_]: ...
# polysub: same shape as polyadd, except two boolean inputs are annotated
# NoReturn -- per the stub, calling it with two bool arrays is a type error
# (boolean subtraction is rejected).
@overload
def polysub(
    a1: poly1d,
    a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,
) -> poly1d: ...
@overload
def polysub(
    a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    a2: poly1d,
) -> poly1d: ...
@overload
def polysub(
    a1: _ArrayLikeBool_co,
    a2: _ArrayLikeBool_co,
) -> NoReturn: ...
@overload
def polysub(
    a1: _ArrayLikeUInt_co,
    a2: _ArrayLikeUInt_co,
) -> NDArray[unsignedinteger[Any]]: ...
@overload
def polysub(
    a1: _ArrayLikeInt_co,
    a2: _ArrayLikeInt_co,
) -> NDArray[signedinteger[Any]]: ...
@overload
def polysub(
    a1: _ArrayLikeFloat_co,
    a2: _ArrayLikeFloat_co,
) -> NDArray[floating[Any]]: ...
@overload
def polysub(
    a1: _ArrayLikeComplex_co,
    a2: _ArrayLikeComplex_co,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def polysub(
    a1: _ArrayLikeObject_co,
    a2: _ArrayLikeObject_co,
) -> NDArray[object_]: ...
# NOTE: Not an alias, but they do have the same signature (that we can reuse)
polymul = polyadd
# polydiv: returns a (quotient, remainder) 2-tuple; poly1d operands give
# poly1d results, float inputs give floating arrays, complex give complex.
@overload
def polydiv(
    u: poly1d,
    v: _ArrayLikeComplex_co | _ArrayLikeObject_co,
) -> _2Tup[poly1d]: ...
@overload
def polydiv(
    u: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    v: poly1d,
) -> _2Tup[poly1d]: ...
@overload
def polydiv(
    u: _ArrayLikeFloat_co,
    v: _ArrayLikeFloat_co,
) -> _2Tup[NDArray[floating[Any]]]: ...
@overload
def polydiv(
    u: _ArrayLikeComplex_co,
    v: _ArrayLikeComplex_co,
) -> _2Tup[NDArray[complexfloating[Any, Any]]]: ...
@overload
def polydiv(
    u: _ArrayLikeObject_co,
    v: _ArrayLikeObject_co,
) -> _2Tup[NDArray[Any]]: ...

View File

@ -0,0 +1,650 @@
"""
Wrapper functions to more user-friendly calling of certain math functions
whose output data-type is different than the input data-type in certain
domains of the input.
For example, for functions like `log` with branch cuts, the versions in this
module provide the mathematically valid answers in the complex plane::
>>> import math
>>> np.emath.log(-math.exp(1)) == (1+1j*math.pi)
True
Similarly, `sqrt`, other base logarithms, `power` and trig functions are
correctly handled. See their respective docstrings for specific examples.
Functions
---------
.. autosummary::
:toctree: generated/
sqrt
log
log2
logn
log10
power
arccos
arcsin
arctanh
"""
import numpy._core.numeric as nx
import numpy._core.numerictypes as nt
from numpy._core.numeric import asarray, any
from numpy._core.overrides import array_function_dispatch
from numpy.lib._type_check_impl import isreal
__all__ = [
'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin',
'arctanh'
]
_ln2 = nx.log(2.0)
def _tocomplex(arr):
"""Convert its input `arr` to a complex array.
The input is returned as a complex array of the smallest type that will fit
the original data: types like single, byte, short, etc. become csingle,
while others become cdouble.
A copy of the input is always made.
Parameters
----------
arr : array
Returns
-------
array
An array with the same input data as the input but in complex form.
Examples
--------
>>> import numpy as np
First, consider an input of type short:
>>> a = np.array([1,2,3],np.short)
>>> ac = np.lib.scimath._tocomplex(a); ac
array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
>>> ac.dtype
dtype('complex64')
If the input is of type double, the output is correspondingly of the
complex double type as well:
>>> b = np.array([1,2,3],np.double)
>>> bc = np.lib.scimath._tocomplex(b); bc
array([1.+0.j, 2.+0.j, 3.+0.j])
>>> bc.dtype
dtype('complex128')
Note that even if the input was complex to begin with, a copy is still
made, since the astype() method always copies:
>>> c = np.array([1,2,3],np.csingle)
>>> cc = np.lib.scimath._tocomplex(c); cc
array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
>>> c *= 2; c
array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64)
>>> cc
array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
"""
if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte,
nt.ushort, nt.csingle)):
return arr.astype(nt.csingle)
else:
return arr.astype(nt.cdouble)
def _fix_real_lt_zero(x):
"""Convert `x` to complex if it has real, negative components.
Otherwise, output is just the array version of the input (via asarray).
Parameters
----------
x : array_like
Returns
-------
array
Examples
--------
>>> import numpy as np
>>> np.lib.scimath._fix_real_lt_zero([1,2])
array([1, 2])
>>> np.lib.scimath._fix_real_lt_zero([-1,2])
array([-1.+0.j, 2.+0.j])
"""
x = asarray(x)
if any(isreal(x) & (x < 0)):
x = _tocomplex(x)
return x
def _fix_int_lt_zero(x):
"""Convert `x` to double if it has real, negative components.
Otherwise, output is just the array version of the input (via asarray).
Parameters
----------
x : array_like
Returns
-------
array
Examples
--------
>>> import numpy as np
>>> np.lib.scimath._fix_int_lt_zero([1,2])
array([1, 2])
>>> np.lib.scimath._fix_int_lt_zero([-1,2])
array([-1., 2.])
"""
x = asarray(x)
if any(isreal(x) & (x < 0)):
x = x * 1.0
return x
def _fix_real_abs_gt_1(x):
"""Convert `x` to complex if it has real components x_i with abs(x_i)>1.
Otherwise, output is just the array version of the input (via asarray).
Parameters
----------
x : array_like
Returns
-------
array
Examples
--------
>>> import numpy as np
>>> np.lib.scimath._fix_real_abs_gt_1([0,1])
array([0, 1])
>>> np.lib.scimath._fix_real_abs_gt_1([0,2])
array([0.+0.j, 2.+0.j])
"""
x = asarray(x)
if any(isreal(x) & (abs(x) > 1)):
x = _tocomplex(x)
return x
def _unary_dispatcher(x):
    # __array_function__ dispatcher for one-argument emath functions:
    # only `x` is relevant for dispatch.
    return (x,)
@array_function_dispatch(_unary_dispatcher)
def sqrt(x):
"""
Compute the square root of x.
For negative input elements, a complex value is returned
(unlike `numpy.sqrt` which returns NaN).
Parameters
----------
x : array_like
The input value(s).
Returns
-------
out : ndarray or scalar
The square root of `x`. If `x` was a scalar, so is `out`,
otherwise an array is returned.
See Also
--------
numpy.sqrt
Examples
--------
For real, non-negative inputs this works just like `numpy.sqrt`:
>>> import numpy as np
>>> np.emath.sqrt(1)
1.0
>>> np.emath.sqrt([1, 4])
array([1., 2.])
But it automatically handles negative inputs:
>>> np.emath.sqrt(-1)
1j
>>> np.emath.sqrt([-1,4])
array([0.+1.j, 2.+0.j])
Different results are expected because:
floating point 0.0 and -0.0 are distinct.
For more control, explicitly use complex() as follows:
>>> np.emath.sqrt(complex(-4.0, 0.0))
2j
>>> np.emath.sqrt(complex(-4.0, -0.0))
-2j
"""
x = _fix_real_lt_zero(x)
return nx.sqrt(x)
@array_function_dispatch(_unary_dispatcher)
def log(x):
"""
Compute the natural logarithm of `x`.
Return the "principal value" (for a description of this, see `numpy.log`)
of :math:`log_e(x)`. For real `x > 0`, this is a real number (``log(0)``
returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the
complex principle value is returned.
Parameters
----------
x : array_like
The value(s) whose log is (are) required.
Returns
-------
out : ndarray or scalar
The log of the `x` value(s). If `x` was a scalar, so is `out`,
otherwise an array is returned.
See Also
--------
numpy.log
Notes
-----
For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log`
(note, however, that otherwise `numpy.log` and this `log` are identical,
i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and,
notably, the complex principle value if ``x.imag != 0``).
Examples
--------
>>> import numpy as np
>>> np.emath.log(np.exp(1))
1.0
Negative arguments are handled "correctly" (recall that
``exp(log(x)) == x`` does *not* hold for real ``x < 0``):
>>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j)
True
"""
x = _fix_real_lt_zero(x)
return nx.log(x)
@array_function_dispatch(_unary_dispatcher)
def log10(x):
"""
Compute the logarithm base 10 of `x`.
Return the "principal value" (for a description of this, see
`numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this
is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)``
returns ``inf``). Otherwise, the complex principle value is returned.
Parameters
----------
x : array_like or scalar
The value(s) whose log base 10 is (are) required.
Returns
-------
out : ndarray or scalar
The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`,
otherwise an array object is returned.
See Also
--------
numpy.log10
Notes
-----
For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10`
(note, however, that otherwise `numpy.log10` and this `log10` are
identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
and, notably, the complex principle value if ``x.imag != 0``).
Examples
--------
>>> import numpy as np
(We set the printing precision so the example can be auto-tested)
>>> np.set_printoptions(precision=4)
>>> np.emath.log10(10**1)
1.0
>>> np.emath.log10([-10**1, -10**2, 10**2])
array([1.+1.3644j, 2.+1.3644j, 2.+0.j ])
"""
x = _fix_real_lt_zero(x)
return nx.log10(x)
def _logn_dispatcher(n, x):
    # __array_function__ dispatcher for logn: both arguments participate.
    return (n, x,)
@array_function_dispatch(_logn_dispatcher)
def logn(n, x):
"""
Take log base n of x.
If `x` contains negative inputs, the answer is computed and returned in the
complex domain.
Parameters
----------
n : array_like
The integer base(s) in which the log is taken.
x : array_like
The value(s) whose log base `n` is (are) required.
Returns
-------
out : ndarray or scalar
The log base `n` of the `x` value(s). If `x` was a scalar, so is
`out`, otherwise an array is returned.
Examples
--------
>>> import numpy as np
>>> np.set_printoptions(precision=4)
>>> np.emath.logn(2, [4, 8])
array([2., 3.])
>>> np.emath.logn(2, [-4, -8, 8])
array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])
"""
x = _fix_real_lt_zero(x)
n = _fix_real_lt_zero(n)
return nx.log(x)/nx.log(n)
@array_function_dispatch(_unary_dispatcher)
def log2(x):
"""
Compute the logarithm base 2 of `x`.
Return the "principal value" (for a description of this, see
`numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is
a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns
``inf``). Otherwise, the complex principle value is returned.
Parameters
----------
x : array_like
The value(s) whose log base 2 is (are) required.
Returns
-------
out : ndarray or scalar
The log base 2 of the `x` value(s). If `x` was a scalar, so is `out`,
otherwise an array is returned.
See Also
--------
numpy.log2
Notes
-----
For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2`
(note, however, that otherwise `numpy.log2` and this `log2` are
identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
and, notably, the complex principle value if ``x.imag != 0``).
Examples
--------
We set the printing precision so the example can be auto-tested:
>>> np.set_printoptions(precision=4)
>>> np.emath.log2(8)
3.0
>>> np.emath.log2([-4, -8, 8])
array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])
"""
x = _fix_real_lt_zero(x)
return nx.log2(x)
def _power_dispatcher(x, p):
    # __array_function__ dispatcher for power: both arguments participate.
    return (x, p)
@array_function_dispatch(_power_dispatcher)
def power(x, p):
"""
Return x to the power p, (x**p).
If `x` contains negative values, the output is converted to the
complex domain.
Parameters
----------
x : array_like
The input value(s).
p : array_like of ints
The power(s) to which `x` is raised. If `x` contains multiple values,
`p` has to either be a scalar, or contain the same number of values
as `x`. In the latter case, the result is
``x[0]**p[0], x[1]**p[1], ...``.
Returns
-------
out : ndarray or scalar
The result of ``x**p``. If `x` and `p` are scalars, so is `out`,
otherwise an array is returned.
See Also
--------
numpy.power
Examples
--------
>>> import numpy as np
>>> np.set_printoptions(precision=4)
>>> np.emath.power(2, 2)
4
>>> np.emath.power([2, 4], 2)
array([ 4, 16])
>>> np.emath.power([2, 4], -2)
array([0.25 , 0.0625])
>>> np.emath.power([-2, 4], 2)
array([ 4.-0.j, 16.+0.j])
>>> np.emath.power([2, 4], [2, 4])
array([ 4, 256])
"""
x = _fix_real_lt_zero(x)
p = _fix_int_lt_zero(p)
return nx.power(x, p)
@array_function_dispatch(_unary_dispatcher)
def arccos(x):
"""
Compute the inverse cosine of x.
Return the "principal value" (for a description of this, see
`numpy.arccos`) of the inverse cosine of `x`. For real `x` such that
`abs(x) <= 1`, this is a real number in the closed interval
:math:`[0, \\pi]`. Otherwise, the complex principle value is returned.
Parameters
----------
x : array_like or scalar
The value(s) whose arccos is (are) required.
Returns
-------
out : ndarray or scalar
The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so
is `out`, otherwise an array object is returned.
See Also
--------
numpy.arccos
Notes
-----
For an arccos() that returns ``NAN`` when real `x` is not in the
interval ``[-1,1]``, use `numpy.arccos`.
Examples
--------
>>> import numpy as np
>>> np.set_printoptions(precision=4)
>>> np.emath.arccos(1) # a scalar is returned
0.0
>>> np.emath.arccos([1,2])
array([0.-0.j , 0.-1.317j])
"""
x = _fix_real_abs_gt_1(x)
return nx.arccos(x)
@array_function_dispatch(_unary_dispatcher)
def arcsin(x):
"""
Compute the inverse sine of x.
Return the "principal value" (for a description of this, see
`numpy.arcsin`) of the inverse sine of `x`. For real `x` such that
`abs(x) <= 1`, this is a real number in the closed interval
:math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principle value is
returned.
Parameters
----------
x : array_like or scalar
The value(s) whose arcsin is (are) required.
Returns
-------
out : ndarray or scalar
The inverse sine(s) of the `x` value(s). If `x` was a scalar, so
is `out`, otherwise an array object is returned.
See Also
--------
numpy.arcsin
Notes
-----
For an arcsin() that returns ``NAN`` when real `x` is not in the
interval ``[-1,1]``, use `numpy.arcsin`.
Examples
--------
>>> import numpy as np
>>> np.set_printoptions(precision=4)
>>> np.emath.arcsin(0)
0.0
>>> np.emath.arcsin([0,1])
array([0. , 1.5708])
"""
x = _fix_real_abs_gt_1(x)
return nx.arcsin(x)
@array_function_dispatch(_unary_dispatcher)
def arctanh(x):
"""
Compute the inverse hyperbolic tangent of `x`.
Return the "principal value" (for a description of this, see
`numpy.arctanh`) of ``arctanh(x)``. For real `x` such that
``abs(x) < 1``, this is a real number. If `abs(x) > 1`, or if `x` is
complex, the result is complex. Finally, `x = 1` returns``inf`` and
``x=-1`` returns ``-inf``.
Parameters
----------
x : array_like
The value(s) whose arctanh is (are) required.
Returns
-------
out : ndarray or scalar
The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was
a scalar so is `out`, otherwise an array is returned.
See Also
--------
numpy.arctanh
Notes
-----
For an arctanh() that returns ``NAN`` when real `x` is not in the
interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does
return +/-inf for ``x = +/-1``).
Examples
--------
>>> import numpy as np
>>> np.set_printoptions(precision=4)
>>> np.emath.arctanh(0.5)
0.5493061443340549
>>> from numpy.testing import suppress_warnings
>>> with suppress_warnings() as sup:
... sup.filter(RuntimeWarning)
... np.emath.arctanh(np.eye(2))
array([[inf, 0.],
[ 0., inf]])
>>> np.emath.arctanh([1j])
array([0.+0.7854j])
"""
x = _fix_real_abs_gt_1(x)
return nx.arctanh(x)

View File

@ -0,0 +1,94 @@
from typing import overload, Any
from numpy import complexfloating
from numpy._typing import (
NDArray,
_ArrayLikeFloat_co,
_ArrayLikeComplex_co,
_ComplexLike_co,
_FloatLike_co,
)
__all__: list[str]
# Shared overload pattern for every emath function: a real scalar may
# promote to complex at runtime, so it maps to ``Any``; a complex scalar
# maps to ``complexfloating``; real/complex array-likes map to the
# analogous ``NDArray`` forms.
@overload
def sqrt(x: _FloatLike_co) -> Any: ...
@overload
def sqrt(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def sqrt(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def sqrt(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def log(x: _FloatLike_co) -> Any: ...
@overload
def log(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def log(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def log(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def log10(x: _FloatLike_co) -> Any: ...
@overload
def log10(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def log10(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def log10(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def log2(x: _FloatLike_co) -> Any: ...
@overload
def log2(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def log2(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def log2(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
# logn and power are binary: both arguments follow the same promotion.
@overload
def logn(n: _FloatLike_co, x: _FloatLike_co) -> Any: ...
@overload
def logn(n: _ComplexLike_co, x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def logn(n: _ArrayLikeFloat_co, x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def logn(n: _ArrayLikeComplex_co, x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def power(x: _FloatLike_co, p: _FloatLike_co) -> Any: ...
@overload
def power(x: _ComplexLike_co, p: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def power(x: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def power(x: _ArrayLikeComplex_co, p: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def arccos(x: _FloatLike_co) -> Any: ...
@overload
def arccos(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def arccos(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def arccos(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def arcsin(x: _FloatLike_co) -> Any: ...
@overload
def arcsin(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def arcsin(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def arcsin(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def arctanh(x: _FloatLike_co) -> Any: ...
@overload
def arctanh(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def arctanh(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def arctanh(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,204 @@
from collections.abc import Callable, Sequence
from typing import (
TypeVar,
Any,
overload,
SupportsIndex,
Protocol,
ParamSpec,
Concatenate,
)
import numpy as np
from numpy import (
generic,
integer,
ufunc,
unsignedinteger,
signedinteger,
floating,
complexfloating,
object_,
)
from numpy._typing import (
ArrayLike,
NDArray,
_ShapeLike,
_ArrayLike,
_ArrayLikeBool_co,
_ArrayLikeUInt_co,
_ArrayLikeInt_co,
_ArrayLikeFloat_co,
_ArrayLikeComplex_co,
_ArrayLikeObject_co,
)
from numpy._core.shape_base import vstack
_P = ParamSpec("_P")
_SCT = TypeVar("_SCT", bound=generic)
# Signature of `__array_wrap__`
class _ArrayWrap(Protocol):
    # Callback protocol mirroring ``ndarray.__array_wrap__``: all three
    # parameters are positional-only (note the trailing ``/``).
    def __call__(
        self,
        array: NDArray[Any],
        context: None | tuple[ufunc, tuple[Any, ...], int] = ...,
        return_scalar: bool = ...,
        /,
    ) -> Any: ...
class _SupportsArrayWrap(Protocol):
    # Structural type for any object exposing an ``__array_wrap__``
    # attribute of the shape above.
    @property
    def __array_wrap__(self) -> _ArrayWrap: ...
__all__: list[str]
# take/put_along_axis: index along one axis (``axis=None`` works on the
# flattened array); the scalar type of `arr` is preserved.
def take_along_axis(
    arr: _SCT | NDArray[_SCT],
    indices: NDArray[integer[Any]],
    axis: None | int,
) -> NDArray[_SCT]: ...
def put_along_axis(
    arr: NDArray[_SCT],
    indices: NDArray[integer[Any]],
    values: ArrayLike,
    axis: None | int,
) -> None: ...
# apply_along_axis: first overload keeps the scalar type when `func1d`
# returns a typed array-like; the second falls back to NDArray[Any].
@overload
def apply_along_axis(
    func1d: Callable[Concatenate[NDArray[Any], _P], _ArrayLike[_SCT]],
    axis: SupportsIndex,
    arr: ArrayLike,
    *args: _P.args,
    **kwargs: _P.kwargs,
) -> NDArray[_SCT]: ...
@overload
def apply_along_axis(
    func1d: Callable[Concatenate[NDArray[Any], _P], ArrayLike],
    axis: SupportsIndex,
    arr: ArrayLike,
    *args: _P.args,
    **kwargs: _P.kwargs,
) -> NDArray[Any]: ...
def apply_over_axes(
    func: Callable[[NDArray[Any], int], NDArray[_SCT]],
    a: ArrayLike,
    axes: int | Sequence[int],
) -> NDArray[_SCT]: ...
# expand_dims / stacking helpers: typed input preserves its scalar type.
@overload
def expand_dims(
    a: _ArrayLike[_SCT],
    axis: _ShapeLike,
) -> NDArray[_SCT]: ...
@overload
def expand_dims(
    a: ArrayLike,
    axis: _ShapeLike,
) -> NDArray[Any]: ...
@overload
def column_stack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ...
@overload
def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...
@overload
def dstack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ...
@overload
def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...
# split family: each returns a list of arrays; typed input again
# preserves its scalar type.
@overload
def array_split(
    ary: _ArrayLike[_SCT],
    indices_or_sections: _ShapeLike,
    axis: SupportsIndex = ...,
) -> list[NDArray[_SCT]]: ...
@overload
def array_split(
    ary: ArrayLike,
    indices_or_sections: _ShapeLike,
    axis: SupportsIndex = ...,
) -> list[NDArray[Any]]: ...
@overload
def split(
    ary: _ArrayLike[_SCT],
    indices_or_sections: _ShapeLike,
    axis: SupportsIndex = ...,
) -> list[NDArray[_SCT]]: ...
@overload
def split(
    ary: ArrayLike,
    indices_or_sections: _ShapeLike,
    axis: SupportsIndex = ...,
) -> list[NDArray[Any]]: ...
@overload
def hsplit(
    ary: _ArrayLike[_SCT],
    indices_or_sections: _ShapeLike,
) -> list[NDArray[_SCT]]: ...
@overload
def hsplit(
    ary: ArrayLike,
    indices_or_sections: _ShapeLike,
) -> list[NDArray[Any]]: ...
@overload
def vsplit(
    ary: _ArrayLike[_SCT],
    indices_or_sections: _ShapeLike,
) -> list[NDArray[_SCT]]: ...
@overload
def vsplit(
    ary: ArrayLike,
    indices_or_sections: _ShapeLike,
) -> list[NDArray[Any]]: ...
@overload
def dsplit(
    ary: _ArrayLike[_SCT],
    indices_or_sections: _ShapeLike,
) -> list[NDArray[_SCT]]: ...
@overload
def dsplit(
    ary: ArrayLike,
    indices_or_sections: _ShapeLike,
) -> list[NDArray[Any]]: ...
# get_array_wrap: returns the highest-priority ``__array_wrap__`` among
# the arguments, or None when no argument provides one.
@overload
def get_array_wrap(*args: _SupportsArrayWrap) -> _ArrayWrap: ...
@overload
def get_array_wrap(*args: object) -> None | _ArrayWrap: ...
# kron: dtype-promotion ladder; an object-array operand wins over any
# other operand type (final two overloads).
@overload
def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[np.bool]: ...  # type: ignore[misc]
@overload
def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
@overload
def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
@overload
def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
@overload
def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def kron(a: _ArrayLikeObject_co, b: Any) -> NDArray[object_]: ...
@overload
def kron(a: Any, b: _ArrayLikeObject_co) -> NDArray[object_]: ...
@overload
def tile(
    A: _ArrayLike[_SCT],
    reps: int | Sequence[int],
) -> NDArray[_SCT]: ...
@overload
def tile(
    A: ArrayLike,
    reps: int | Sequence[int],
) -> NDArray[Any]: ...

View File

@ -0,0 +1,563 @@
"""
Utilities that manipulate strides to achieve desirable effects.
An explanation of strides can be found in the :ref:`arrays.ndarray`.
Functions
---------
.. autosummary::
:toctree: generated/
"""
import numpy as np
from numpy._core.numeric import normalize_axis_tuple
from numpy._core.overrides import array_function_dispatch, set_module
__all__ = ['broadcast_to', 'broadcast_arrays', 'broadcast_shapes']
class DummyArray:
    """Dummy object that just exists to hang __array_interface__ dictionaries
    and possibly keep alive a reference to a base array.
    """
    def __init__(self, interface, base=None):
        # `__array_interface__` lets np.asarray interpret this object as
        # the array described by `interface`; `base` keeps the array that
        # owns the underlying memory alive.
        self.__array_interface__ = interface
        self.base = base
def _maybe_view_as_subclass(original_array, new_array):
if type(original_array) is not type(new_array):
# if input was an ndarray subclass and subclasses were OK,
# then view the result as that subclass.
new_array = new_array.view(type=type(original_array))
# Since we have done something akin to a view from original_array, we
# should let the subclass finalize (if it has it implemented, i.e., is
# not None).
if new_array.__array_finalize__:
new_array.__array_finalize__(original_array)
return new_array
@set_module("numpy.lib.stride_tricks")
def as_strided(x, shape=None, strides=None, subok=False, writeable=True):
"""
Create a view into the array with the given shape and strides.
.. warning:: This function has to be used with extreme care, see notes.
Parameters
----------
x : ndarray
Array to create a new.
shape : sequence of int, optional
The shape of the new array. Defaults to ``x.shape``.
strides : sequence of int, optional
The strides of the new array. Defaults to ``x.strides``.
subok : bool, optional
.. versionadded:: 1.10
If True, subclasses are preserved.
writeable : bool, optional
.. versionadded:: 1.12
If set to False, the returned array will always be readonly.
Otherwise it will be writable if the original array was. It
is advisable to set this to False if possible (see Notes).
Returns
-------
view : ndarray
See also
--------
broadcast_to : broadcast an array to a given shape.
reshape : reshape an array.
lib.stride_tricks.sliding_window_view :
userfriendly and safe function for a creation of sliding window views.
Notes
-----
``as_strided`` creates a view into the array given the exact strides
and shape. This means it manipulates the internal data structure of
ndarray and, if done incorrectly, the array elements can point to
invalid memory and can corrupt results or crash your program.
It is advisable to always use the original ``x.strides`` when
calculating new strides to avoid reliance on a contiguous memory
layout.
Furthermore, arrays created with this function often contain self
overlapping memory, so that two elements are identical.
Vectorized write operations on such arrays will typically be
unpredictable. They may even give different results for small, large,
or transposed arrays.
Since writing to these arrays has to be tested and done with great
care, you may want to use ``writeable=False`` to avoid accidental write
operations.
For these reasons it is advisable to avoid ``as_strided`` when
possible.
"""
# first convert input to array, possibly keeping subclass
x = np.array(x, copy=None, subok=subok)
interface = dict(x.__array_interface__)
if shape is not None:
interface['shape'] = tuple(shape)
if strides is not None:
interface['strides'] = tuple(strides)
array = np.asarray(DummyArray(interface, base=x))
# The route via `__interface__` does not preserve structured
# dtypes. Since dtype should remain unchanged, we set it explicitly.
array.dtype = x.dtype
view = _maybe_view_as_subclass(x, array)
if view.flags.writeable and not writeable:
view.flags.writeable = False
return view
def _sliding_window_view_dispatcher(x, window_shape, axis=None, *,
                                    subok=None, writeable=None):
    # __array_function__ dispatcher: only `x` is relevant for dispatch.
    return (x,)
@array_function_dispatch(
_sliding_window_view_dispatcher, module="numpy.lib.stride_tricks"
)
def sliding_window_view(x, window_shape, axis=None, *,
subok=False, writeable=False):
"""
Create a sliding window view into the array with the given window shape.
Also known as rolling or moving window, the window slides across all
dimensions of the array and extracts subsets of the array at all window
positions.
.. versionadded:: 1.20.0
Parameters
----------
x : array_like
Array to create the sliding window view from.
window_shape : int or tuple of int
Size of window over each axis that takes part in the sliding window.
If `axis` is not present, must have same length as the number of input
array dimensions. Single integers `i` are treated as if they were the
tuple `(i,)`.
axis : int or tuple of int, optional
Axis or axes along which the sliding window is applied.
By default, the sliding window is applied to all axes and
`window_shape[i]` will refer to axis `i` of `x`.
If `axis` is given as a `tuple of int`, `window_shape[i]` will refer to
the axis `axis[i]` of `x`.
Single integers `i` are treated as if they were the tuple `(i,)`.
subok : bool, optional
If True, sub-classes will be passed-through, otherwise the returned
array will be forced to be a base-class array (default).
writeable : bool, optional
When true, allow writing to the returned view. The default is false,
as this should be used with caution: the returned view contains the
same memory location multiple times, so writing to one location will
cause others to change.
Returns
-------
view : ndarray
Sliding window view of the array. The sliding window dimensions are
inserted at the end, and the original dimensions are trimmed as
required by the size of the sliding window.
That is, ``view.shape = x_shape_trimmed + window_shape``, where
``x_shape_trimmed`` is ``x.shape`` with every entry reduced by one less
than the corresponding window size.
See Also
--------
lib.stride_tricks.as_strided: A lower-level and less safe routine for
creating arbitrary views from custom shape and strides.
broadcast_to: broadcast an array to a given shape.
Notes
-----
For many applications using a sliding window view can be convenient, but
potentially very slow. Often specialized solutions exist, for example:
- `scipy.signal.fftconvolve`
- filtering functions in `scipy.ndimage`
- moving window functions provided by
`bottleneck <https://github.com/pydata/bottleneck>`_.
As a rough estimate, a sliding window approach with an input size of `N`
and a window size of `W` will scale as `O(N*W)` where frequently a special
algorithm can achieve `O(N)`. That means that the sliding window variant
for a window size of 100 can be a 100 times slower than a more specialized
version.
Nevertheless, for small window sizes, when no custom algorithm exists, or
as a prototyping and developing tool, this function can be a good solution.
Examples
--------
>>> import numpy as np
>>> from numpy.lib.stride_tricks import sliding_window_view
>>> x = np.arange(6)
>>> x.shape
(6,)
>>> v = sliding_window_view(x, 3)
>>> v.shape
(4, 3)
>>> v
array([[0, 1, 2],
[1, 2, 3],
[2, 3, 4],
[3, 4, 5]])
This also works in more dimensions, e.g.
>>> i, j = np.ogrid[:3, :4]
>>> x = 10*i + j
>>> x.shape
(3, 4)
>>> x
array([[ 0, 1, 2, 3],
[10, 11, 12, 13],
[20, 21, 22, 23]])
>>> shape = (2,2)
>>> v = sliding_window_view(x, shape)
>>> v.shape
(2, 3, 2, 2)
>>> v
array([[[[ 0, 1],
[10, 11]],
[[ 1, 2],
[11, 12]],
[[ 2, 3],
[12, 13]]],
[[[10, 11],
[20, 21]],
[[11, 12],
[21, 22]],
[[12, 13],
[22, 23]]]])
The axis can be specified explicitly:
>>> v = sliding_window_view(x, 3, 0)
>>> v.shape
(1, 4, 3)
>>> v
array([[[ 0, 10, 20],
[ 1, 11, 21],
[ 2, 12, 22],
[ 3, 13, 23]]])
The same axis can be used several times. In that case, every use reduces
the corresponding original dimension:
>>> v = sliding_window_view(x, (2, 3), (1, 1))
>>> v.shape
(3, 1, 2, 3)
>>> v
array([[[[ 0, 1, 2],
[ 1, 2, 3]]],
[[[10, 11, 12],
[11, 12, 13]]],
[[[20, 21, 22],
[21, 22, 23]]]])
Combining with stepped slicing (`::step`), this can be used to take sliding
views which skip elements:
>>> x = np.arange(7)
>>> sliding_window_view(x, 5)[:, ::2]
array([[0, 2, 4],
[1, 3, 5],
[2, 4, 6]])
or views which move by multiple elements
>>> x = np.arange(7)
>>> sliding_window_view(x, 3)[::2, :]
array([[0, 1, 2],
[2, 3, 4],
[4, 5, 6]])
A common application of `sliding_window_view` is the calculation of running
statistics. The simplest example is the
`moving average <https://en.wikipedia.org/wiki/Moving_average>`_:
>>> x = np.arange(6)
>>> x.shape
(6,)
>>> v = sliding_window_view(x, 3)
>>> v.shape
(4, 3)
>>> v
array([[0, 1, 2],
[1, 2, 3],
[2, 3, 4],
[3, 4, 5]])
>>> moving_average = v.mean(axis=-1)
>>> moving_average
array([1., 2., 3., 4.])
Note that a sliding window approach is often **not** optimal (see Notes).
"""
window_shape = (tuple(window_shape)
if np.iterable(window_shape)
else (window_shape,))
# first convert input to array, possibly keeping subclass
x = np.array(x, copy=None, subok=subok)
window_shape_array = np.array(window_shape)
if np.any(window_shape_array < 0):
raise ValueError('`window_shape` cannot contain negative values')
if axis is None:
axis = tuple(range(x.ndim))
if len(window_shape) != len(axis):
raise ValueError(f'Since axis is `None`, must provide '
f'window_shape for all dimensions of `x`; '
f'got {len(window_shape)} window_shape elements '
f'and `x.ndim` is {x.ndim}.')
else:
axis = normalize_axis_tuple(axis, x.ndim, allow_duplicate=True)
if len(window_shape) != len(axis):
raise ValueError(f'Must provide matching length window_shape and '
f'axis; got {len(window_shape)} window_shape '
f'elements and {len(axis)} axes elements.')
out_strides = x.strides + tuple(x.strides[ax] for ax in axis)
# note: same axis can be windowed repeatedly
x_shape_trimmed = list(x.shape)
for ax, dim in zip(axis, window_shape):
if x_shape_trimmed[ax] < dim:
raise ValueError(
'window shape cannot be larger than input array shape')
x_shape_trimmed[ax] -= dim - 1
out_shape = tuple(x_shape_trimmed) + window_shape
return as_strided(x, strides=out_strides, shape=out_shape,
subok=subok, writeable=writeable)
def _broadcast_to(array, shape, subok, readonly):
    """Broadcast `array` to `shape` and return the view.

    Shared backend for ``broadcast_to`` (called with ``readonly=True``)
    and ``broadcast_arrays`` (called with ``readonly=False``).
    """
    # A bare integer is accepted as a 1-d shape.
    shape = tuple(shape) if np.iterable(shape) else (shape,)
    # first convert input to array, possibly keeping subclass
    array = np.array(array, copy=None, subok=subok)
    if not shape and array.shape:
        raise ValueError('cannot broadcast a non-scalar to a scalar array')
    if any(size < 0 for size in shape):
        raise ValueError('all elements of broadcast shape must be non-'
                         'negative')
    extras = []
    # nditer with ``itershape`` performs the broadcast; ``itviews[0]`` is
    # the resulting zero-stride view of ``array``.
    it = np.nditer(
        (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras,
        op_flags=['readonly'], itershape=shape, order='C')
    with it:
        # never really has writebackifcopy semantics
        broadcast = it.itviews[0]
    result = _maybe_view_as_subclass(array, broadcast)
    # In a future version this will go away
    if not readonly and array.flags._writeable_no_warn:
        # Writes are still permitted but flagged to warn — see the
        # deprecation note in ``broadcast_arrays``'s docstring.
        result.flags.writeable = True
        result.flags._warn_on_write = True
    return result
def _broadcast_to_dispatcher(array, shape, subok=None):
return (array,)
@array_function_dispatch(_broadcast_to_dispatcher, module='numpy')
def broadcast_to(array, shape, subok=False):
"""Broadcast an array to a new shape.
Parameters
----------
array : array_like
The array to broadcast.
shape : tuple or int
The shape of the desired array. A single integer ``i`` is interpreted
as ``(i,)``.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
Returns
-------
broadcast : array
A readonly view on the original array with the given shape. It is
typically not contiguous. Furthermore, more than one element of a
broadcasted array may refer to a single memory location.
Raises
------
ValueError
If the array is not compatible with the new shape according to NumPy's
broadcasting rules.
See Also
--------
broadcast
broadcast_arrays
broadcast_shapes
Notes
-----
.. versionadded:: 1.10.0
Examples
--------
>>> import numpy as np
>>> x = np.array([1, 2, 3])
>>> np.broadcast_to(x, (3, 3))
array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
"""
return _broadcast_to(array, shape, subok=subok, readonly=True)
def _broadcast_shape(*args):
    """Return the shape resulting from broadcasting `args` against each
    other.
    """
    # The old-style iterator is used because np.nditer does not handle
    # size-0 arrays consistently; np.broadcast, however, is capped at 32
    # operands, so fold the arguments in: broadcast the first 32, then
    # repeatedly combine the running result with up to 31 more.
    b = np.broadcast(*args[:32])
    pos = 32
    while pos < len(args):
        # np.broadcast mishandles np.broadcast objects (treats them as
        # scalars), so collapse the running result into a cheap
        # zero-stride array before the next round.
        stub = broadcast_to(0, b.shape)
        b = np.broadcast(stub, *args[pos:pos + 31])
        pos += 31
    return b.shape
_size0_dtype = np.dtype([])
@set_module('numpy')
def broadcast_shapes(*args):
"""
Broadcast the input shapes into a single shape.
:ref:`Learn more about broadcasting here <basics.broadcasting>`.
.. versionadded:: 1.20.0
Parameters
----------
*args : tuples of ints, or ints
The shapes to be broadcast against each other.
Returns
-------
tuple
Broadcasted shape.
Raises
------
ValueError
If the shapes are not compatible and cannot be broadcast according
to NumPy's broadcasting rules.
See Also
--------
broadcast
broadcast_arrays
broadcast_to
Examples
--------
>>> import numpy as np
>>> np.broadcast_shapes((1, 2), (3, 1), (3, 2))
(3, 2)
>>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7))
(5, 6, 7)
"""
arrays = [np.empty(x, dtype=_size0_dtype) for x in args]
return _broadcast_shape(*arrays)
def _broadcast_arrays_dispatcher(*args, subok=None):
return args
@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy')
def broadcast_arrays(*args, subok=False):
"""
Broadcast any number of arrays against each other.
Parameters
----------
*args : array_likes
The arrays to broadcast.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned arrays will be forced to be a base-class array (default).
Returns
-------
broadcasted : tuple of arrays
These arrays are views on the original arrays. They are typically
not contiguous. Furthermore, more than one element of a
broadcasted array may refer to a single memory location. If you need
to write to the arrays, make copies first. While you can set the
``writable`` flag True, writing to a single output value may end up
changing more than one location in the output array.
.. deprecated:: 1.17
The output is currently marked so that if written to, a deprecation
warning will be emitted. A future version will set the
``writable`` flag False so writing to it will raise an error.
See Also
--------
broadcast
broadcast_to
broadcast_shapes
Examples
--------
>>> import numpy as np
>>> x = np.array([[1,2,3]])
>>> y = np.array([[4],[5]])
>>> np.broadcast_arrays(x, y)
(array([[1, 2, 3],
[1, 2, 3]]),
array([[4, 4, 4],
[5, 5, 5]]))
Here is a useful idiom for getting contiguous copies instead of
non-contiguous views.
>>> [np.array(a) for a in np.broadcast_arrays(x, y)]
[array([[1, 2, 3],
[1, 2, 3]]),
array([[4, 4, 4],
[5, 5, 5]])]
"""
# nditer is not used here to avoid the limit of 32 arrays.
# Otherwise, something like the following one-liner would suffice:
# return np.nditer(args, flags=['multi_index', 'zerosize_ok'],
# order='C').itviews
args = [np.array(_m, copy=None, subok=subok) for _m in args]
shape = _broadcast_shape(*args)
result = [array if array.shape == shape
else _broadcast_to(array, shape, subok=subok, readonly=False)
for array in args]
return tuple(result)

View File

@ -0,0 +1,80 @@
# Type stubs for the stride-trick helpers (``as_strided``,
# ``sliding_window_view``) and the public broadcasting functions.
from collections.abc import Iterable
from typing import Any, TypeVar, overload, SupportsIndex

from numpy import generic
from numpy._typing import (
    NDArray,
    ArrayLike,
    _ShapeLike,
    _Shape,
    _ArrayLike
)

# Scalar type preserved by the array-typed overloads below.
_SCT = TypeVar("_SCT", bound=generic)

__all__: list[str]

class DummyArray:
    # Wrapper carrying an __array_interface__ dict while keeping ``base``
    # alive.
    __array_interface__: dict[str, Any]
    base: None | NDArray[Any]
    def __init__(
        self,
        interface: dict[str, Any],
        base: None | NDArray[Any] = ...,
    ) -> None: ...

@overload
def as_strided(
    x: _ArrayLike[_SCT],
    shape: None | Iterable[int] = ...,
    strides: None | Iterable[int] = ...,
    subok: bool = ...,
    writeable: bool = ...,
) -> NDArray[_SCT]: ...
@overload
def as_strided(
    x: ArrayLike,
    shape: None | Iterable[int] = ...,
    strides: None | Iterable[int] = ...,
    subok: bool = ...,
    writeable: bool = ...,
) -> NDArray[Any]: ...

# NOTE(review): the runtime implementation also accepts an iterable of
# axes for ``axis`` (it is passed through ``normalize_axis_tuple``); this
# ``SupportsIndex`` annotation may be narrower than the actual behaviour
# — confirm against the implementation.
@overload
def sliding_window_view(
    x: _ArrayLike[_SCT],
    window_shape: int | Iterable[int],
    axis: None | SupportsIndex = ...,
    *,
    subok: bool = ...,
    writeable: bool = ...,
) -> NDArray[_SCT]: ...
@overload
def sliding_window_view(
    x: ArrayLike,
    window_shape: int | Iterable[int],
    axis: None | SupportsIndex = ...,
    *,
    subok: bool = ...,
    writeable: bool = ...,
) -> NDArray[Any]: ...

@overload
def broadcast_to(
    array: _ArrayLike[_SCT],
    shape: int | Iterable[int],
    subok: bool = ...,
) -> NDArray[_SCT]: ...
@overload
def broadcast_to(
    array: ArrayLike,
    shape: int | Iterable[int],
    subok: bool = ...,
) -> NDArray[Any]: ...

def broadcast_shapes(*args: _ShapeLike) -> _Shape: ...

def broadcast_arrays(
    *args: ArrayLike,
    subok: bool = ...,
) -> tuple[NDArray[Any], ...]: ...

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,422 @@
import builtins
from collections.abc import Callable, Sequence
from typing import (
Any,
TypeAlias,
overload,
TypeVar,
Literal as L,
)
import numpy as np
from numpy import (
generic,
number,
timedelta64,
datetime64,
int_,
intp,
float64,
complex128,
signedinteger,
floating,
complexfloating,
object_,
_OrderCF,
)
from numpy._typing import (
DTypeLike,
_DTypeLike,
ArrayLike,
_ArrayLike,
NDArray,
_SupportsArray,
_SupportsArrayFunc,
_ArrayLikeInt_co,
_ArrayLikeFloat_co,
_ArrayLikeComplex_co,
_ArrayLikeObject_co,
)
_T = TypeVar("_T")
_SCT = TypeVar("_SCT", bound=generic)

# The returned arrays dtype must be compatible with `np.equal`
_MaskFunc = Callable[
    [NDArray[int_], _T],
    NDArray[number[Any] | np.bool | timedelta64 | datetime64 | object_],
]

__all__: list[str]

@overload
def fliplr(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
@overload
def fliplr(m: ArrayLike) -> NDArray[Any]: ...

@overload
def flipud(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
@overload
def flipud(m: ArrayLike) -> NDArray[Any]: ...

# ``dtype=None`` (the default) produces float64; a concrete dtype is
# propagated into the return annotation.
@overload
def eye(
    N: int,
    M: None | int = ...,
    k: int = ...,
    dtype: None = ...,
    order: _OrderCF = ...,
    *,
    device: None | L["cpu"] = ...,
    like: None | _SupportsArrayFunc = ...,
) -> NDArray[float64]: ...
@overload
def eye(
    N: int,
    M: None | int = ...,
    k: int = ...,
    dtype: _DTypeLike[_SCT] = ...,
    order: _OrderCF = ...,
    *,
    device: None | L["cpu"] = ...,
    like: None | _SupportsArrayFunc = ...,
) -> NDArray[_SCT]: ...
@overload
def eye(
    N: int,
    M: None | int = ...,
    k: int = ...,
    dtype: DTypeLike = ...,
    order: _OrderCF = ...,
    *,
    device: None | L["cpu"] = ...,
    like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...

@overload
def diag(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
@overload
def diag(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...

@overload
def diagflat(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
@overload
def diagflat(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...

@overload
def tri(
    N: int,
    M: None | int = ...,
    k: int = ...,
    dtype: None = ...,
    *,
    like: None | _SupportsArrayFunc = ...
) -> NDArray[float64]: ...
@overload
def tri(
    N: int,
    M: None | int = ...,
    k: int = ...,
    dtype: _DTypeLike[_SCT] = ...,
    *,
    like: None | _SupportsArrayFunc = ...
) -> NDArray[_SCT]: ...
@overload
def tri(
    N: int,
    M: None | int = ...,
    k: int = ...,
    dtype: DTypeLike = ...,
    *,
    like: None | _SupportsArrayFunc = ...
) -> NDArray[Any]: ...

@overload
def tril(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
@overload
def tril(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...

@overload
def triu(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
@overload
def triu(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...

# vander promotes by input kind: int -> signedinteger, float -> floating,
# complex -> complexfloating, object -> object_.
@overload
def vander(  # type: ignore[misc]
    x: _ArrayLikeInt_co,
    N: None | int = ...,
    increasing: bool = ...,
) -> NDArray[signedinteger[Any]]: ...
@overload
def vander(  # type: ignore[misc]
    x: _ArrayLikeFloat_co,
    N: None | int = ...,
    increasing: bool = ...,
) -> NDArray[floating[Any]]: ...
@overload
def vander(
    x: _ArrayLikeComplex_co,
    N: None | int = ...,
    increasing: bool = ...,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def vander(
    x: _ArrayLikeObject_co,
    N: None | int = ...,
    increasing: bool = ...,
) -> NDArray[object_]: ...
# Scalar-category unions and array-like aliases used by the annotations
# below.
_Int_co: TypeAlias = np.integer[Any] | np.bool
_Float_co: TypeAlias = np.floating[Any] | _Int_co
_Number_co: TypeAlias = np.number[Any] | np.bool

_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_SCT]] | Sequence[_SCT]
_ArrayLike2D: TypeAlias = (
    _SupportsArray[np.dtype[_SCT]]
    | Sequence[_ArrayLike1D[_SCT]]
)

# These four were previously plain assignments; annotated as TypeAlias
# for consistency with the aliases above (TypeAlias is already imported
# in this stub).
_ArrayLike1DInt_co: TypeAlias = (
    _SupportsArray[np.dtype[_Int_co]]
    | Sequence[int | _Int_co]
)
_ArrayLike1DFloat_co: TypeAlias = (
    _SupportsArray[np.dtype[_Float_co]]
    | Sequence[float | int | _Float_co]
)
_ArrayLike2DFloat_co: TypeAlias = (
    _SupportsArray[np.dtype[_Float_co]]
    | Sequence[_ArrayLike1DFloat_co]
)
_ArrayLike1DNumber_co: TypeAlias = (
    _SupportsArray[np.dtype[_Number_co]]
    | Sequence[int | float | complex | _Number_co]
)

_SCT_complex = TypeVar("_SCT_complex", bound=np.complexfloating[Any, Any])
_SCT_inexact = TypeVar("_SCT_inexact", bound=np.inexact[Any])
_SCT_number_co = TypeVar("_SCT_number_co", bound=_Number_co)
# histogram2d overloads: the first return element (the histogram) is
# always float64; the bin-edge arrays' dtype tracks the dtype of x/y or,
# when explicit bin edges are given, the dtype of ``bins``.
@overload
def histogram2d(
    x: _ArrayLike1D[_SCT_complex],
    y: _ArrayLike1D[_SCT_complex | _Float_co],
    bins: int | Sequence[int] = ...,
    range: None | _ArrayLike2DFloat_co = ...,
    density: None | bool = ...,
    weights: None | _ArrayLike1DFloat_co = ...,
) -> tuple[
    NDArray[float64],
    NDArray[_SCT_complex],
    NDArray[_SCT_complex],
]: ...
@overload
def histogram2d(
    x: _ArrayLike1D[_SCT_complex | _Float_co],
    y: _ArrayLike1D[_SCT_complex],
    bins: int | Sequence[int] = ...,
    range: None | _ArrayLike2DFloat_co = ...,
    density: None | bool = ...,
    weights: None | _ArrayLike1DFloat_co = ...,
) -> tuple[
    NDArray[float64],
    NDArray[_SCT_complex],
    NDArray[_SCT_complex],
]: ...
@overload
def histogram2d(
    x: _ArrayLike1D[_SCT_inexact],
    y: _ArrayLike1D[_SCT_inexact | _Int_co],
    bins: int | Sequence[int] = ...,
    range: None | _ArrayLike2DFloat_co = ...,
    density: None | bool = ...,
    weights: None | _ArrayLike1DFloat_co = ...,
) -> tuple[
    NDArray[float64],
    NDArray[_SCT_inexact],
    NDArray[_SCT_inexact],
]: ...
@overload
def histogram2d(
    x: _ArrayLike1D[_SCT_inexact | _Int_co],
    y: _ArrayLike1D[_SCT_inexact],
    bins: int | Sequence[int] = ...,
    range: None | _ArrayLike2DFloat_co = ...,
    density: None | bool = ...,
    weights: None | _ArrayLike1DFloat_co = ...,
) -> tuple[
    NDArray[float64],
    NDArray[_SCT_inexact],
    NDArray[_SCT_inexact],
]: ...
@overload
def histogram2d(
    x: _ArrayLike1DInt_co | Sequence[float | int],
    y: _ArrayLike1DInt_co | Sequence[float | int],
    bins: int | Sequence[int] = ...,
    range: None | _ArrayLike2DFloat_co = ...,
    density: None | bool = ...,
    weights: None | _ArrayLike1DFloat_co = ...,
) -> tuple[
    NDArray[float64],
    NDArray[float64],
    NDArray[float64],
]: ...
@overload
def histogram2d(
    x: Sequence[complex | float | int],
    y: Sequence[complex | float | int],
    bins: int | Sequence[int] = ...,
    range: None | _ArrayLike2DFloat_co = ...,
    density: None | bool = ...,
    weights: None | _ArrayLike1DFloat_co = ...,
) -> tuple[
    NDArray[float64],
    NDArray[complex128 | float64],
    NDArray[complex128 | float64],
]: ...
# Overloads for explicit bin-edge arrays: edge dtype follows ``bins``.
@overload
def histogram2d(
    x: _ArrayLike1DNumber_co,
    y: _ArrayLike1DNumber_co,
    bins: _ArrayLike1D[_SCT_number_co] | Sequence[_ArrayLike1D[_SCT_number_co]],
    range: None | _ArrayLike2DFloat_co = ...,
    density: None | bool = ...,
    weights: None | _ArrayLike1DFloat_co = ...,
) -> tuple[
    NDArray[float64],
    NDArray[_SCT_number_co],
    NDArray[_SCT_number_co],
]: ...
@overload
def histogram2d(
    x: _ArrayLike1D[_SCT_inexact],
    y: _ArrayLike1D[_SCT_inexact],
    bins: Sequence[_ArrayLike1D[_SCT_number_co] | int],
    range: None | _ArrayLike2DFloat_co = ...,
    density: None | bool = ...,
    weights: None | _ArrayLike1DFloat_co = ...,
) -> tuple[
    NDArray[float64],
    NDArray[_SCT_number_co | _SCT_inexact],
    NDArray[_SCT_number_co | _SCT_inexact],
]: ...
@overload
def histogram2d(
    x: _ArrayLike1DInt_co | Sequence[float | int],
    y: _ArrayLike1DInt_co | Sequence[float | int],
    bins: Sequence[_ArrayLike1D[_SCT_number_co] | int],
    range: None | _ArrayLike2DFloat_co = ...,
    density: None | bool = ...,
    weights: None | _ArrayLike1DFloat_co = ...,
) -> tuple[
    NDArray[float64],
    NDArray[_SCT_number_co | float64],
    NDArray[_SCT_number_co | float64],
]: ...
@overload
def histogram2d(
    x: Sequence[complex | float | int],
    y: Sequence[complex | float | int],
    bins: Sequence[_ArrayLike1D[_SCT_number_co] | int],
    range: None | _ArrayLike2DFloat_co = ...,
    density: None | bool = ...,
    weights: None | _ArrayLike1DFloat_co = ...,
) -> tuple[
    NDArray[float64],
    NDArray[_SCT_number_co | complex128 | float64],
    NDArray[_SCT_number_co | complex128 | float64],
]: ...
@overload
def histogram2d(
    x: _ArrayLike1DNumber_co,
    y: _ArrayLike1DNumber_co,
    bins: Sequence[Sequence[bool]],
    range: None | _ArrayLike2DFloat_co = ...,
    density: None | bool = ...,
    weights: None | _ArrayLike1DFloat_co = ...,
) -> tuple[
    NDArray[float64],
    NDArray[np.bool],
    NDArray[np.bool],
]: ...
@overload
def histogram2d(
    x: _ArrayLike1DNumber_co,
    y: _ArrayLike1DNumber_co,
    bins: Sequence[Sequence[int | bool]],
    range: None | _ArrayLike2DFloat_co = ...,
    density: None | bool = ...,
    weights: None | _ArrayLike1DFloat_co = ...,
) -> tuple[
    NDArray[float64],
    NDArray[np.int_ | np.bool],
    NDArray[np.int_ | np.bool],
]: ...
@overload
def histogram2d(
    x: _ArrayLike1DNumber_co,
    y: _ArrayLike1DNumber_co,
    bins: Sequence[Sequence[float | int | bool]],
    range: None | _ArrayLike2DFloat_co = ...,
    density: None | bool = ...,
    weights: None | _ArrayLike1DFloat_co = ...,
) -> tuple[
    NDArray[float64],
    NDArray[np.float64 | np.int_ | np.bool],
    NDArray[np.float64 | np.int_ | np.bool],
]: ...
@overload
def histogram2d(
    x: _ArrayLike1DNumber_co,
    y: _ArrayLike1DNumber_co,
    bins: Sequence[Sequence[complex | float | int | bool]],
    range: None | _ArrayLike2DFloat_co = ...,
    density: None | bool = ...,
    weights: None | _ArrayLike1DFloat_co = ...,
) -> tuple[
    NDArray[float64],
    NDArray[np.complex128 | np.float64 | np.int_ | np.bool],
    NDArray[np.complex128 | np.float64 | np.int_ | np.bool],
]: ...

# NOTE: we're assuming/demanding here the `mask_func` returns
# an ndarray of shape `(n, n)`; otherwise there is the possibility
# of the output tuple having more or less than 2 elements
@overload
def mask_indices(
    n: int,
    mask_func: _MaskFunc[int],
    k: int = ...,
) -> tuple[NDArray[intp], NDArray[intp]]: ...
@overload
def mask_indices(
    n: int,
    mask_func: _MaskFunc[_T],
    k: _T,
) -> tuple[NDArray[intp], NDArray[intp]]: ...

def tril_indices(
    n: int,
    k: int = ...,
    m: None | int = ...,
) -> tuple[NDArray[int_], NDArray[int_]]: ...

def tril_indices_from(
    arr: NDArray[Any],
    k: int = ...,
) -> tuple[NDArray[int_], NDArray[int_]]: ...

def triu_indices(
    n: int,
    k: int = ...,
    m: None | int = ...,
) -> tuple[NDArray[int_], NDArray[int_]]: ...

def triu_indices_from(
    arr: NDArray[Any],
    k: int = ...,
) -> tuple[NDArray[int_], NDArray[int_]]: ...

View File

@ -0,0 +1,709 @@
"""Automatically adapted for numpy Sep 19, 2005 by convertcode.py
"""
import functools
__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex',
'isreal', 'nan_to_num', 'real', 'real_if_close',
'typename', 'mintypecode',
'common_type']
from .._utils import set_module
import numpy._core.numeric as _nx
from numpy._core.numeric import asarray, asanyarray, isnan, zeros
from numpy._core import overrides, getlimits
from ._ufunclike_impl import isneginf, isposinf
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?'
@set_module('numpy')
def mintypecode(typechars, typeset='GDFgdf', default='d'):
"""
Return the character for the minimum-size type to which given types can
be safely cast.
The returned type character must represent the smallest size dtype such
that an array of the returned type can handle the data from an array of
all types in `typechars` (or if `typechars` is an array, then its
dtype.char).
Parameters
----------
typechars : list of str or array_like
If a list of strings, each string should represent a dtype.
If array_like, the character representation of the array dtype is used.
typeset : str or list of str, optional
The set of characters that the returned character is chosen from.
The default set is 'GDFgdf'.
default : str, optional
The default character, this is returned if none of the characters in
`typechars` matches a character in `typeset`.
Returns
-------
typechar : str
The character representing the minimum-size type that was found.
See Also
--------
dtype
Examples
--------
>>> import numpy as np
>>> np.mintypecode(['d', 'f', 'S'])
'd'
>>> x = np.array([1.1, 2-3.j])
>>> np.mintypecode(x)
'D'
>>> np.mintypecode('abceh', default='G')
'G'
"""
typecodes = ((isinstance(t, str) and t) or asarray(t).dtype.char
for t in typechars)
intersection = set(t for t in typecodes if t in typeset)
if not intersection:
return default
if 'F' in intersection and 'd' in intersection:
return 'D'
return min(intersection, key=_typecodes_by_elsize.index)
def _real_dispatcher(val):
return (val,)
@array_function_dispatch(_real_dispatcher)
def real(val):
"""
Return the real part of the complex argument.
Parameters
----------
val : array_like
Input array.
Returns
-------
out : ndarray or scalar
The real component of the complex argument. If `val` is real, the type
of `val` is used for the output. If `val` has complex elements, the
returned type is float.
See Also
--------
real_if_close, imag, angle
Examples
--------
>>> import numpy as np
>>> a = np.array([1+2j, 3+4j, 5+6j])
>>> a.real
array([1., 3., 5.])
>>> a.real = 9
>>> a
array([9.+2.j, 9.+4.j, 9.+6.j])
>>> a.real = np.array([9, 8, 7])
>>> a
array([9.+2.j, 8.+4.j, 7.+6.j])
>>> np.real(1 + 1j)
1.0
"""
try:
return val.real
except AttributeError:
return asanyarray(val).real
def _imag_dispatcher(val):
return (val,)
@array_function_dispatch(_imag_dispatcher)
def imag(val):
"""
Return the imaginary part of the complex argument.
Parameters
----------
val : array_like
Input array.
Returns
-------
out : ndarray or scalar
The imaginary component of the complex argument. If `val` is real,
the type of `val` is used for the output. If `val` has complex
elements, the returned type is float.
See Also
--------
real, angle, real_if_close
Examples
--------
>>> import numpy as np
>>> a = np.array([1+2j, 3+4j, 5+6j])
>>> a.imag
array([2., 4., 6.])
>>> a.imag = np.array([8, 10, 12])
>>> a
array([1. +8.j, 3.+10.j, 5.+12.j])
>>> np.imag(1 + 1j)
1.0
"""
try:
return val.imag
except AttributeError:
return asanyarray(val).imag
def _is_type_dispatcher(x):
return (x,)
@array_function_dispatch(_is_type_dispatcher)
def iscomplex(x):
"""
Returns a bool array, where True if input element is complex.
What is tested is whether the input has a non-zero imaginary part, not if
the input type is complex.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray of bools
Output array.
See Also
--------
isreal
iscomplexobj : Return True if x is a complex type or an array of complex
numbers.
Examples
--------
>>> import numpy as np
>>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j])
array([ True, False, False, False, False, True])
"""
ax = asanyarray(x)
if issubclass(ax.dtype.type, _nx.complexfloating):
return ax.imag != 0
res = zeros(ax.shape, bool)
return res[()] # convert to scalar if needed
@array_function_dispatch(_is_type_dispatcher)
def isreal(x):
    """
    Returns a bool array, where True if input element is real.

    If element has complex type with zero imaginary part, the return
    value for that element is True.

    Parameters
    ----------
    x : array_like
        Input array.

    Returns
    -------
    out : ndarray, bool
        Boolean array of same shape as `x`.

    See Also
    --------
    iscomplex
    isrealobj : Return True if x is not a complex type.

    Notes
    -----
    `isreal` may behave unexpectedly for string or object arrays: string
    input warns about a non-elementwise comparison and yields a scalar
    ``False``, and ``dtype=object`` arrays are reported real elementwise
    regardless of whether the contained objects are complex, so `isreal`
    should not be used with either.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex)
    >>> np.isreal(a)
    array([False,  True,  True,  True,  True, False])
    """
    # An element is "real" exactly when its imaginary component is zero.
    return imag(x) == 0
@array_function_dispatch(_is_type_dispatcher)
def iscomplexobj(x):
    """
    Check for a complex type or an array of complex numbers.

    The type of the input is checked, not the value.  Even if the input
    has an imaginary part equal to zero, `iscomplexobj` evaluates to
    True.

    Parameters
    ----------
    x : any
        The input can be of any type and shape.

    Returns
    -------
    iscomplexobj : bool
        The return value, True if `x` is of a complex type or has at
        least one complex element.

    See Also
    --------
    isrealobj, iscomplex

    Examples
    --------
    >>> import numpy as np
    >>> np.iscomplexobj(1)
    False
    >>> np.iscomplexobj(1+0j)
    True
    >>> np.iscomplexobj([3, 1+0j, True])
    True
    """
    # EAFP: inputs without a ``dtype`` attribute are converted to an
    # array so their element type can be inspected.
    try:
        scalar_type = x.dtype.type
    except AttributeError:
        scalar_type = asarray(x).dtype.type
    return issubclass(scalar_type, _nx.complexfloating)
@array_function_dispatch(_is_type_dispatcher)
def isrealobj(x):
    """
    Return True if x is a not complex type or an array of complex
    numbers.

    The type of the input is checked, not the value.  So even if the
    input has an imaginary part equal to zero, `isrealobj` evaluates to
    False if the data type is complex.

    Parameters
    ----------
    x : any
        The input can be of any type and shape.

    Returns
    -------
    y : bool
        The return value, False if `x` is of a complex type.

    See Also
    --------
    iscomplexobj, isreal

    Notes
    -----
    The function is only meant for arrays with numerical values but it
    accepts all other objects.  Since it assumes array input, the return
    value of other objects may be True.

    >>> np.isrealobj('A string')
    True
    >>> np.isrealobj(False)
    True
    >>> np.isrealobj(None)
    True

    Examples
    --------
    >>> import numpy as np
    >>> np.isrealobj(1)
    True
    >>> np.isrealobj(1+0j)
    False
    >>> np.isrealobj([3, 1+0j, True])
    False
    """
    # Exact complement of ``iscomplexobj``.
    return not iscomplexobj(x)
#-----------------------------------------------------------------------------
def _getmaxmin(t):
from numpy._core import getlimits
f = getlimits.finfo(t)
return f.max, f.min
def _nan_to_num_dispatcher(x, copy=None, nan=None, posinf=None, neginf=None):
return (x,)
@array_function_dispatch(_nan_to_num_dispatcher)
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
"""
Replace NaN with zero and infinity with large finite numbers (default
behaviour) or with the numbers defined by the user using the `nan`,
`posinf` and/or `neginf` keywords.
If `x` is inexact, NaN is replaced by zero or by the user defined value in
`nan` keyword, infinity is replaced by the largest finite floating point
values representable by ``x.dtype`` or by the user defined value in
`posinf` keyword and -infinity is replaced by the most negative finite
floating point values representable by ``x.dtype`` or by the user defined
value in `neginf` keyword.
For complex dtypes, the above is applied to each of the real and
imaginary components of `x` separately.
If `x` is not inexact, then no replacements are made.
Parameters
----------
x : scalar or array_like
Input data.
copy : bool, optional
Whether to create a copy of `x` (True) or to replace values
in-place (False). The in-place operation only occurs if
casting to an array does not require a copy.
Default is True.
.. versionadded:: 1.13
nan : int, float, optional
Value to be used to fill NaN values. If no value is passed
then NaN values will be replaced with 0.0.
.. versionadded:: 1.17
posinf : int, float, optional
Value to be used to fill positive infinity values. If no value is
passed then positive infinity values will be replaced with a very
large number.
.. versionadded:: 1.17
neginf : int, float, optional
Value to be used to fill negative infinity values. If no value is
passed then negative infinity values will be replaced with a very
small (or negative) number.
.. versionadded:: 1.17
Returns
-------
out : ndarray
`x`, with the non-finite values replaced. If `copy` is False, this may
be `x` itself.
See Also
--------
isinf : Shows which elements are positive or negative infinity.
isneginf : Shows which elements are negative infinity.
isposinf : Shows which elements are positive infinity.
isnan : Shows which elements are Not a Number (NaN).
isfinite : Shows which elements are finite (not NaN, not infinity)
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> import numpy as np
>>> np.nan_to_num(np.inf)
1.7976931348623157e+308
>>> np.nan_to_num(-np.inf)
-1.7976931348623157e+308
>>> np.nan_to_num(np.nan)
0.0
>>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
>>> np.nan_to_num(x)
array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary
-1.28000000e+002, 1.28000000e+002])
>>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03,
-1.2800000e+02, 1.2800000e+02])
>>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)])
array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary
-1.28000000e+002, 1.28000000e+002])
>>> np.nan_to_num(y)
array([ 1.79769313e+308 +0.00000000e+000j, # may vary
0.00000000e+000 +0.00000000e+000j,
0.00000000e+000 +1.79769313e+308j])
>>> np.nan_to_num(y, nan=111111, posinf=222222)
array([222222.+111111.j, 111111. +0.j, 111111.+222222.j])
"""
x = _nx.array(x, subok=True, copy=copy)
xtype = x.dtype.type
isscalar = (x.ndim == 0)
if not issubclass(xtype, _nx.inexact):
return x[()] if isscalar else x
iscomplex = issubclass(xtype, _nx.complexfloating)
dest = (x.real, x.imag) if iscomplex else (x,)
maxf, minf = _getmaxmin(x.real.dtype)
if posinf is not None:
maxf = posinf
if neginf is not None:
minf = neginf
for d in dest:
idx_nan = isnan(d)
idx_posinf = isposinf(d)
idx_neginf = isneginf(d)
_nx.copyto(d, nan, where=idx_nan)
_nx.copyto(d, maxf, where=idx_posinf)
_nx.copyto(d, minf, where=idx_neginf)
return x[()] if isscalar else x
#-----------------------------------------------------------------------------
def _real_if_close_dispatcher(a, tol=None):
return (a,)
@array_function_dispatch(_real_if_close_dispatcher)
def real_if_close(a, tol=100):
"""
If input is complex with all imaginary parts close to zero, return
real parts.
"Close to zero" is defined as `tol` * (machine epsilon of the type for
`a`).
Parameters
----------
a : array_like
Input array.
tol : float
Tolerance in machine epsilons for the complex part of the elements
in the array. If the tolerance is <=1, then the absolute tolerance
is used.
Returns
-------
out : ndarray
If `a` is real, the type of `a` is used for the output. If `a`
has complex elements, the returned type is float.
See Also
--------
real, imag, angle
Notes
-----
Machine epsilon varies from machine to machine and between data types
but Python floats on most platforms have a machine epsilon equal to
2.2204460492503131e-16. You can use 'np.finfo(float).eps' to print
out the machine epsilon for floats.
Examples
--------
>>> import numpy as np
>>> np.finfo(float).eps
2.2204460492503131e-16 # may vary
>>> np.real_if_close([2.1 + 4e-14j, 5.2 + 3e-15j], tol=1000)
array([2.1, 5.2])
>>> np.real_if_close([2.1 + 4e-13j, 5.2 + 3e-15j], tol=1000)
array([2.1+4.e-13j, 5.2 + 3e-15j])
"""
a = asanyarray(a)
type_ = a.dtype.type
if not issubclass(type_, _nx.complexfloating):
return a
if tol > 1:
f = getlimits.finfo(type_)
tol = f.eps * tol
if _nx.all(_nx.absolute(a.imag) < tol):
a = a.real
return a
#-----------------------------------------------------------------------------
_namefromtype = {'S1': 'character',
'?': 'bool',
'b': 'signed char',
'B': 'unsigned char',
'h': 'short',
'H': 'unsigned short',
'i': 'integer',
'I': 'unsigned integer',
'l': 'long integer',
'L': 'unsigned long integer',
'q': 'long long integer',
'Q': 'unsigned long long integer',
'f': 'single precision',
'd': 'double precision',
'g': 'long precision',
'F': 'complex single precision',
'D': 'complex double precision',
'G': 'complex long double precision',
'S': 'string',
'U': 'unicode',
'V': 'void',
'O': 'object'
}
@set_module('numpy')
def typename(char):
"""
Return a description for the given data type code.
Parameters
----------
char : str
Data type code.
Returns
-------
out : str
Description of the input data type code.
See Also
--------
dtype
Examples
--------
>>> import numpy as np
>>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q',
... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q']
>>> for typechar in typechars:
... print(typechar, ' : ', np.typename(typechar))
...
S1 : character
? : bool
B : unsigned char
D : complex double precision
G : complex long double precision
F : complex single precision
I : unsigned integer
H : unsigned short
L : unsigned long integer
O : object
Q : unsigned long long integer
S : string
U : unicode
V : void
b : signed char
d : double precision
g : long precision
f : single precision
i : integer
h : short
l : long integer
q : long long integer
"""
return _namefromtype[char]
#-----------------------------------------------------------------------------
#determine the "minimum common type" for a group of arrays.
array_type = [[_nx.float16, _nx.float32, _nx.float64, _nx.longdouble],
[None, _nx.complex64, _nx.complex128, _nx.clongdouble]]
array_precision = {_nx.float16: 0,
_nx.float32: 1,
_nx.float64: 2,
_nx.longdouble: 3,
_nx.complex64: 1,
_nx.complex128: 2,
_nx.clongdouble: 3}
def _common_type_dispatcher(*arrays):
return arrays
@array_function_dispatch(_common_type_dispatcher)
def common_type(*arrays):
"""
Return a scalar type which is common to the input arrays.
The return type will always be an inexact (i.e. floating point) scalar
type, even if all the arrays are integer arrays. If one of the inputs is
an integer array, the minimum precision type that is returned is a
64-bit floating point dtype.
All input arrays except int64 and uint64 can be safely cast to the
returned dtype without loss of information.
Parameters
----------
array1, array2, ... : ndarrays
Input arrays.
Returns
-------
out : data type code
Data type code.
See Also
--------
dtype, mintypecode
Examples
--------
>>> np.common_type(np.arange(2, dtype=np.float32))
<class 'numpy.float32'>
>>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2))
<class 'numpy.float64'>
>>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0]))
<class 'numpy.complex128'>
"""
is_complex = False
precision = 0
for a in arrays:
t = a.dtype.type
if iscomplexobj(a):
is_complex = True
if issubclass(t, _nx.integer):
p = 2 # array_precision[_nx.double]
else:
p = array_precision.get(t)
if p is None:
raise TypeError("can't get common type for non-numeric array")
precision = max(precision, p)
if is_complex:
return array_type[1][precision]
else:
return array_type[0][precision]

View File

@ -0,0 +1,204 @@
from collections.abc import Container, Iterable
from typing import (
Literal as L,
Any,
overload,
TypeVar,
Protocol,
)
import numpy as np
from numpy import (
dtype,
generic,
floating,
float64,
complexfloating,
integer,
)
from numpy._typing import (
ArrayLike,
DTypeLike,
NBitBase,
NDArray,
_64Bit,
_SupportsDType,
_ScalarLike_co,
_ArrayLike,
_DTypeLikeComplex,
)
# Type variables shared by the annotations in this stub.
_T = TypeVar("_T")
_T_co = TypeVar("_T_co", covariant=True)
_SCT = TypeVar("_SCT", bound=generic)
_NBit1 = TypeVar("_NBit1", bound=NBitBase)
_NBit2 = TypeVar("_NBit2", bound=NBitBase)
class _SupportsReal(Protocol[_T_co]):
    """Structural type: any object exposing a ``real`` property."""
    @property
    def real(self) -> _T_co: ...
class _SupportsImag(Protocol[_T_co]):
    """Structural type: any object exposing an ``imag`` property."""
    @property
    def imag(self) -> _T_co: ...
__all__: list[str]
def mintypecode(
    typechars: Iterable[str | ArrayLike],
    typeset: Container[str] = ...,
    default: str = ...,
) -> str: ...
# `real`/`imag`: an object exposing the matching property keeps the
# property's type; any other array-like is coerced to an ndarray.
@overload
def real(val: _SupportsReal[_T]) -> _T: ...
@overload
def real(val: ArrayLike) -> NDArray[Any]: ...
@overload
def imag(val: _SupportsImag[_T]) -> _T: ...
@overload
def imag(val: ArrayLike) -> NDArray[Any]: ...
# Scalar input yields a bool scalar, array-like input a bool array.
@overload
def iscomplex(x: _ScalarLike_co) -> np.bool: ... # type: ignore[misc]
@overload
def iscomplex(x: ArrayLike) -> NDArray[np.bool]: ...
@overload
def isreal(x: _ScalarLike_co) -> np.bool: ... # type: ignore[misc]
@overload
def isreal(x: ArrayLike) -> NDArray[np.bool]: ...
# These two inspect only the dtype, so anything carrying one is accepted.
def iscomplexobj(x: _SupportsDType[dtype[Any]] | ArrayLike) -> bool: ...
def isrealobj(x: _SupportsDType[dtype[Any]] | ArrayLike) -> bool: ...
# `nan_to_num`: overloads run from most specific to most general — a known
# generic scalar keeps its type, other scalars become `Any`, known-dtype
# array-likes keep their scalar type, anything else is `NDArray[Any]`.
@overload
def nan_to_num( # type: ignore[misc]
    x: _SCT,
    copy: bool = ...,
    nan: float = ...,
    posinf: None | float = ...,
    neginf: None | float = ...,
) -> _SCT: ...
@overload
def nan_to_num(
    x: _ScalarLike_co,
    copy: bool = ...,
    nan: float = ...,
    posinf: None | float = ...,
    neginf: None | float = ...,
) -> Any: ...
@overload
def nan_to_num(
    x: _ArrayLike[_SCT],
    copy: bool = ...,
    nan: float = ...,
    posinf: None | float = ...,
    neginf: None | float = ...,
) -> NDArray[_SCT]: ...
@overload
def nan_to_num(
    x: ArrayLike,
    copy: bool = ...,
    nan: float = ...,
    posinf: None | float = ...,
    neginf: None | float = ...,
) -> NDArray[Any]: ...
# If one passes a complex array to `real_if_close`, then one is reasonably
# expected to verify the output dtype (so we can return an unsafe union here)
@overload
def real_if_close( # type: ignore[misc]
    a: _ArrayLike[complexfloating[_NBit1, _NBit1]],
    tol: float = ...,
) -> NDArray[floating[_NBit1]] | NDArray[complexfloating[_NBit1, _NBit1]]: ...
# Non-complex inputs are returned with their scalar type unchanged.
@overload
def real_if_close(
    a: _ArrayLike[_SCT],
    tol: float = ...,
) -> NDArray[_SCT]: ...
@overload
def real_if_close(
    a: ArrayLike,
    tol: float = ...,
) -> NDArray[Any]: ...
# One overload per dtype code so each call site gets the exact Literal
# description string back (mirrors the `_namefromtype` table in the impl).
@overload
def typename(char: L['S1']) -> L['character']: ...
@overload
def typename(char: L['?']) -> L['bool']: ...
@overload
def typename(char: L['b']) -> L['signed char']: ...
@overload
def typename(char: L['B']) -> L['unsigned char']: ...
@overload
def typename(char: L['h']) -> L['short']: ...
@overload
def typename(char: L['H']) -> L['unsigned short']: ...
@overload
def typename(char: L['i']) -> L['integer']: ...
@overload
def typename(char: L['I']) -> L['unsigned integer']: ...
@overload
def typename(char: L['l']) -> L['long integer']: ...
@overload
def typename(char: L['L']) -> L['unsigned long integer']: ...
@overload
def typename(char: L['q']) -> L['long long integer']: ...
@overload
def typename(char: L['Q']) -> L['unsigned long long integer']: ...
@overload
def typename(char: L['f']) -> L['single precision']: ...
@overload
def typename(char: L['d']) -> L['double precision']: ...
@overload
def typename(char: L['g']) -> L['long precision']: ...
@overload
def typename(char: L['F']) -> L['complex single precision']: ...
@overload
def typename(char: L['D']) -> L['complex double precision']: ...
@overload
def typename(char: L['G']) -> L['complex long double precision']: ...
@overload
def typename(char: L['S']) -> L['string']: ...
@overload
def typename(char: L['U']) -> L['unicode']: ...
@overload
def typename(char: L['V']) -> L['void']: ...
@overload
def typename(char: L['O']) -> L['object']: ...
# `common_type` promotion ladder: integers alone promote to float64;
# floats keep their precision; mixing in integers widens to at least
# 64-bit; any complex input switches to a complexfloating result at the
# widest precision seen.
@overload
def common_type( # type: ignore[misc]
    *arrays: _SupportsDType[dtype[
        integer[Any]
    ]]
) -> type[floating[_64Bit]]: ...
@overload
def common_type( # type: ignore[misc]
    *arrays: _SupportsDType[dtype[
        floating[_NBit1]
    ]]
) -> type[floating[_NBit1]]: ...
@overload
def common_type( # type: ignore[misc]
    *arrays: _SupportsDType[dtype[
        integer[Any] | floating[_NBit1]
    ]]
) -> type[floating[_NBit1 | _64Bit]]: ...
@overload
def common_type( # type: ignore[misc]
    *arrays: _SupportsDType[dtype[
        floating[_NBit1] | complexfloating[_NBit2, _NBit2]
    ]]
) -> type[complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]]: ...
@overload
def common_type(
    *arrays: _SupportsDType[dtype[
        integer[Any] | floating[_NBit1] | complexfloating[_NBit2, _NBit2]
    ]]
) -> type[complexfloating[_64Bit | _NBit1 | _NBit2, _64Bit | _NBit1 | _NBit2]]: ...

View File

@ -0,0 +1,209 @@
"""
Module of functions that are like ufuncs in acting on arrays and optionally
storing results in an output array.
"""
__all__ = ['fix', 'isneginf', 'isposinf']
import numpy._core.numeric as nx
from numpy._core.overrides import array_function_dispatch
import warnings
import functools
def _dispatcher(x, out=None):
    # __array_function__ dispatcher: expose the dispatch-relevant arguments.
    return x, out
@array_function_dispatch(_dispatcher, verify=False, module='numpy')
def fix(x, out=None):
"""
Round to nearest integer towards zero.
Round an array of floats element-wise to nearest integer towards zero.
The rounded values have the same data-type as the input.
Parameters
----------
x : array_like
An array to be rounded
out : ndarray, optional
A location into which the result is stored. If provided, it must have
a shape that the input broadcasts to. If not provided or None, a
freshly-allocated array is returned.
Returns
-------
out : ndarray of floats
An array with the same dimensions and data-type as the input.
If second argument is not supplied then a new array is returned
with the rounded values.
If a second argument is supplied the result is stored there.
The return value ``out`` is then a reference to that array.
See Also
--------
rint, trunc, floor, ceil
around : Round to given number of decimals
Examples
--------
>>> import numpy as np
>>> np.fix(3.14)
3.0
>>> np.fix(3)
3
>>> np.fix([2.1, 2.9, -2.1, -2.9])
array([ 2., 2., -2., -2.])
"""
# promote back to an array if flattened
res = nx.asanyarray(nx.ceil(x, out=out))
res = nx.floor(x, out=res, where=nx.greater_equal(x, 0))
# when no out argument is passed and no subclasses are involved, flatten
# scalars
if out is None and type(res) is nx.ndarray:
res = res[()]
return res
@array_function_dispatch(_dispatcher, verify=False, module='numpy')
def isposinf(x, out=None):
"""
Test element-wise for positive infinity, return result as bool array.
Parameters
----------
x : array_like
The input array.
out : array_like, optional
A location into which the result is stored. If provided, it must have a
shape that the input broadcasts to. If not provided or None, a
freshly-allocated boolean array is returned.
Returns
-------
out : ndarray
A boolean array with the same dimensions as the input.
If second argument is not supplied then a boolean array is returned
with values True where the corresponding element of the input is
positive infinity and values False where the element of the input is
not positive infinity.
If a second argument is supplied the result is stored there. If the
type of that array is a numeric type the result is represented as zeros
and ones, if the type is boolean then as False and True.
The return value `out` is then a reference to that array.
See Also
--------
isinf, isneginf, isfinite, isnan
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if the second argument is also supplied when x is a scalar
input, if first and second arguments have different shapes, or if the
first argument has complex values
Examples
--------
>>> import numpy as np
>>> np.isposinf(np.inf)
True
>>> np.isposinf(-np.inf)
False
>>> np.isposinf([-np.inf, 0., np.inf])
array([False, False, True])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isposinf(x, y)
array([0, 0, 1])
>>> y
array([0, 0, 1])
"""
is_inf = nx.isinf(x)
try:
signbit = ~nx.signbit(x)
except TypeError as e:
dtype = nx.asanyarray(x).dtype
raise TypeError(f'This operation is not supported for {dtype} values '
'because it would be ambiguous.') from e
else:
return nx.logical_and(is_inf, signbit, out)
@array_function_dispatch(_dispatcher, verify=False, module='numpy')
def isneginf(x, out=None):
"""
Test element-wise for negative infinity, return result as bool array.
Parameters
----------
x : array_like
The input array.
out : array_like, optional
A location into which the result is stored. If provided, it must have a
shape that the input broadcasts to. If not provided or None, a
freshly-allocated boolean array is returned.
Returns
-------
out : ndarray
A boolean array with the same dimensions as the input.
If second argument is not supplied then a numpy boolean array is
returned with values True where the corresponding element of the
input is negative infinity and values False where the element of
the input is not negative infinity.
If a second argument is supplied the result is stored there. If the
type of that array is a numeric type the result is represented as
zeros and ones, if the type is boolean then as False and True. The
return value `out` is then a reference to that array.
See Also
--------
isinf, isposinf, isnan, isfinite
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if the second argument is also supplied when x is a scalar
input, if first and second arguments have different shapes, or if the
first argument has complex values.
Examples
--------
>>> import numpy as np
>>> np.isneginf(-np.inf)
True
>>> np.isneginf(np.inf)
False
>>> np.isneginf([-np.inf, 0., np.inf])
array([ True, False, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isneginf(x, y)
array([1, 0, 0])
>>> y
array([1, 0, 0])
"""
is_inf = nx.isinf(x)
try:
signbit = nx.signbit(x)
except TypeError as e:
dtype = nx.asanyarray(x).dtype
raise TypeError(f'This operation is not supported for {dtype} values '
'because it would be ambiguous.') from e
else:
return nx.logical_and(is_inf, signbit, out)

View File

@ -0,0 +1,67 @@
from typing import Any, overload, TypeVar
import numpy as np
from numpy import floating, object_
from numpy._typing import (
NDArray,
_FloatLike_co,
_ArrayLikeFloat_co,
_ArrayLikeObject_co,
)
# `_ArrayType` preserves the exact ndarray (sub)type of an explicit `out`.
_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
__all__: list[str]
# `fix`: float scalar in -> floating scalar out; float/object array-likes
# keep their dtype family; an explicit `out` array is returned as-is.
@overload
def fix( # type: ignore[misc]
    x: _FloatLike_co,
    out: None = ...,
) -> floating[Any]: ...
@overload
def fix(
    x: _ArrayLikeFloat_co,
    out: None = ...,
) -> NDArray[floating[Any]]: ...
@overload
def fix(
    x: _ArrayLikeObject_co,
    out: None = ...,
) -> NDArray[object_]: ...
@overload
def fix(
    x: _ArrayLikeFloat_co | _ArrayLikeObject_co,
    out: _ArrayType,
) -> _ArrayType: ...
# `isposinf`/`isneginf`: scalar in -> bool scalar, array-like in -> bool
# array; an explicit `out` array is returned with its own type preserved.
@overload
def isposinf( # type: ignore[misc]
    x: _FloatLike_co,
    out: None = ...,
) -> np.bool: ...
@overload
def isposinf(
    x: _ArrayLikeFloat_co,
    out: None = ...,
) -> NDArray[np.bool]: ...
@overload
def isposinf(
    x: _ArrayLikeFloat_co,
    out: _ArrayType,
) -> _ArrayType: ...
@overload
def isneginf( # type: ignore[misc]
    x: _FloatLike_co,
    out: None = ...,
) -> np.bool: ...
@overload
def isneginf(
    x: _ArrayLikeFloat_co,
    out: None = ...,
) -> NDArray[np.bool]: ...
@overload
def isneginf(
    x: _ArrayLikeFloat_co,
    out: _ArrayType,
) -> _ArrayType: ...

View File

@ -0,0 +1,289 @@
"""
Container class for backward compatibility with NumArray.
The user_array.container class exists for backward compatibility with NumArray
and is not meant to be used in new code. If you need to create an array
container class, we recommend either creating a class that wraps an ndarray
or subclasses ndarray.
"""
from numpy._core import (
array, asarray, absolute, add, subtract, multiply, divide,
remainder, power, left_shift, right_shift, bitwise_and, bitwise_or,
bitwise_xor, invert, less, less_equal, not_equal, equal, greater,
greater_equal, shape, reshape, arange, sin, sqrt, transpose
)
class container:
    """
    container(data, dtype=None, copy=True)

    Standard container-class for easy multiple-inheritance.

    All state lives in the wrapped ndarray ``self.array``; every operator
    forwards to the corresponding numpy function and re-wraps the result.

    Methods
    -------
    copy
    tostring
    byteswap
    astype

    """
    def __init__(self, data, dtype=None, copy=True):
        # The one real attribute; everything else is delegated (see
        # __setattr__/__getattr__ at the bottom).
        self.array = array(data, dtype, copy=copy)
    def __repr__(self):
        if self.ndim > 0:
            # Reuse ndarray's repr, swapping the "array" prefix for the
            # subclass name.
            return self.__class__.__name__ + repr(self.array)[len("array"):]
        else:
            return self.__class__.__name__ + "(" + repr(self.array) + ")"
    def __array__(self, t=None):
        if t:
            return self.array.astype(t)
        return self.array
    # Array as sequence
    def __len__(self):
        return len(self.array)
    def __getitem__(self, index):
        # _rc re-wraps non-scalar results in the subclass.
        return self._rc(self.array[index])
    def __setitem__(self, index, value):
        self.array[index] = asarray(value, self.dtype)
    # Unary and binary arithmetic: forward to the numpy ufuncs.  The
    # augmented (i*) variants operate in place on self.array and return self.
    def __abs__(self):
        return self._rc(absolute(self.array))
    def __neg__(self):
        return self._rc(-self.array)
    def __add__(self, other):
        return self._rc(self.array + asarray(other))
    __radd__ = __add__
    def __iadd__(self, other):
        add(self.array, other, self.array)
        return self
    def __sub__(self, other):
        return self._rc(self.array - asarray(other))
    def __rsub__(self, other):
        return self._rc(asarray(other) - self.array)
    def __isub__(self, other):
        subtract(self.array, other, self.array)
        return self
    def __mul__(self, other):
        return self._rc(multiply(self.array, asarray(other)))
    __rmul__ = __mul__
    def __imul__(self, other):
        multiply(self.array, other, self.array)
        return self
    # __div__/__rdiv__/__idiv__ are Python 2 names, kept for the NumArray
    # compatibility this module provides; Python 3 never calls them.
    def __div__(self, other):
        return self._rc(divide(self.array, asarray(other)))
    def __rdiv__(self, other):
        return self._rc(divide(asarray(other), self.array))
    def __idiv__(self, other):
        divide(self.array, other, self.array)
        return self
    def __mod__(self, other):
        return self._rc(remainder(self.array, other))
    def __rmod__(self, other):
        return self._rc(remainder(other, self.array))
    def __imod__(self, other):
        remainder(self.array, other, self.array)
        return self
    def __divmod__(self, other):
        return (self._rc(divide(self.array, other)),
                self._rc(remainder(self.array, other)))
    def __rdivmod__(self, other):
        return (self._rc(divide(other, self.array)),
                self._rc(remainder(other, self.array)))
    def __pow__(self, other):
        return self._rc(power(self.array, asarray(other)))
    def __rpow__(self, other):
        return self._rc(power(asarray(other), self.array))
    def __ipow__(self, other):
        power(self.array, other, self.array)
        return self
    # Bit-shift and bitwise operators, same forwarding pattern.
    def __lshift__(self, other):
        return self._rc(left_shift(self.array, other))
    def __rshift__(self, other):
        return self._rc(right_shift(self.array, other))
    def __rlshift__(self, other):
        return self._rc(left_shift(other, self.array))
    def __rrshift__(self, other):
        return self._rc(right_shift(other, self.array))
    def __ilshift__(self, other):
        left_shift(self.array, other, self.array)
        return self
    def __irshift__(self, other):
        right_shift(self.array, other, self.array)
        return self
    def __and__(self, other):
        return self._rc(bitwise_and(self.array, other))
    def __rand__(self, other):
        return self._rc(bitwise_and(other, self.array))
    def __iand__(self, other):
        bitwise_and(self.array, other, self.array)
        return self
    def __xor__(self, other):
        return self._rc(bitwise_xor(self.array, other))
    def __rxor__(self, other):
        return self._rc(bitwise_xor(other, self.array))
    def __ixor__(self, other):
        bitwise_xor(self.array, other, self.array)
        return self
    def __or__(self, other):
        return self._rc(bitwise_or(self.array, other))
    def __ror__(self, other):
        return self._rc(bitwise_or(other, self.array))
    def __ior__(self, other):
        bitwise_or(self.array, other, self.array)
        return self
    def __pos__(self):
        return self._rc(self.array)
    def __invert__(self):
        return self._rc(invert(self.array))
    def _scalarfunc(self, func):
        # Apply a Python scalar conversion; only valid for 0-d containers.
        if self.ndim == 0:
            return func(self[0])
        else:
            raise TypeError(
                "only rank-0 arrays can be converted to Python scalars.")
    def __complex__(self):
        return self._scalarfunc(complex)
    def __float__(self):
        return self._scalarfunc(float)
    def __int__(self):
        return self._scalarfunc(int)
    def __hex__(self):
        return self._scalarfunc(hex)
    def __oct__(self):
        return self._scalarfunc(oct)
    # Comparisons return wrapped boolean arrays, not plain bools.
    def __lt__(self, other):
        return self._rc(less(self.array, other))
    def __le__(self, other):
        return self._rc(less_equal(self.array, other))
    def __eq__(self, other):
        return self._rc(equal(self.array, other))
    def __ne__(self, other):
        return self._rc(not_equal(self.array, other))
    def __gt__(self, other):
        return self._rc(greater(self.array, other))
    def __ge__(self, other):
        return self._rc(greater_equal(self.array, other))
    def copy(self):
        "Return a copy of the wrapped array, re-wrapped in this class."
        return self._rc(self.array.copy())
    def tostring(self):
        "Return the raw data as a byte string (legacy name for tobytes)."
        # NOTE(review): ndarray.tostring() was removed in NumPy 2.0 — this
        # likely raises AttributeError there; confirm whether it should
        # delegate to tobytes() instead.
        return self.array.tostring()
    def tobytes(self):
        "Return the raw data as a byte string."
        return self.array.tobytes()
    def byteswap(self):
        "Return a byte-swapped copy, re-wrapped in this class."
        return self._rc(self.array.byteswap())
    def astype(self, typecode):
        "Return a copy cast to the given type, re-wrapped in this class."
        return self._rc(self.array.astype(typecode))
    def _rc(self, a):
        # Re-wrap: scalars (0-d results) pass through, arrays get wrapped
        # in the (sub)class so operations stay closed over the class.
        if len(shape(a)) == 0:
            return a
        else:
            return self.__class__(a)
    def __array_wrap__(self, *args):
        return self.__class__(args[0])
    def __setattr__(self, attr, value):
        if attr == 'array':
            object.__setattr__(self, attr, value)
            return
        # Prefer setting attributes on the wrapped array (e.g. shape);
        # fall back to a regular instance attribute.
        try:
            self.array.__setattr__(attr, value)
        except AttributeError:
            object.__setattr__(self, attr, value)
    # Only called after other approaches fail.
    def __getattr__(self, attr):
        if (attr == 'array'):
            return object.__getattribute__(self, attr)
        # Delegate everything else (dtype, ndim, shape, ...) to the array.
        return self.array.__getattribute__(attr)
#############################################################
# Test of class container
#############################################################
# Ad-hoc smoke test, run only when this module is executed directly.
if __name__ == '__main__':
    temp = reshape(arange(10000), (100, 100))
    ua = container(temp)
    # new object created begin test
    print(dir(ua))
    print(shape(ua), ua.shape) # I have changed Numeric.py
    ua_small = ua[:3, :5]
    print(ua_small)
    # this did not change ua[0,0], which is not normal behavior
    ua_small[0, 0] = 10
    print(ua_small[0, 0], ua[0, 0])
    print(sin(ua_small) / 3. * 6. + sqrt(ua_small ** 2))
    print(less(ua_small, 103), type(less(ua_small, 103)))
    print(type(ua_small * reshape(arange(15), shape(ua_small))))
    print(reshape(ua_small, (5, 3)))
    print(transpose(ua_small))

View File

@ -0,0 +1,776 @@
import os
import sys
import textwrap
import types
import re
import warnings
import functools
import platform
from numpy._core import ndarray
from numpy._utils import set_module
import numpy as np
__all__ = [
'get_include', 'info', 'show_runtime'
]
@set_module('numpy')
def show_runtime():
"""
Print information about various resources in the system
including available intrinsic support and BLAS/LAPACK library
in use
.. versionadded:: 1.24.0
See Also
--------
show_config : Show libraries in the system on which NumPy was built.
Notes
-----
1. Information is derived with the help of `threadpoolctl <https://pypi.org/project/threadpoolctl/>`_
library if available.
2. SIMD related information is derived from ``__cpu_features__``,
``__cpu_baseline__`` and ``__cpu_dispatch__``
"""
from numpy._core._multiarray_umath import (
__cpu_features__, __cpu_baseline__, __cpu_dispatch__
)
from pprint import pprint
config_found = [{
"numpy_version": np.__version__,
"python": sys.version,
"uname": platform.uname(),
}]
features_found, features_not_found = [], []
for feature in __cpu_dispatch__:
if __cpu_features__[feature]:
features_found.append(feature)
else:
features_not_found.append(feature)
config_found.append({
"simd_extensions": {
"baseline": __cpu_baseline__,
"found": features_found,
"not_found": features_not_found
}
})
try:
from threadpoolctl import threadpool_info
config_found.extend(threadpool_info())
except ImportError:
print("WARNING: `threadpoolctl` not found in system!"
" Install it by `pip install threadpoolctl`."
" Once installed, try `np.show_runtime` again"
" for more detailed build information")
pprint(config_found)
@set_module('numpy')
def get_include():
"""
Return the directory that contains the NumPy \\*.h header files.
Extension modules that need to compile against NumPy may need to use this
function to locate the appropriate include directory.
Notes
-----
When using ``setuptools``, for example in ``setup.py``::
import numpy as np
...
Extension('extension_name', ...
include_dirs=[np.get_include()])
...
Note that a CLI tool ``numpy-config`` was introduced in NumPy 2.0, using
that is likely preferred for build systems other than ``setuptools``::
$ numpy-config --cflags
-I/path/to/site-packages/numpy/_core/include
# Or rely on pkg-config:
$ export PKG_CONFIG_PATH=$(numpy-config --pkgconfigdir)
$ pkg-config --cflags
-I/path/to/site-packages/numpy/_core/include
Examples
--------
>>> np.get_include()
'.../site-packages/numpy/core/include' # may vary
"""
import numpy
if numpy.show_config is None:
# running from numpy source directory
d = os.path.join(os.path.dirname(numpy.__file__), '_core', 'include')
else:
# using installed numpy core headers
import numpy._core as _core
d = os.path.join(os.path.dirname(_core.__file__), 'include')
return d
class _Deprecate:
    """
    Decorator class to deprecate old functions.

    Refer to `deprecate` for details.

    See Also
    --------
    deprecate

    """
    def __init__(self, old_name=None, new_name=None, message=None):
        # Any of these may be None; defaults are resolved in __call__ from
        # the wrapped function itself.
        self.old_name = old_name
        self.new_name = new_name
        self.message = message
    def __call__(self, func, *args, **kwargs):
        """
        Decorator call.  Refer to ``deprecate``.

        Wraps *func* so it emits a DeprecationWarning when called, and
        prepends the deprecation notice to its docstring.
        """
        old_name = self.old_name
        new_name = self.new_name
        message = self.message
        if old_name is None:
            old_name = func.__name__
        if new_name is None:
            depdoc = "`%s` is deprecated!" % old_name
        else:
            depdoc = "`%s` is deprecated, use `%s` instead!" % \
                     (old_name, new_name)
        if message is not None:
            depdoc += "\n" + message
        @functools.wraps(func)
        def newfunc(*args, **kwds):
            # Warn at every call of the deprecated function.
            warnings.warn(depdoc, DeprecationWarning, stacklevel=2)
            return func(*args, **kwds)
        newfunc.__name__ = old_name
        doc = func.__doc__
        if doc is None:
            doc = depdoc
        else:
            lines = doc.expandtabs().split('\n')
            indent = _get_indent(lines[1:])
            if lines[0].lstrip():
                # Indent the original first line to let inspect.cleandoc()
                # dedent the docstring despite the deprecation notice.
                doc = indent * ' ' + doc
            else:
                # Remove the same leading blank lines as cleandoc() would.
                # `skip` counts characters (line length + the newline).
                skip = len(lines[0]) + 1
                for line in lines[1:]:
                    if len(line) > indent:
                        break
                    skip += len(line) + 1
                doc = doc[skip:]
            # Indent the notice to match the docstring body, then prepend it.
            depdoc = textwrap.indent(depdoc, ' ' * indent)
            doc = '\n\n'.join([depdoc, doc])
        newfunc.__doc__ = doc
        return newfunc
def _get_indent(lines):
    """
    Determines the leading whitespace that could be removed from all the lines.
    """
    # Blank (or whitespace-only) lines do not constrain the common indent.
    indents = [
        len(line) - len(line.lstrip()) for line in lines if line.lstrip()
    ]
    return min(indents) if indents else 0
def deprecate(*args, **kwargs):
    """
    Issues a DeprecationWarning, adds warning to `old_name`'s
    docstring, rebinds ``old_name.__name__`` and returns the new
    function object.

    This function may also be used as a decorator.

    .. deprecated:: 2.0
        Use `~warnings.warn` with :exc:`DeprecationWarning` instead.

    Parameters
    ----------
    func : function
        The function to be deprecated.
    old_name : str, optional
        The name of the function to be deprecated. Default is None, in
        which case the name of `func` is used.
    new_name : str, optional
        The new name for the function. Default is None, in which case the
        deprecation message is that `old_name` is deprecated. If given, the
        deprecation message is that `old_name` is deprecated and `new_name`
        should be used instead.
    message : str, optional
        Additional explanation of the deprecation.  Displayed in the
        docstring after the warning.

    Returns
    -------
    old_func : function
        The deprecated function.

    Examples
    --------
    Note that ``olduint`` returns a value after printing Deprecation
    Warning:

    >>> olduint = np.lib.utils.deprecate(np.uint)
    DeprecationWarning: `uint64` is deprecated! # may vary
    >>> olduint(6)
    6

    """
    # `deprecate` is itself deprecated.
    # Deprecated in NumPy 2.0, 2023-07-11
    warnings.warn(
        "`deprecate` is deprecated, "
        "use `warn` with `DeprecationWarning` instead. "
        "(deprecated in NumPy 2.0)",
        DeprecationWarning,
        stacklevel=2
    )
    if not args:
        # Called with configuration only (possibly as `@deprecate(...)`):
        # return the configured decorator object.
        return _Deprecate(*args, **kwargs)
    # Called directly on a function: decorate it right away, passing any
    # remaining positional arguments on to _Deprecate.
    fn = args[0]
    return _Deprecate(*args[1:], **kwargs)(fn)
def deprecate_with_doc(msg):
    """
    Deprecates a function and includes the deprecation in its docstring.

    .. deprecated:: 2.0
        Use `~warnings.warn` with :exc:`DeprecationWarning` instead.

    This function is used as a decorator. It returns an object that can be
    used to issue a DeprecationWarning, by passing the to-be decorated
    function as argument, this adds warning to the to-be decorated function's
    docstring and returns the new function object.

    See Also
    --------
    deprecate : Decorate a function such that it issues a
                :exc:`DeprecationWarning`

    Parameters
    ----------
    msg : str
        Additional explanation of the deprecation. Displayed in the
        docstring after the warning.

    Returns
    -------
    obj : object
        A `_Deprecate` instance; calling it on a function returns the
        deprecated wrapper.

    """
    # Deprecated in NumPy 2.0, 2023-07-11
    # NOTE(review): the warning text names `deprecate`, mirroring the twin
    # warning in `deprecate` above — confirm this wording is intended.
    warnings.warn(
        "`deprecate` is deprecated, "
        "use `warn` with `DeprecationWarning` instead. "
        "(deprecated in NumPy 2.0)",
        DeprecationWarning,
        stacklevel=2
    )
    return _Deprecate(message=msg)
#-----------------------------------------------------------------------------
# NOTE: pydoc defines a help function which works similarly to this
# except it uses a pager to take over the screen.
# combine name and arguments and split to multiple lines of width
# characters. End lines on a comma and begin argument list indented with
# the rest of the arguments.
def _split_line(name, arguments, width):
    """
    Combine *name* and *arguments* into one string, ending a line after a
    comma and indenting the continuation under the argument list whenever
    the running column count would exceed *width*.
    """
    base = len(name)
    col = base
    out = name
    for arg in arguments.split(", "):
        # No separator before the very first argument.
        sep = "" if col == base else ", "
        col += len(arg) + len(sep)
        if col > width:
            # Wrap: restart the column count just past the name width.
            col = base + 1 + len(arg)
            out = out + ",\n" + " " * (base + 2) + arg
        else:
            out = out + sep + arg
    return out
# Caches filled lazily by `info` on first string lookup.
_namedict = None
_dictlist = None


# Traverse all module directories underneath globals
# to see if something is defined
def _makenamedict(module='numpy'):
    """
    Breadth-first walk of *module* and every module reachable from it,
    returning a mapping of module name -> module __dict__ together with
    the module names in discovery order.
    """
    root = __import__(module, globals(), locals(), [])
    thedict = {root.__name__: root.__dict__}
    dictlist = [root.__name__]
    pending = [root.__dict__]
    while pending:
        current = pending.pop(0)
        for value in list(current.values()):
            if isinstance(value, types.ModuleType):
                modname = value.__name__
                if modname not in dictlist:
                    dictlist.append(modname)
                    pending.append(value.__dict__)
                    thedict[modname] = value.__dict__
    return thedict, dictlist
def _info(obj, output=None):
    """Provide information about ndarray obj.

    Parameters
    ----------
    obj : ndarray
        Must be ndarray, not checked.
    output
        Where printed output goes.

    Notes
    -----
    Copied over from the numarray module prior to its removal.
    Adapted somewhat as only numpy is an option now.

    Called by info.

    """
    # `extra`, `tic` and `bp` are leftover formatting hooks from the
    # numarray original; here they are all no-ops.
    extra = ""
    tic = ""
    bp = lambda x: x
    cls = getattr(obj, '__class__', type(obj))
    nm = getattr(cls, '__name__', cls)
    strides = obj.strides
    endian = obj.dtype.byteorder
    if output is None:
        output = sys.stdout
    print("class: ", nm, file=output)
    print("shape: ", obj.shape, file=output)
    print("strides: ", strides, file=output)
    print("itemsize: ", obj.itemsize, file=output)
    print("aligned: ", bp(obj.flags.aligned), file=output)
    print("contiguous: ", bp(obj.flags.contiguous), file=output)
    print("fortran: ", obj.flags.fortran, file=output)
    print(
        "data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra),
        file=output
        )
    print("byteorder: ", end=' ', file=output)
    # '|' (not applicable) and '=' (native) both report the host byte order;
    # an explicit '>'/'<' is compared against the host to decide `byteswap`.
    if endian in ['|', '=']:
        print("%s%s%s" % (tic, sys.byteorder, tic), file=output)
        byteswap = False
    elif endian == '>':
        print("%sbig%s" % (tic, tic), file=output)
        byteswap = sys.byteorder != "big"
    else:
        print("%slittle%s" % (tic, tic), file=output)
        byteswap = sys.byteorder != "little"
    print("byteswap: ", bp(byteswap), file=output)
    print("type: %s" % obj.dtype, file=output)
@set_module('numpy')
def info(object=None, maxwidth=76, output=None, toplevel='numpy'):
    """
    Get help information for an array, function, class, or module.

    Parameters
    ----------
    object : object or str, optional
        Input object or name to get information about. If `object` is
        an `ndarray` instance, information about the array is printed.
        If `object` is a numpy object, its docstring is given. If it is
        a string, available modules are searched for matching objects.
        If None, information about `info` itself is returned.
    maxwidth : int, optional
        Printing width.
    output : file like object, optional
        File like object that the output is written to, default is
        ``None``, in which case ``sys.stdout`` will be used.
        The object has to be opened in 'w' or 'a' mode.
    toplevel : str, optional
        Start search at this level.

    Notes
    -----
    When used interactively with an object, ``np.info(obj)`` is equivalent
    to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython
    prompt.

    Examples
    --------
    >>> np.info(np.polyval) # doctest: +SKIP
    polyval(p, x)
    Evaluate the polynomial p at x.
    ...

    When using a string for `object` it is possible to get multiple results.

    >>> np.info('fft') # doctest: +SKIP
    *** Found in numpy ***
    Core FFT routines
    ...
    *** Found in numpy.fft ***
    fft(a, n=None, axis=-1)
    ...
    *** Repeat reference found in numpy.fft.fftpack ***
    *** Total of 3 references found. ***

    When the argument is an array, information about the array is printed.

    >>> a = np.array([[1 + 2j, 3, -4], [-5j, 6, 0]], dtype=np.complex64)
    >>> np.info(a)
    class:  ndarray
    shape:  (2, 3)
    strides:  (24, 8)
    itemsize:  8
    aligned:  True
    contiguous:  True
    fortran:  False
    data pointer: 0x562b6e0d2860  # may vary
    byteorder:  little
    byteswap:  False
    type: complex64

    """
    global _namedict, _dictlist
    # Local import to speed up numpy's import time.
    import pydoc
    import inspect

    # Unwrap lazy-import proxies (ppimport) to the real object.
    if (hasattr(object, '_ppimport_importer') or
           hasattr(object, '_ppimport_module')):
        object = object._ppimport_module
    elif hasattr(object, '_ppimport_attr'):
        object = object._ppimport_attr

    if output is None:
        output = sys.stdout

    if object is None:
        # Describe `info` itself.  Forward the display options so a
        # caller-supplied `output` is honored by the recursive call
        # (previously the nested help always went to sys.stdout).
        info(info, maxwidth, output, toplevel)
    elif isinstance(object, ndarray):
        _info(object, output=output)
    elif isinstance(object, str):
        # Search all modules under `toplevel` for an attribute named
        # `object`; the module index is cached in module-level globals.
        if _namedict is None:
            _namedict, _dictlist = _makenamedict(toplevel)
        numfound = 0
        objlist = []
        for namestr in _dictlist:
            try:
                obj = _namedict[namestr][object]
                if id(obj) in objlist:
                    print("\n "
                          "*** Repeat reference found in %s *** " % namestr,
                          file=output
                          )
                else:
                    objlist.append(id(obj))
                    print(" *** Found in %s ***" % namestr, file=output)
                    # Forward display options so nested help respects
                    # the caller's `output` and width settings.
                    info(obj, maxwidth, output, toplevel)
                    print("-"*maxwidth, file=output)
                numfound += 1
            except KeyError:
                pass
        if numfound == 0:
            print("Help for %s not found." % object, file=output)
        else:
            print("\n "
                  "*** Total of %d references found. ***" % numfound,
                  file=output
                  )
    elif inspect.isfunction(object) or inspect.ismethod(object):
        name = object.__name__
        try:
            arguments = str(inspect.signature(object))
        except Exception:
            # Builtins and some extension callables expose no signature.
            arguments = "()"
        if len(name+arguments) > maxwidth:
            argstr = _split_line(name, arguments, maxwidth)
        else:
            argstr = name + arguments
        print(" " + argstr + "\n", file=output)
        print(inspect.getdoc(object), file=output)
    elif inspect.isclass(object):
        name = object.__name__
        try:
            arguments = str(inspect.signature(object))
        except Exception:
            arguments = "()"
        if len(name+arguments) > maxwidth:
            argstr = _split_line(name, arguments, maxwidth)
        else:
            argstr = name + arguments
        print(" " + argstr + "\n", file=output)
        doc1 = inspect.getdoc(object)
        if doc1 is None:
            # Fall back to the constructor's docstring when the class
            # itself has none.
            if hasattr(object, '__init__'):
                print(inspect.getdoc(object.__init__), file=output)
        else:
            print(inspect.getdoc(object), file=output)
        # Summarize the public methods (first docstring line each).
        methods = pydoc.allmethods(object)
        public_methods = [meth for meth in methods if meth[0] != '_']
        if public_methods:
            print("\n\nMethods:\n", file=output)
            for meth in public_methods:
                thisobj = getattr(object, meth, None)
                if thisobj is not None:
                    methstr, other = pydoc.splitdoc(
                        inspect.getdoc(thisobj) or "None"
                        )
                print("  %s  --  %s" % (meth, methstr), file=output)
    elif hasattr(object, '__doc__'):
        print(inspect.getdoc(object), file=output)
def safe_eval(source):
    """
    Protected string evaluation.

    .. deprecated:: 2.0
        Use `ast.literal_eval` instead.

    Evaluate a string containing a Python literal expression without
    allowing the execution of arbitrary non-literal code.

    .. warning::

        This function is identical to :py:meth:`ast.literal_eval` and
        has the same security implications.  It may not always be safe
        to evaluate large input strings.

    Parameters
    ----------
    source : str
        The string to evaluate.

    Returns
    -------
    obj : object
        The result of evaluating `source`.

    Raises
    ------
    SyntaxError
        If the code has invalid Python syntax, or if it contains
        non-literal code.

    Examples
    --------
    >>> np.safe_eval('1')
    1
    >>> np.safe_eval('[1, 2, 3]')
    [1, 2, 3]
    >>> np.safe_eval('{"foo": ("bar", 10.0)}')
    {'foo': ('bar', 10.0)}

    >>> np.safe_eval('import os')
    Traceback (most recent call last):
      ...
    SyntaxError: invalid syntax

    >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()')
    Traceback (most recent call last):
      ...
    ValueError: malformed node or string: <_ast.Call object at 0x...>

    """
    # Local import to speed up numpy's import time.
    import ast

    # Deprecated in NumPy 2.0, 2023-07-11
    warnings.warn(
        "`safe_eval` is deprecated. Use `ast.literal_eval` instead. "
        "Be aware of security implications, such as memory exhaustion "
        "based attacks (deprecated in NumPy 2.0)",
        DeprecationWarning,
        stacklevel=2,
    )
    return ast.literal_eval(source)
def _median_nancheck(data, result, axis):
    """
    Utility function to check median result from data for NaN values at the end
    and return NaN in that case. Input result can also be a MaskedArray.

    Parameters
    ----------
    data : array
        Sorted input data to median function
    result : Array or MaskedArray
        Result of median function.
    axis : int
        Axis along which the median was computed.

    Returns
    -------
    result : scalar or ndarray
        Median or NaN in axes which contained NaN in the input. If the input
        was an array, NaN will be inserted in-place. If a scalar, either the
        input itself or a scalar NaN.
    """
    if data.size == 0:
        return result

    # `data` is sorted, so any NaN has been pushed to the last position
    # along `axis`; inspect only that slice.
    last_vals = data.take(-1, axis=axis)
    nan_mask = np.isnan(last_vals)

    # masked NaN values are ok, although for masked the copyto may fail for
    # unmasked ones (this was always broken) when the result is a scalar.
    if np.ma.isMaskedArray(nan_mask):
        nan_mask = nan_mask.filled(False)

    if not nan_mask.any():
        return result

    # Without given output, it is possible that the current result is a
    # numpy scalar, which is not writeable.  If so, just return nan.
    if isinstance(result, np.generic):
        return last_vals

    # Otherwise copy NaNs (if there are any) into the writable result.
    np.copyto(result, last_vals, where=nan_mask)
    return result
def _opt_info():
    """
    Returns a string containing the CPU features supported
    by the current build.

    The format of the string can be explained as follows:
        - Dispatched features supported by the running machine end with `*`.
        - Dispatched features not supported by the running machine
          end with `?`.
        - Remaining features represent the baseline.

    Returns:
        str: A formatted string indicating the supported CPU features.
    """
    from numpy._core._multiarray_umath import (
        __cpu_features__, __cpu_baseline__, __cpu_dispatch__
    )

    if not __cpu_baseline__ and not __cpu_dispatch__:
        return ''

    # Baseline first, then each dispatched feature tagged by whether the
    # running machine actually supports it.
    parts = [' '.join(__cpu_baseline__)]
    for feature in __cpu_dispatch__:
        marker = '*' if __cpu_features__[feature] else '?'
        parts.append(f"{feature}{marker}")
    return ' '.join(parts)
def drop_metadata(dtype, /):
    """
    Returns the dtype unchanged if it contained no metadata or a copy of the
    dtype if it (or any of its structure dtypes) contained metadata.

    This utility is used by `np.save` and `np.savez` to drop metadata before
    saving.

    .. note::

        Due to its limitation this function may move to a more appropriate
        home or change in the future and is considered semi-public API only.

    .. warning::

        This function does not preserve more strange things like record dtypes
        and user dtypes may simply return the wrong thing.  If you need to be
        sure about the latter, check the result with:
        ``np.can_cast(new_dtype, dtype, casting="no")``.

    """
    if dtype.fields is not None:
        # Structured dtype: recurse into every field and rebuild only if
        # metadata was found somewhere.
        has_metadata = dtype.metadata is not None
        names, formats, offsets, titles = [], [], [], []
        for fname, finfo in dtype.fields.items():
            stripped = drop_metadata(finfo[0])
            if stripped is not finfo[0]:
                has_metadata = True
            names.append(fname)
            formats.append(stripped)
            offsets.append(finfo[1])
            titles.append(finfo[2] if len(finfo) > 2 else None)

        if not has_metadata:
            return dtype

        # NOTE: Could pass (dtype.type, structure) to preserve record dtypes...
        return np.dtype(
            {'names': names, 'formats': formats, 'offsets': offsets,
             'titles': titles, 'itemsize': dtype.itemsize},
            align=dtype.isalignedstruct,
        )

    if dtype.subdtype is not None:
        # Subarray dtype: strip the base dtype, keep the shape.
        base, shape = dtype.subdtype
        stripped = drop_metadata(base)
        if dtype.metadata is None and stripped is base:
            return dtype
        return np.dtype((stripped, shape))

    # Normal unstructured dtype.
    if dtype.metadata is None:
        return dtype
    # Note that `dt.str` doesn't round-trip e.g. for user-dtypes.
    return np.dtype(dtype.str)

View File

@ -0,0 +1,33 @@
from typing import (
Any,
TypeVar,
Protocol,
)
from numpy._core.numerictypes import (
issubdtype as issubdtype,
)
# Type-stub declarations for numpy.lib's utility helpers.
_T_contra = TypeVar("_T_contra", contravariant=True)
# A file-like object opened in `w` mode
class _SupportsWrite(Protocol[_T_contra]):
    def write(self, s: _T_contra, /) -> Any: ...
__all__: list[str]
def get_include() -> str: ...
# Mirrors the runtime signature of `np.info`; `output=None` means stdout.
def info(
    object: object = ...,
    maxwidth: int = ...,
    output: None | _SupportsWrite[str] = ...,
    toplevel: str = ...,
) -> None: ...
def source(
    object: object,
    output: None | _SupportsWrite[str] = ...,
) -> None: ...
def show_runtime() -> None: ...

View File

@ -0,0 +1,155 @@
"""Utility to compare (NumPy) version strings.
The NumpyVersion class allows properly comparing numpy version strings.
The LooseVersion and StrictVersion classes that distutils provides don't
work; they don't recognize anything like alpha/beta/rc/dev versions.
"""
import re
__all__ = ['NumpyVersion']
class NumpyVersion:
    """Parse and compare numpy version strings.

    NumPy has the following versioning scheme (numbers given are examples; they
    can be > 9 in principle):

    - Released version: '1.8.0', '1.8.1', etc.
    - Alpha: '1.8.0a1', '1.8.0a2', etc.
    - Beta: '1.8.0b1', '1.8.0b2', etc.
    - Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
    - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
    - Development versions after a1: '1.8.0a1.dev-f1234afa',
                                     '1.8.0b2.dev-f1234afa',
                                     '1.8.1rc1.dev-f1234afa', etc.
    - Development versions (no git hash available): '1.8.0.dev-Unknown'

    Comparing needs to be done against a valid version string or other
    `NumpyVersion` instance.  Note that all development versions of the same
    (pre-)release compare equal.

    .. versionadded:: 1.9.0

    Parameters
    ----------
    vstring : str
        NumPy version string (``np.__version__``).

    Examples
    --------
    >>> from numpy.lib import NumpyVersion
    >>> if NumpyVersion(np.__version__) < '1.7.0':
    ...     print('skip')
    >>> # skip

    >>> NumpyVersion('1.7')  # raises ValueError, add ".0"
    Traceback (most recent call last):
        ...
    ValueError: Not a valid numpy version string

    """

    def __init__(self, vstring):
        self.vstring = vstring
        main = re.match(r'\d+\.\d+\.\d+', vstring)
        if main is None:
            raise ValueError("Not a valid numpy version string")

        self.version = main.group()
        self.major, self.minor, self.bugfix = map(int, self.version.split('.'))

        # Anything after x.y.z is a pre-release tag ('a1', 'b2', 'rc1', ...)
        # and/or a '.dev...' suffix.
        tail = vstring[main.end():]
        if not tail:
            self.pre_release = 'final'
        else:
            candidates = (re.match(r'a\d', tail),
                          re.match(r'b\d', tail),
                          re.match(r'rc\d', tail))
            hit = next((m for m in candidates if m is not None), None)
            self.pre_release = hit.group() if hit is not None else ''

        self.is_devversion = bool(re.search(r'.dev', vstring))

    def _compare_version(self, other):
        """Compare major.minor.bugfix (returns -1, 0 or 1)."""
        mine = (self.major, self.minor, self.bugfix)
        theirs = (other.major, other.minor, other.bugfix)
        return (mine > theirs) - (mine < theirs)

    def _compare_pre_release(self, other):
        """Compare alpha/beta/rc/final (returns -1, 0 or 1)."""
        if self.pre_release == other.pre_release:
            return 0
        if self.pre_release == 'final':
            return 1
        if other.pre_release == 'final':
            return -1
        # 'a1' < 'b1' < 'rc1' falls out of plain string ordering.
        return 1 if self.pre_release > other.pre_release else -1

    def _compare(self, other):
        if not isinstance(other, (str, NumpyVersion)):
            raise ValueError("Invalid object to compare with NumpyVersion.")

        if isinstance(other, str):
            other = NumpyVersion(other)

        vercmp = self._compare_version(other)
        if vercmp != 0:
            return vercmp

        # Same x.y.z version, check for alpha/beta/rc.
        vercmp = self._compare_pre_release(other)
        if vercmp != 0:
            return vercmp

        # Same version and same pre-release; a dev version sorts first.
        if self.is_devversion is other.is_devversion:
            return 0
        return -1 if self.is_devversion else 1

    def __lt__(self, other):
        return self._compare(other) < 0

    def __le__(self, other):
        return self._compare(other) <= 0

    def __eq__(self, other):
        return self._compare(other) == 0

    def __ne__(self, other):
        return self._compare(other) != 0

    def __gt__(self, other):
        return self._compare(other) > 0

    def __ge__(self, other):
        return self._compare(other) >= 0

    def __repr__(self):
        return "NumpyVersion(%s)" % self.vstring

View File

@ -0,0 +1,17 @@
__all__: list[str]
# Type stub for numpy.lib._version.NumpyVersion; comparisons accept either
# a version string or another NumpyVersion instance.
class NumpyVersion:
    vstring: str            # the original, unparsed version string
    version: str            # the 'x.y.z' prefix only
    major: int
    minor: int
    bugfix: int
    pre_release: str        # 'a1'/'b2'/'rc1'/... , 'final', or ''
    is_devversion: bool
    def __init__(self, vstring: str) -> None: ...
    def __lt__(self, other: str | NumpyVersion) -> bool: ...
    def __le__(self, other: str | NumpyVersion) -> bool: ...
    def __eq__(self, other: str | NumpyVersion) -> bool: ...  # type: ignore[override]
    def __ne__(self, other: str | NumpyVersion) -> bool: ...  # type: ignore[override]
    def __gt__(self, other: str | NumpyVersion) -> bool: ...
    def __ge__(self, other: str | NumpyVersion) -> bool: ...

View File

@ -0,0 +1,7 @@
from ._array_utils_impl import (
__all__,
__doc__,
byte_bounds,
normalize_axis_index,
normalize_axis_tuple,
)

View File

@ -0,0 +1,6 @@
from ._array_utils_impl import (
__all__ as __all__,
byte_bounds as byte_bounds,
normalize_axis_index as normalize_axis_index,
normalize_axis_tuple as normalize_axis_tuple,
)

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,22 @@
from typing import Any, Literal, Final
__all__: list[str]

# Constants of the NPY file format (see numpy.lib.format).
EXPECTED_KEYS: Final[set[str]]
MAGIC_PREFIX: Final[bytes]
MAGIC_LEN: Literal[8]
ARRAY_ALIGN: Literal[64]
BUFFER_SIZE: Literal[262144]  # 2**18

# NOTE(review): these signatures are intentionally untyped in the stub;
# see numpy.lib.format for the runtime documentation of each function.
def magic(major, minor): ...
def read_magic(fp): ...
def dtype_to_descr(dtype): ...
def descr_to_dtype(descr): ...
def header_data_from_array_1_0(array): ...
def write_array_header_1_0(fp, d): ...
def write_array_header_2_0(fp, d): ...
def read_array_header_1_0(fp): ...
def read_array_header_2_0(fp): ...
def write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...): ...
def read_array(fp, allow_pickle=..., pickle_kwargs=...): ...
def open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=...): ...

View File

@ -0,0 +1,96 @@
"""
Introspection helper functions.
"""
import re
__all__ = ['opt_func_info']
def opt_func_info(func_name=None, signature=None):
"""
Returns a dictionary containing the currently supported CPU dispatched
features for all optimized functions.
Parameters
----------
func_name : str (optional)
Regular expression to filter by function name.
signature : str (optional)
Regular expression to filter by data type.
Returns
-------
dict
A dictionary where keys are optimized function names and values are
nested dictionaries indicating supported targets based on data types.
Examples
--------
Retrieve dispatch information for functions named 'add' or 'sub' and
data types 'float64' or 'float32':
>>> import numpy as np
>>> dict = np.lib.introspect.opt_func_info(
... func_name="add|abs", signature="float64|complex64"
... )
>>> import json
>>> print(json.dumps(dict, indent=2))
{
"absolute": {
"dd": {
"current": "SSE41",
"available": "SSE41 baseline(SSE SSE2 SSE3)"
},
"Ff": {
"current": "FMA3__AVX2",
"available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)"
},
"Dd": {
"current": "FMA3__AVX2",
"available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)"
}
},
"add": {
"ddd": {
"current": "FMA3__AVX2",
"available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)"
},
"FFF": {
"current": "FMA3__AVX2",
"available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)"
}
}
}
"""
from numpy._core._multiarray_umath import (
__cpu_targets_info__ as targets, dtype
)
if func_name is not None:
func_pattern = re.compile(func_name)
matching_funcs = {
k: v for k, v in targets.items()
if func_pattern.search(k)
}
else:
matching_funcs = targets
if signature is not None:
sig_pattern = re.compile(signature)
matching_sigs = {}
for k, v in matching_funcs.items():
matching_chars = {}
for chars, targets in v.items():
if any([
sig_pattern.search(c) or
sig_pattern.search(dtype(c).name)
for c in chars
]):
matching_chars[chars] = targets
if matching_chars:
matching_sigs[k] = matching_chars
else:
matching_sigs = matching_funcs
return matching_sigs

View File

@ -0,0 +1,183 @@
"""
Mixin classes for custom array types that don't inherit from ndarray.
"""
from numpy._core import umath as um
__all__ = ['NDArrayOperatorsMixin']
def _disables_array_ufunc(obj):
    """True when __array_ufunc__ is set to None."""
    # getattr's default absorbs the AttributeError case; only an explicit
    # `__array_ufunc__ = None` yields True.
    return getattr(obj, '__array_ufunc__', False) is None
def _binary_method(ufunc, name):
    """Implement a forward binary method with a ufunc, e.g., __add__."""
    def func(self, other):
        # Defer to the other operand when it opted out of ufuncs.
        if _disables_array_ufunc(other):
            return NotImplemented
        return ufunc(self, other)
    func.__name__ = f'__{name}__'
    return func
def _reflected_binary_method(ufunc, name):
    """Implement a reflected binary method with a ufunc, e.g., __radd__."""
    def func(self, other):
        # Defer to the other operand when it opted out of ufuncs.
        if _disables_array_ufunc(other):
            return NotImplemented
        # Reflected: operand order is swapped relative to the forward form.
        return ufunc(other, self)
    func.__name__ = f'__r{name}__'
    return func
def _inplace_binary_method(ufunc, name):
    """Implement an in-place binary method with a ufunc, e.g., __iadd__."""
    def func(self, other):
        # Route the result back into `self` via the ufunc's `out` argument.
        return ufunc(self, other, out=(self,))
    func.__name__ = f'__i{name}__'
    return func
def _numeric_methods(ufunc, name):
    """Implement forward, reflected and inplace binary methods with a ufunc."""
    forward = _binary_method(ufunc, name)
    reflected = _reflected_binary_method(ufunc, name)
    inplace = _inplace_binary_method(ufunc, name)
    return forward, reflected, inplace
def _unary_method(ufunc, name):
    """Implement a unary special method with a ufunc."""
    def func(self):
        return ufunc(self)
    func.__name__ = f'__{name}__'
    return func
class NDArrayOperatorsMixin:
    """Mixin defining all operator special methods using __array_ufunc__.
    This class implements the special methods for almost all of Python's
    builtin operators defined in the `operator` module, including comparisons
    (``==``, ``>``, etc.) and arithmetic (``+``, ``*``, ``-``, etc.), by
    deferring to the ``__array_ufunc__`` method, which subclasses must
    implement.
    It is useful for writing classes that do not inherit from `numpy.ndarray`,
    but that should support arithmetic and numpy universal functions like
    arrays as described in `A Mechanism for Overriding Ufuncs
    <https://numpy.org/neps/nep-0013-ufunc-overrides.html>`_.
    As an trivial example, consider this implementation of an ``ArrayLike``
    class that simply wraps a NumPy array and ensures that the result of any
    arithmetic operation is also an ``ArrayLike`` object:
    >>> import numbers
    >>> class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
    ...     def __init__(self, value):
    ...         self.value = np.asarray(value)
    ...
    ...     # One might also consider adding the built-in list type to this
    ...     # list, to support operations like np.add(array_like, list)
    ...     _HANDLED_TYPES = (np.ndarray, numbers.Number)
    ...
    ...     def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
    ...         out = kwargs.get('out', ())
    ...         for x in inputs + out:
    ...             # Only support operations with instances of
    ...             # _HANDLED_TYPES. Use ArrayLike instead of type(self)
    ...             # for isinstance to allow subclasses that don't
    ...             # override __array_ufunc__ to handle ArrayLike objects.
    ...             if not isinstance(
    ...                 x, self._HANDLED_TYPES + (ArrayLike,)
    ...             ):
    ...                 return NotImplemented
    ...
    ...         # Defer to the implementation of the ufunc
    ...         # on unwrapped values.
    ...         inputs = tuple(x.value if isinstance(x, ArrayLike) else x
    ...                        for x in inputs)
    ...         if out:
    ...             kwargs['out'] = tuple(
    ...                 x.value if isinstance(x, ArrayLike) else x
    ...                 for x in out)
    ...         result = getattr(ufunc, method)(*inputs, **kwargs)
    ...
    ...         if type(result) is tuple:
    ...             # multiple return values
    ...             return tuple(type(self)(x) for x in result)
    ...         elif method == 'at':
    ...             # no return value
    ...             return None
    ...         else:
    ...             # one return value
    ...             return type(self)(result)
    ...
    ...     def __repr__(self):
    ...         return '%s(%r)' % (type(self).__name__, self.value)
    In interactions between ``ArrayLike`` objects and numbers or numpy arrays,
    the result is always another ``ArrayLike``:
    >>> x = ArrayLike([1, 2, 3])
    >>> x - 1
    ArrayLike(array([0, 1, 2]))
    >>> 1 - x
    ArrayLike(array([ 0, -1, -2]))
    >>> np.arange(3) - x
    ArrayLike(array([-1, -1, -1]))
    >>> x - np.arange(3)
    ArrayLike(array([1, 1, 1]))
    Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations
    with arbitrary, unrecognized types. This ensures that interactions with
    ArrayLike preserve a well-defined casting hierarchy.
    .. versionadded:: 1.13
    """
    # The mixin itself adds no instance attributes.
    __slots__ = ()
    # Like np.ndarray, this mixin class implements "Option 1" from the ufunc
    # overrides NEP.
    # comparisons don't have reflected and in-place versions
    __lt__ = _binary_method(um.less, 'lt')
    __le__ = _binary_method(um.less_equal, 'le')
    __eq__ = _binary_method(um.equal, 'eq')
    __ne__ = _binary_method(um.not_equal, 'ne')
    __gt__ = _binary_method(um.greater, 'gt')
    __ge__ = _binary_method(um.greater_equal, 'ge')
    # numeric methods: each _numeric_methods call produces the
    # forward/reflected/in-place triple for one ufunc.
    __add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add')
    __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub')
    __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul')
    __matmul__, __rmatmul__, __imatmul__ = _numeric_methods(
        um.matmul, 'matmul')
    # Python 3 does not use __div__, __rdiv__, or __idiv__
    __truediv__, __rtruediv__, __itruediv__ = _numeric_methods(
        um.true_divide, 'truediv')
    __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods(
        um.floor_divide, 'floordiv')
    __mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, 'mod')
    __divmod__ = _binary_method(um.divmod, 'divmod')
    __rdivmod__ = _reflected_binary_method(um.divmod, 'divmod')
    # __idivmod__ does not exist
    # TODO: handle the optional third argument for __pow__?
    __pow__, __rpow__, __ipow__ = _numeric_methods(um.power, 'pow')
    __lshift__, __rlshift__, __ilshift__ = _numeric_methods(
        um.left_shift, 'lshift')
    __rshift__, __rrshift__, __irshift__ = _numeric_methods(
        um.right_shift, 'rshift')
    __and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, 'and')
    __xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, 'xor')
    __or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, 'or')
    # unary methods
    __neg__ = _unary_method(um.negative, 'neg')
    __pos__ = _unary_method(um.positive, 'pos')
    __abs__ = _unary_method(um.absolute, 'abs')
    __invert__ = _unary_method(um.invert, 'invert')

View File

@ -0,0 +1,74 @@
from abc import ABCMeta, abstractmethod
from typing import Literal as L, Any
from numpy import ufunc
__all__: list[str]
# NOTE: `NDArrayOperatorsMixin` is not formally an abstract baseclass,
# even though it's reliant on subclasses implementing `__array_ufunc__`
# NOTE: The accepted input- and output-types of the various dunders are
# completely dependent on how `__array_ufunc__` is implemented.
# As such, only little type safety can be provided here.
class NDArrayOperatorsMixin(metaclass=ABCMeta):
    # Subclasses must provide the ufunc dispatch hook; every operator
    # below routes through it at runtime.
    @abstractmethod
    def __array_ufunc__(
        self,
        ufunc: ufunc,
        method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "at"],
        *inputs: Any,
        **kwargs: Any,
    ) -> Any: ...
    # Comparisons (forward form only; no reflected/in-place variants).
    def __lt__(self, other: Any) -> Any: ...
    def __le__(self, other: Any) -> Any: ...
    def __eq__(self, other: Any) -> Any: ...
    def __ne__(self, other: Any) -> Any: ...
    def __gt__(self, other: Any) -> Any: ...
    def __ge__(self, other: Any) -> Any: ...
    # Arithmetic and bitwise operators: forward / reflected / in-place.
    def __add__(self, other: Any) -> Any: ...
    def __radd__(self, other: Any) -> Any: ...
    def __iadd__(self, other: Any) -> Any: ...
    def __sub__(self, other: Any) -> Any: ...
    def __rsub__(self, other: Any) -> Any: ...
    def __isub__(self, other: Any) -> Any: ...
    def __mul__(self, other: Any) -> Any: ...
    def __rmul__(self, other: Any) -> Any: ...
    def __imul__(self, other: Any) -> Any: ...
    def __matmul__(self, other: Any) -> Any: ...
    def __rmatmul__(self, other: Any) -> Any: ...
    def __imatmul__(self, other: Any) -> Any: ...
    def __truediv__(self, other: Any) -> Any: ...
    def __rtruediv__(self, other: Any) -> Any: ...
    def __itruediv__(self, other: Any) -> Any: ...
    def __floordiv__(self, other: Any) -> Any: ...
    def __rfloordiv__(self, other: Any) -> Any: ...
    def __ifloordiv__(self, other: Any) -> Any: ...
    def __mod__(self, other: Any) -> Any: ...
    def __rmod__(self, other: Any) -> Any: ...
    def __imod__(self, other: Any) -> Any: ...
    def __divmod__(self, other: Any) -> Any: ...
    def __rdivmod__(self, other: Any) -> Any: ...
    def __pow__(self, other: Any) -> Any: ...
    def __rpow__(self, other: Any) -> Any: ...
    def __ipow__(self, other: Any) -> Any: ...
    def __lshift__(self, other: Any) -> Any: ...
    def __rlshift__(self, other: Any) -> Any: ...
    def __ilshift__(self, other: Any) -> Any: ...
    def __rshift__(self, other: Any) -> Any: ...
    def __rrshift__(self, other: Any) -> Any: ...
    def __irshift__(self, other: Any) -> Any: ...
    def __and__(self, other: Any) -> Any: ...
    def __rand__(self, other: Any) -> Any: ...
    def __iand__(self, other: Any) -> Any: ...
    def __xor__(self, other: Any) -> Any: ...
    def __rxor__(self, other: Any) -> Any: ...
    def __ixor__(self, other: Any) -> Any: ...
    def __or__(self, other: Any) -> Any: ...
    def __ror__(self, other: Any) -> Any: ...
    def __ior__(self, other: Any) -> Any: ...
    # Unary operators.
    def __neg__(self) -> Any: ...
    def __pos__(self) -> Any: ...
    def __abs__(self) -> Any: ...
    def __invert__(self) -> Any: ...

View File

@ -0,0 +1,3 @@
from ._npyio_impl import (
__doc__, DataSource, NpzFile
)

View File

@ -0,0 +1,4 @@
from numpy.lib._npyio_impl import (
DataSource as DataSource,
NpzFile as NpzFile,
)

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,4 @@
from ._scimath_impl import (
__all__, __doc__, sqrt, log, log2, logn, log10, power, arccos, arcsin,
arctanh
)

View File

@ -0,0 +1,12 @@
from ._scimath_impl import (
__all__ as __all__,
sqrt as sqrt,
log as log,
log2 as log2,
logn as logn,
log10 as log10,
power as power,
arccos as arccos,
arcsin as arcsin,
arctanh as arctanh,
)

View File

@ -0,0 +1,3 @@
from ._stride_tricks_impl import (
__doc__, as_strided, sliding_window_view
)

View File

@ -0,0 +1,4 @@
from numpy.lib._stride_tricks_impl import (
as_strided as as_strided,
sliding_window_view as sliding_window_view,
)

Some files were not shown because too many files have changed in this diff Show More