from __future__ import annotations

from textwrap import dedent
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Literal,
)

from pandas.util._decorators import (
    deprecate_kwarg,
    doc,
)

from pandas.core.indexers.objects import (
    BaseIndexer,
    ExpandingIndexer,
    GroupbyIndexer,
)
from pandas.core.window.doc import (
    _shared_docs,
    create_section_header,
    kwargs_numeric_only,
    numba_notes,
    template_header,
    template_returns,
    template_see_also,
    window_agg_numba_parameters,
    window_apply_parameters,
)
from pandas.core.window.rolling import (
    BaseWindowGroupby,
    RollingAndExpandingMixin,
)

if TYPE_CHECKING:
    from pandas._typing import (
        Axis,
        QuantileInterpolation,
        WindowingRankType,
    )

    from pandas import (
        DataFrame,
        Series,
    )
    from pandas.core.generic import NDFrame


class Expanding(RollingAndExpandingMixin):
    """
    Provide expanding window calculations.

    Parameters
    ----------
    min_periods : int, default 1
        Minimum number of observations in window required to have a value;
        otherwise, result is ``np.nan``.

    axis : int or str, default 0
        If ``0`` or ``'index'``, roll across the rows.

        If ``1`` or ``'columns'``, roll across the columns.

        For `Series` this parameter is unused and defaults to 0.

    method : str {'single', 'table'}, default 'single'
        Execute the rolling operation per single column or row (``'single'``)
        or over the entire object (``'table'``).

        This argument is only implemented when specifying ``engine='numba'``
        in the method call.

        .. versionadded:: 1.3.0

    Returns
    -------
    pandas.api.typing.Expanding

    See Also
    --------
    rolling : Provides rolling window calculations.
    ewm : Provides exponential weighted functions.

    Notes
    -----
    See :ref:`Windowing Operations <window.expanding>` for further usage details
    and examples.

    Examples
    --------
    >>> df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
    >>> df
         B
    0  0.0
    1  1.0
    2  2.0
    3  NaN
    4  4.0

    **min_periods**

    Expanding sum with 1 vs 3 observations needed to calculate a value.

    >>> df.expanding(1).sum()
         B
    0  0.0
    1  1.0
    2  3.0
    3  3.0
    4  7.0
    >>> df.expanding(3).sum()
         B
    0  NaN
    1  NaN
    2  3.0
    3  3.0
    4  7.0
    """

    _attributes: list[str] = ["min_periods", "axis", "method"]

    def __init__(
        self,
        obj: NDFrame,
        min_periods: int = 1,
        axis: Axis = 0,
        method: str = "single",
        selection=None,
    ) -> None:
        super().__init__(
            obj=obj,
            min_periods=min_periods,
            axis=axis,
            method=method,
            selection=selection,
        )

    def _get_window_indexer(self) -> BaseIndexer:
        """
        Return an indexer class that will compute the window start and end bounds
        """
        return ExpandingIndexer()

    @doc(
        _shared_docs["aggregate"],
        see_also=dedent(
            """
        See Also
        --------
        pandas.DataFrame.aggregate : Similar DataFrame method.
        pandas.Series.aggregate : Similar Series method.
        """
        ),
        examples=dedent(
            """
        Examples
        --------
        >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
        >>> df
           A  B  C
        0  1  4  7
        1  2  5  8
        2  3  6  9

        >>> df.ewm(alpha=0.5).mean()
                  A         B         C
        0  1.000000  4.000000  7.000000
        1  1.666667  4.666667  7.666667
        2  2.428571  5.428571  8.428571
        """
        ),
        klass="Series/Dataframe",
        axis="",
    )
    def aggregate(self, func, *args, **kwargs):
        return super().aggregate(func, *args, **kwargs)

    agg = aggregate

    @doc(
        template_header,
        create_section_header("Parameters"),
        kwargs_numeric_only,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also,
        create_section_header("Examples"),
        dedent(
            """\
        >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
        >>> ser.expanding().count()
        a    1.0
        b    2.0
        c    3.0
        d    4.0
        dtype: float64
        """
        ),
        window_method="expanding",
        aggregation_description="count of non NaN observations",
        agg_method="count",
    )
    def count(self, numeric_only: bool = False):
        return super().count(numeric_only=numeric_only)

    @doc(
        template_header,
        create_section_header("Parameters"),
        window_apply_parameters,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also,
        create_section_header("Examples"),
        dedent(
            """\
        >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
        >>> ser.expanding().apply(lambda s: s.max() - 2 * s.min())
        a   -1.0
        b    0.0
        c    1.0
        d    2.0
        dtype: float64
        """
        ),
        window_method="expanding",
        aggregation_description="custom aggregation function",
        agg_method="apply",
    )
    def apply(
        self,
        func: Callable[..., Any],
        raw: bool = False,
        engine: Literal["cython", "numba"] | None = None,
        engine_kwargs: dict[str, bool] | None = None,
        args: tuple[Any, ...] | None = None,
        kwargs: dict[str, Any] | None = None,
    ):
        return super().apply(
            func,
            raw=raw,
            engine=engine,
            engine_kwargs=engine_kwargs,
            args=args,
            kwargs=kwargs,
        )

    @doc(
        template_header,
        create_section_header("Parameters"),
        kwargs_numeric_only,
        window_agg_numba_parameters(),
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also,
        create_section_header("Notes"),
        numba_notes,
        create_section_header("Examples"),
        dedent(
            """\
        >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
        >>> ser.expanding().sum()
        a     1.0
        b     3.0
        c     6.0
        d    10.0
        dtype: float64
        """
        ),
        window_method="expanding",
        aggregation_description="sum",
        agg_method="sum",
    )
    def sum(
        self,
        numeric_only: bool = False,
        engine: Literal["cython", "numba"] | None = None,
        engine_kwargs: dict[str, bool] | None = None,
    ):
        return super().sum(
            numeric_only=numeric_only,
            engine=engine,
            engine_kwargs=engine_kwargs,
        )

    @doc(
        template_header,
        create_section_header("Parameters"),
        kwargs_numeric_only,
        window_agg_numba_parameters(),
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also,
        create_section_header("Notes"),
        numba_notes,
        create_section_header("Examples"),
        dedent(
            """\
        >>> ser = pd.Series([3, 2, 1, 4], index=['a', 'b', 'c', 'd'])
        >>> ser.expanding().max()
        a    3.0
        b    3.0
        c    3.0
        d    4.0
        dtype: float64
        """
        ),
        window_method="expanding",
        aggregation_description="maximum",
        agg_method="max",
    )
    def max(
        self,
        numeric_only: bool = False,
        engine: Literal["cython", "numba"] | None = None,
        engine_kwargs: dict[str, bool] | None = None,
    ):
        return super().max(
            numeric_only=numeric_only,
            engine=engine,
            engine_kwargs=engine_kwargs,
        )

    @doc(
        template_header,
        create_section_header("Parameters"),
        kwargs_numeric_only,
        window_agg_numba_parameters(),
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also,
        create_section_header("Notes"),
        numba_notes,
        create_section_header("Examples"),
        dedent(
            """\
        >>> ser = pd.Series([2, 3, 4, 1], index=['a', 'b', 'c', 'd'])
        >>> ser.expanding().min()
        a    2.0
        b    2.0
        c    2.0
        d    1.0
        dtype: float64
        """
        ),
        window_method="expanding",
        aggregation_description="minimum",
        agg_method="min",
    )
    def min(
        self,
        numeric_only: bool = False,
        engine: Literal["cython", "numba"] | None = None,
        engine_kwargs: dict[str, bool] | None = None,
    ):
        return super().min(
            numeric_only=numeric_only,
            engine=engine,
            engine_kwargs=engine_kwargs,
        )

    @doc(
        template_header,
        create_section_header("Parameters"),
        kwargs_numeric_only,
        window_agg_numba_parameters(),
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also,
        create_section_header("Notes"),
        numba_notes,
        create_section_header("Examples"),
        dedent(
            """\
        >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
        >>> ser.expanding().mean()
        a    1.0
        b    1.5
        c    2.0
        d    2.5
        dtype: float64
        """
        ),
        window_method="expanding",
        aggregation_description="mean",
        agg_method="mean",
    )
    def mean(
        self,
        numeric_only: bool = False,
        engine: Literal["cython", "numba"] | None = None,
        engine_kwargs: dict[str, bool] | None = None,
    ):
        return super().mean(
            numeric_only=numeric_only,
            engine=engine,
            engine_kwargs=engine_kwargs,
        )

    @doc(
        template_header,
        create_section_header("Parameters"),
        kwargs_numeric_only,
        window_agg_numba_parameters(),
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also,
        create_section_header("Notes"),
        numba_notes,
        create_section_header("Examples"),
        dedent(
            """\
        >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
        >>> ser.expanding().median()
        a    1.0
        b    1.5
        c    2.0
        d    2.5
        dtype: float64
        """
        ),
        window_method="expanding",
        aggregation_description="median",
        agg_method="median",
    )
    def median(
        self,
        numeric_only: bool = False,
        engine: Literal["cython", "numba"] | None = None,
        engine_kwargs: dict[str, bool] | None = None,
    ):
        return super().median(
            numeric_only=numeric_only,
            engine=engine,
            engine_kwargs=engine_kwargs,
        )

    @doc(
        template_header,
        create_section_header("Parameters"),
        dedent(
            """\
        ddof : int, default 1
            Delta Degrees of Freedom.  The divisor used in calculations
            is ``N - ddof``, where ``N`` represents the number of elements.

        """
        ),
        kwargs_numeric_only,
        window_agg_numba_parameters("1.4"),
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        "numpy.std : Equivalent method for NumPy array.\n",
        template_see_also,
        create_section_header("Notes"),
        dedent(
            """\
        The default ``ddof`` of 1 used in :meth:`Series.std` is different
        than the default ``ddof`` of 0 in :func:`numpy.std`.

        A minimum of one period is required for the rolling calculation.

        """
        ),
        create_section_header("Examples"),
        dedent(
            """\
        >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])

        >>> s.expanding(3).std()
        0         NaN
        1         NaN
        2    0.577350
        3    0.957427
        4    0.894427
        5    0.836660
        6    0.786796
        dtype: float64
        """
        ),
        window_method="expanding",
        aggregation_description="standard deviation",
        agg_method="std",
    )
    def std(
        self,
        ddof: int = 1,
        numeric_only: bool = False,
        engine: Literal["cython", "numba"] | None = None,
        engine_kwargs: dict[str, bool] | None = None,
    ):
        return super().std(
            ddof=ddof,
            numeric_only=numeric_only,
            engine=engine,
            engine_kwargs=engine_kwargs,
        )

    @doc(
        template_header,
        create_section_header("Parameters"),
        dedent(
            """\
        ddof : int, default 1
            Delta Degrees of Freedom.  The divisor used in calculations
            is ``N - ddof``, where ``N`` represents the number of elements.

        """
        ),
        kwargs_numeric_only,
        window_agg_numba_parameters("1.4"),
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        "numpy.var : Equivalent method for NumPy array.\n",
        template_see_also,
        create_section_header("Notes"),
        dedent(
            """\
        The default ``ddof`` of 1 used in :meth:`Series.var` is different
        than the default ``ddof`` of 0 in :func:`numpy.var`.

        A minimum of one period is required for the rolling calculation.

        """
        ),
        create_section_header("Examples"),
        dedent(
            """\
        >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])

        >>> s.expanding(3).var()
        0         NaN
        1         NaN
        2    0.333333
        3    0.916667
        4    0.800000
        5    0.700000
        6    0.619048
        dtype: float64
        """
        ),
        window_method="expanding",
        aggregation_description="variance",
        agg_method="var",
    )
    def var(
        self,
        ddof: int = 1,
        numeric_only: bool = False,
        engine: Literal["cython", "numba"] | None = None,
        engine_kwargs: dict[str, bool] | None = None,
    ):
        return super().var(
            ddof=ddof,
            numeric_only=numeric_only,
            engine=engine,
            engine_kwargs=engine_kwargs,
        )

    @doc(
        template_header,
        create_section_header("Parameters"),
        dedent(
            """\
        ddof : int, default 1
            Delta Degrees of Freedom.  The divisor used in calculations
            is ``N - ddof``, where ``N`` represents the number of elements.

        """
        ),
        kwargs_numeric_only,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also,
        create_section_header("Notes"),
        "A minimum of one period is required for the calculation.\n\n",
        create_section_header("Examples"),
        dedent(
            """\
        >>> s = pd.Series([0, 1, 2, 3])

        >>> s.expanding().sem()
        0         NaN
        1    0.707107
        2    0.707107
        3    0.745356
        dtype: float64
        """
        ),
        window_method="expanding",
        aggregation_description="standard error of mean",
        agg_method="sem",
    )
    def sem(self, ddof: int = 1, numeric_only: bool = False):
        return super().sem(ddof=ddof, numeric_only=numeric_only)

    @doc(
        template_header,
        create_section_header("Parameters"),
        kwargs_numeric_only,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        "scipy.stats.skew : Third moment of a probability density.\n",
        template_see_also,
        create_section_header("Notes"),
        "A minimum of three periods is required for the rolling calculation.\n\n",
        create_section_header("Examples"),
        dedent(
            """\
        >>> ser = pd.Series([-1, 0, 2, -1, 2], index=['a', 'b', 'c', 'd', 'e'])
        >>> ser.expanding().skew()
        a         NaN
        b         NaN
        c    0.935220
        d    1.414214
        e    0.315356
        dtype: float64
        """
        ),
        window_method="expanding",
        aggregation_description="unbiased skewness",
        agg_method="skew",
    )
    def skew(self, numeric_only: bool = False):
        return super().skew(numeric_only=numeric_only)

    @doc(
        template_header,
        create_section_header("Parameters"),
        kwargs_numeric_only,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        "scipy.stats.kurtosis : Reference SciPy method.\n",
        template_see_also,
        create_section_header("Notes"),
        "A minimum of four periods is required for the calculation.\n\n",
        create_section_header("Examples"),
        dedent(
            """\
        The example below will show a rolling calculation with a window size of
        four matching the equivalent function call using `scipy.stats`.

        >>> arr = [1, 2, 3, 4, 999]
        >>> import scipy.stats
        >>> print(f"{{scipy.stats.kurtosis(arr[:-1], bias=False):.6f}}")
        -1.200000
        >>> print(f"{{scipy.stats.kurtosis(arr, bias=False):.6f}}")
        4.999874
        >>> s = pd.Series(arr)
        >>> s.expanding(4).kurt()
        0         NaN
        1         NaN
        2         NaN
        3   -1.200000
        4    4.999874
        dtype: float64
        """
        ),
        window_method="expanding",
        aggregation_description="Fisher's definition of kurtosis without bias",
        agg_method="kurt",
    )
    def kurt(self, numeric_only: bool = False):
        return super().kurt(numeric_only=numeric_only)

    @doc(
        template_header,
        create_section_header("Parameters"),
        dedent(
            """\
        quantile : float
            Quantile to compute. 0 <= quantile <= 1.

            .. deprecated:: 2.1.0
                This will be renamed to 'q' in a future version.
        interpolation : {{'linear', 'lower', 'higher', 'midpoint', 'nearest'}}
            This optional parameter specifies the interpolation method to use,
            when the desired quantile lies between two data points `i` and `j`:

                * linear: `i + (j - i) * fraction`, where `fraction` is the
                  fractional part of the index surrounded by `i` and `j`.
                * lower: `i`.
                * higher: `j`.
                * nearest: `i` or `j` whichever is nearest.
                * midpoint: (`i` + `j`) / 2.
        """
        ),
        kwargs_numeric_only,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also,
        create_section_header("Examples"),
        dedent(
            """\
        >>> ser = pd.Series([1, 2, 3, 4, 5, 6], index=['a', 'b', 'c', 'd', 'e', 'f'])
        >>> ser.expanding(min_periods=4).quantile(.25)
        a     NaN
        b     NaN
        c     NaN
        d    1.75
        e    2.00
        f    2.25
        dtype: float64
        """
        ),
        window_method="expanding",
        aggregation_description="quantile",
        agg_method="quantile",
    )
    @deprecate_kwarg(old_arg_name="quantile", new_arg_name="q")
    def quantile(
        self,
        q: float,
        interpolation: QuantileInterpolation = "linear",
        numeric_only: bool = False,
    ):
        return super().quantile(
            q=q,
            interpolation=interpolation,
            numeric_only=numeric_only,
        )

    @doc(
        template_header,
        ".. versionadded:: 1.4.0 \n\n",
        create_section_header("Parameters"),
        dedent(
            """\
        method : {{'average', 'min', 'max'}}, default 'average'
            How to rank the group of records that have the same value (i.e. ties):

            * average: average rank of the group
            * min: lowest rank in the group
            * max: highest rank in the group

        ascending : bool, default True
            Whether or not the elements should be ranked in ascending order.
        pct : bool, default False
            Whether or not to display the returned rankings in percentile
            form.
        """
        ),
        kwargs_numeric_only,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also,
        create_section_header("Examples"),
        dedent(
            """\
        >>> s = pd.Series([1, 4, 2, 3, 5, 3])
        >>> s.expanding().rank()
        0    1.0
        1    2.0
        2    2.0
        3    3.0
        4    5.0
        5    3.5
        dtype: float64

        >>> s.expanding().rank(method="max")
        0    1.0
        1    2.0
        2    2.0
        3    3.0
        4    5.0
        5    4.0
        dtype: float64

        >>> s.expanding().rank(method="min")
        0    1.0
        1    2.0
        2    2.0
        3    3.0
        4    5.0
        5    3.0
        dtype: float64
        """
        ),
        window_method="expanding",
        aggregation_description="rank",
        agg_method="rank",
    )
    def rank(
        self,
        method: WindowingRankType = "average",
        ascending: bool = True,
        pct: bool = False,
        numeric_only: bool = False,
    ):
        return super().rank(
            method=method,
            ascending=ascending,
            pct=pct,
            numeric_only=numeric_only,
        )

    @doc(
        template_header,
        create_section_header("Parameters"),
        dedent(
            """\
        other : Series or DataFrame, optional
            If not supplied then will default to self and produce pairwise
            output.
        pairwise : bool, default None
            If False then only matching columns between self and other will be
            used and the output will be a DataFrame.
            If True then all pairwise combinations will be calculated and the
            output will be a MultiIndexed DataFrame in the case of DataFrame
            inputs. In the case of missing elements, only complete pairwise
            observations will be used.
        ddof : int, default 1
            Delta Degrees of Freedom.  The divisor used in calculations
            is ``N - ddof``, where ``N`` represents the number of elements.
        """
        ),
        kwargs_numeric_only,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        template_see_also,
        create_section_header("Examples"),
        dedent(
            """\
        >>> ser1 = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
        >>> ser2 = pd.Series([10, 11, 13, 16], index=['a', 'b', 'c', 'd'])
        >>> ser1.expanding().cov(ser2)
        a         NaN
        b    0.500000
        c    1.500000
        d    3.333333
        dtype: float64
        """
        ),
        window_method="expanding",
        aggregation_description="sample covariance",
        agg_method="cov",
    )
    def cov(
        self,
        other: DataFrame | Series | None = None,
        pairwise: bool | None = None,
        ddof: int = 1,
        numeric_only: bool = False,
    ):
        return super().cov(
            other=other,
            pairwise=pairwise,
            ddof=ddof,
            numeric_only=numeric_only,
        )

    @doc(
        template_header,
        create_section_header("Parameters"),
        dedent(
            """\
        other : Series or DataFrame, optional
            If not supplied then will default to self and produce pairwise
            output.
        pairwise : bool, default None
            If False then only matching columns between self and other will be
            used and the output will be a DataFrame.
            If True then all pairwise combinations will be calculated and the
            output will be a MultiIndexed DataFrame in the case of DataFrame
            inputs. In the case of missing elements, only complete pairwise
            observations will be used.
        """
        ),
        kwargs_numeric_only,
        create_section_header("Returns"),
        template_returns,
        create_section_header("See Also"),
        dedent(
            """\
        cov : Similar method to calculate covariance.
        numpy.corrcoef : NumPy Pearson's correlation calculation.
        """
        ),
        template_see_also,
        create_section_header("Notes"),
        dedent(
            """\
        This function uses Pearson's definition of correlation
        (https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).

        When `other` is not specified, the output will be self correlation (e.g.
        all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`
        set to `True`.

        Function will return ``NaN`` for correlations of equal valued sequences;
        this is the result of a 0/0 division error.

        When `pairwise` is set to `False`, only matching columns between `self` and
        `other` will be used.

        When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame
        with the original index on the first level, and the `other` DataFrame
        columns on the second level.

        In the case of missing elements, only complete pairwise observations
        will be used.

        """
        ),
        create_section_header("Examples"),
        dedent(
            """\
        >>> ser1 = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
        >>> ser2 = pd.Series([10, 11, 13, 16], index=['a', 'b', 'c', 'd'])
        >>> ser1.expanding().corr(ser2)
        a         NaN
        b    1.000000
        c    0.981981
        d    0.975900
        dtype: float64
        """
        ),
        window_method="expanding",
        aggregation_description="correlation",
        agg_method="corr",
    )
    def corr(
        self,
        other: DataFrame | Series | None = None,
        pairwise: bool | None = None,
        ddof: int = 1,
        numeric_only: bool = False,
    ):
        return super().corr(
            other=other,
            pairwise=pairwise,
            ddof=ddof,
            numeric_only=numeric_only,
        )


class ExpandingGroupby(BaseWindowGroupby, Expanding):
    """
    Provide an expanding groupby implementation.
    """

    _attributes = Expanding._attributes + BaseWindowGroupby._attributes

    def _get_window_indexer(self) -> GroupbyIndexer:
        """
        Return an indexer class that will compute the window start and end bounds

        Returns
        -------
        GroupbyIndexer
        """
        window_indexer = GroupbyIndexer(
            groupby_indices=self._grouper.indices,
            window_indexer=ExpandingIndexer,
        )
        return window_indexer
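
# Usage sketch (illustrative, not part of this module): an ExpandingGroupby is
# normally obtained through ``DataFrame.groupby(...).expanding()``; the
# GroupbyIndexer above restarts the expanding window at the first row of each
# group, e.g.:
#
#     df = pd.DataFrame({"g": ["a", "a", "b", "b"], "x": [1, 2, 3, 4]})
#     df.groupby("g")["x"].expanding().sum()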