"""
Kernel Density Estimation
-------------------------
"""

import itertools
from numbers import Integral, Real

import numpy as np
from scipy.special import gammainc

from ..base import BaseEstimator, _fit_context
from ..neighbors._base import VALID_METRICS
from ..utils import check_random_state
from ..utils._param_validation import Interval, StrOptions
from ..utils.extmath import row_norms
from ..utils.validation import _check_sample_weight, check_is_fitted, validate_data
from ._ball_tree import BallTree
from ._kd_tree import KDTree

VALID_KERNELS = [
    "gaussian",
    "tophat",
    "epanechnikov",
    "exponential",
    "linear",
    "cosine",
]
TREE_DICT = {"ball_tree": BallTree, "kd_tree": KDTree}


class KernelDensity(BaseEstimator):
    """Kernel Density Estimation.

Read more in the :ref:`User Guide <kernel_density>`.

Parameters
----------
bandwidth : float or {"scott", "silverman"}, default=1.0
    The bandwidth of the kernel. If a float, it is used directly as the
    kernel bandwidth. If a string, the bandwidth is estimated with the
    named rule; see the short sketch after this parameter list.

algorithm : {'kd_tree', 'ball_tree', 'auto'}, default='auto'
    The tree algorithm to use.

kernel : {'gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', \
        'cosine'}, default='gaussian'
    The kernel to use.

metric : str, default='euclidean'
    Metric to use for distance computation. See the
    documentation of `scipy.spatial.distance
    <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
    the metrics listed in
    :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
    values.

    Not all metrics are valid with all algorithms: refer to the
    documentation of :class:`BallTree` and :class:`KDTree`. Note that the
    normalization of the density output is correct only for the Euclidean
    distance metric.

atol : float, default=0
    The desired absolute tolerance of the result.  A larger tolerance will
    generally lead to faster execution.

rtol : float, default=0
    The desired relative tolerance of the result.  A larger tolerance will
    generally lead to faster execution.

breadth_first : bool, default=True
    If true (default), use a breadth-first approach to the problem.
    Otherwise use a depth-first approach.

leaf_size : int, default=40
    Specify the leaf size of the underlying tree.  See :class:`BallTree`
    or :class:`KDTree` for details.

metric_params : dict, default=None
    Additional parameters to be passed to the tree for use with the
    metric.  For more information, see the documentation of
    :class:`BallTree` or :class:`KDTree`.

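As a rough illustration of the 'scott' and 'silverman' bandwidth rules
(a sketch mirroring the formulas applied in ``fit``, with ``n`` samples
and ``d`` features; the rounded values come from this sketch, not from
library output):

>>> n, d = 100, 3
>>> round(n ** (-1 / (d + 4)), 3)  # 'scott'
0.518
>>> round((n * (d + 2) / 4) ** (-1 / (d + 4)), 3)  # 'silverman'
0.502
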
Attributes
----------
n_features_in_ : int
    Number of features seen during :term:`fit`.

    .. versionadded:: 0.24

tree_ : ``BinaryTree`` instance
    The tree algorithm for fast generalized N-point problems.

feature_names_in_ : ndarray of shape (`n_features_in_`,)
    Names of features seen during :term:`fit`. Defined only when `X`
    has feature names that are all strings.

bandwidth_ : float
    Value of the bandwidth, given directly by the bandwidth parameter or
    estimated using the 'scott' or 'silverman' method.

    .. versionadded:: 1.0

See Also
--------
sklearn.neighbors.KDTree : K-dimensional tree for fast generalized N-point
    problems.
sklearn.neighbors.BallTree : Ball tree for fast generalized N-point
    problems.

Examples
--------
Compute a gaussian kernel density estimate with a fixed bandwidth.

>>> from sklearn.neighbors import KernelDensity
>>> import numpy as np
>>> rng = np.random.RandomState(42)
>>> X = rng.random_sample((100, 3))
>>> kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X)
>>> log_density = kde.score_samples(X[:3])
>>> log_density
array([-1.52955942, -1.51462041, -1.60244657])
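
The total log-likelihood is then just the sum of these per-sample values
(a short continuation of the same sketch; the rounded value follows from
the numbers shown above):

>>> float(round(kde.score(X[:3]), 2))
-4.65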
"""

    _parameter_constraints: dict = {
        "bandwidth": [
            Interval(Real, 0, None, closed="neither"),
            StrOptions({"scott", "silverman"}),
        ],
        "algorithm": [StrOptions(set(TREE_DICT.keys()) | {"auto"})],
        "kernel": [StrOptions(set(VALID_KERNELS))],
        "metric": [
            StrOptions(
                set(itertools.chain(*[VALID_METRICS[alg] for alg in TREE_DICT]))
            )
        ],
        "atol": [Interval(Real, 0, None, closed="left")],
        "rtol": [Interval(Real, 0, None, closed="left")],
        "breadth_first": ["boolean"],
        "leaf_size": [Interval(Integral, 1, None, closed="left")],
        "metric_params": [None, dict],
    }

    def __init__(
        self,
        *,
        bandwidth=1.0,
        algorithm="auto",
        kernel="gaussian",
        metric="euclidean",
        atol=0,
        rtol=0,
        breadth_first=True,
        leaf_size=40,
        metric_params=None,
    ):
        self.algorithm = algorithm
        self.bandwidth = bandwidth
        self.kernel = kernel
        self.metric = metric
        self.atol = atol
        self.rtol = rtol
        self.breadth_first = breadth_first
        self.leaf_size = leaf_size
        self.metric_params = metric_params

    def _choose_algorithm(self, algorithm, metric):
        # Given the algorithm and metric strings, choose the optimal
        # tree type to compute the result.
        if algorithm == "auto":
            # prefer a KD-tree whenever the metric allows it
            if metric in KDTree.valid_metrics:
                return "kd_tree"
            elif metric in BallTree.valid_metrics:
                return "ball_tree"
        else:  # "kd_tree" or "ball_tree"
            if metric not in TREE_DICT[algorithm].valid_metrics:
                raise ValueError(
                    "invalid metric for {0}: '{1}'".format(TREE_DICT[algorithm], metric)
                )
            return algorithm

    @_fit_context(
        # KernelDensity.metric is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y=None, sample_weight=None):
        """Fit the Kernel Density model on the data.

Parameters
----------
X : array-like of shape (n_samples, n_features)
    List of n_features-dimensional data points.  Each row
    corresponds to a single data point.

y : None
    Ignored. This parameter exists only for compatibility with
    :class:`~sklearn.pipeline.Pipeline`.

sample_weight : array-like of shape (n_samples,), default=None
    List of sample weights attached to the data X.

    .. versionadded:: 0.20

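A weighted fit looks like this (a minimal sketch, not library output;
any array-like of per-sample weights works):

>>> import numpy as np
>>> rng = np.random.RandomState(0)
>>> X = rng.random_sample((50, 2))
>>> w = rng.uniform(0.1, 1.0, size=50)
>>> from sklearn.neighbors import KernelDensity
>>> kde = KernelDensity(bandwidth='silverman').fit(X, sample_weight=w)
>>> float(round(kde.bandwidth_, 3))
0.521
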
Returns
-------
self : object
    Returns the instance itself.
"""
        algorithm = self._choose_algorithm(self.algorithm, self.metric)

        if isinstance(self.bandwidth, str):
            # estimate the bandwidth from the data shape (n samples, d features)
            if self.bandwidth == "scott":
                self.bandwidth_ = X.shape[0] ** (-1 / (X.shape[1] + 4))
            elif self.bandwidth == "silverman":
                self.bandwidth_ = (X.shape[0] * (X.shape[1] + 2) / 4) ** (
                    -1 / (X.shape[1] + 4)
                )
        else:
            self.bandwidth_ = self.bandwidth

        X = validate_data(self, X, order="C", dtype=np.float64)

        if sample_weight is not None:
            sample_weight = _check_sample_weight(
                sample_weight, X, dtype=np.float64, ensure_non_negative=True
            )

        kwargs = self.metric_params
        if kwargs is None:
            kwargs = {}
        self.tree_ = TREE_DICT[algorithm](
            X,
            metric=self.metric,
            leaf_size=self.leaf_size,
            sample_weight=sample_weight,
            **kwargs,
        )
        return self

    def score_samples(self, X):
        """Compute the log-likelihood of each sample under the model.

Parameters
----------
X : array-like of shape (n_samples, n_features)
    An array of points to query.  The last dimension should match the
    dimension of the training data (n_features).

Returns
-------
density : ndarray of shape (n_samples,)
    Log-likelihood of each sample in `X`. These are normalized to be
    probability densities, so values will be low for high-dimensional
    data.
"""
        check_is_fitted(self)
        # The returned density is normalized to the number of points.
        # For it to be a probability, we must scale it; for the same
        # reason we also scale atol.
        X = validate_data(self, X, order="C", dtype=np.float64, reset=False)
        if self.tree_.sample_weight is None:
            N = self.tree_.data.shape[0]
        else:
            N = self.tree_.sum_weight
        atol_N = self.atol * N
        log_density = self.tree_.kernel_density(
            X,
            h=self.bandwidth_,
            kernel=self.kernel,
            atol=atol_N,
            rtol=self.rtol,
            breadth_first=self.breadth_first,
            return_log=True,
        )
        log_density -= np.log(N)
        return log_density

    def score(self, X, y=None):
        """Compute the total log-likelihood under the model.

Parameters
----------
X : array-like of shape (n_samples, n_features)
    List of n_features-dimensional data points.  Each row
    corresponds to a single data point.

y : None
    Ignored. This parameter exists only for compatibility with
    :class:`~sklearn.pipeline.Pipeline`.

Returns
-------
logprob : float
    Total log-likelihood of the data in X. This is normalized to be a
    probability density, so the value will be low for high-dimensional
    data.
"""
        return np.sum(self.score_samples(X))

    def sample(self, n_samples=1, random_state=None):
        """Generate random samples from the model.

Currently, this is implemented only for gaussian and tophat kernels.

Parameters
----------
n_samples : int, default=1
    Number of samples to generate.

random_state : int, RandomState instance or None, default=None
    Determines random number generation used to generate
    random samples. Pass an int for reproducible results
    across multiple function calls.
    See :term:`Glossary <random_state>`.

Returns
-------
X : array-like of shape (n_samples, n_features)
    List of samples.
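
Examples
--------
A minimal shape-check sketch (it assumes the gaussian-kernel model from
the class-level example; only the output shape is asserted here):

>>> import numpy as np
>>> from sklearn.neighbors import KernelDensity
>>> X = np.random.RandomState(42).random_sample((100, 3))
>>> kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X)
>>> kde.sample(n_samples=2, random_state=0).shape
(2, 3)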
"""
        check_is_fitted(self)
        # TODO: implement sampling for other valid kernel shapes
        if self.kernel not in ["gaussian", "tophat"]:
            raise NotImplementedError()

        data = np.asarray(self.tree_.data)

        rng = check_random_state(random_state)
        u = rng.uniform(0, 1, size=n_samples)
        if self.tree_.sample_weight is None:
            i = (u * data.shape[0]).astype(np.int64)
        else:
            cumsum_weight = np.cumsum(np.asarray(self.tree_.sample_weight))
            sum_weight = cumsum_weight[-1]
            i = np.searchsorted(cumsum_weight, u * sum_weight)
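        # The weighted branch above is inverse-CDF sampling: e.g. with
        # (hypothetical) weights [2, 1, 1], cumsum_weight is [2, 3, 4] and
        # u * 4 is uniform on [0, 4), so searchsorted returns index 0 with
        # probability 2/4 and indices 1 and 2 with probability 1/4 each.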
        if self.kernel == "gaussian":
            return np.atleast_2d(rng.normal(data[i], self.bandwidth_))

        elif self.kernel == "tophat":
            # we first draw points from a d-dimensional normal distribution,
            # then use an incomplete gamma function to map them to a uniform
            # d-dimensional tophat distribution.
            dim = data.shape[1]
            X = rng.normal(size=(n_samples, dim))
            s_sq = row_norms(X, squared=True)
            correction = (
                gammainc(0.5 * dim, 0.5 * s_sq) ** (1.0 / dim)
                * self.bandwidth_
                / np.sqrt(s_sq)
            )
            return data[i] + X * correction[:, np.newaxis]