
"""A set of kernels that can be combined by operators and used in Gaussian processes."""

import math
import warnings
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from inspect import signature

import numpy as np
from scipy.spatial.distance import cdist, pdist, squareform
from scipy.special import gamma, kv

from ..base import clone
from ..exceptions import ConvergenceWarning
from ..metrics.pairwise import pairwise_kernels
from ..utils.validation import _num_samples


def _check_length_scale(X, length_scale):
    length_scale = np.squeeze(length_scale).astype(float)
    if np.ndim(length_scale) > 1:
        raise ValueError("length_scale cannot be of dimension greater than 1")
    if np.ndim(length_scale) == 1 and X.shape[1] != length_scale.shape[0]:
        raise ValueError(
            "Anisotropic kernel must have the same number of "
            "dimensions as data (%d!=%d)" % (length_scale.shape[0], X.shape[1])
        )
    return length_scale


class Hyperparameter(
    namedtuple(
        "Hyperparameter", ("name", "value_type", "bounds", "n_elements", "fixed")
    )
):
    """A kernel hyperparameter's specification in form of a namedtuple.

.. versionadded:: 0.18

Attributes
----------
name : str
    The name of the hyperparameter. Note that a kernel using a
    hyperparameter with name "x" must have the attributes self.x and
    self.x_bounds

value_type : str
    The type of the hyperparameter. Currently, only "numeric"
    hyperparameters are supported.

bounds : pair of floats >= 0 or "fixed"
    The lower and upper bound on the parameter. If n_elements>1, a pair
    of 1d array with n_elements each may be given alternatively. If
    the string "fixed" is passed as bounds, the hyperparameter's value
    cannot be changed.

n_elements : int, default=1
    The number of elements of the hyperparameter value. Defaults to 1,
    which corresponds to a scalar hyperparameter. n_elements > 1
    corresponds to a hyperparameter which is vector-valued,
    such as, e.g., anisotropic length-scales.

fixed : bool, default=None
    Whether the value of this hyperparameter is fixed, i.e., cannot be
    changed during hyperparameter tuning. If None is passed, the "fixed" is
    derived based on the given bounds.

Examples
--------
>>> from sklearn.gaussian_process.kernels import ConstantKernel
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import Hyperparameter
>>> X, y = make_friedman2(n_samples=50, noise=0, random_state=0)
>>> kernel = ConstantKernel(constant_value=1.0,
...    constant_value_bounds=(0.0, 10.0))

We can access each hyperparameter:

>>> for hyperparameter in kernel.hyperparameters:
...    print(hyperparameter)
Hyperparameter(name='constant_value', value_type='numeric',
bounds=array([[ 0., 10.]]), n_elements=1, fixed=False)

>>> params = kernel.get_params()
>>> for key in sorted(params): print(f"{key} : {params[key]}")
constant_value : 1.0
constant_value_bounds : (0.0, 10.0)
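
A small illustrative sketch (values here are arbitrary, not defaults):
passing the string ``"fixed"`` as bounds marks the hyperparameter as
excluded from optimization:

>>> fixed = Hyperparameter("constant_value", "numeric", "fixed")
>>> fixed.fixed
True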
    """

    # A raw namedtuple is very memory efficient as it packs the attributes
    # in a struct to get rid of the __dict__ of attributes; in particular it
    # does not copy the string for the keys on each instance.

    # By deriving a namedtuple class just to introduce the __new__ method we
    # also have to set __slots__ to an empty tuple to keep the memory
    # benefits.
    __slots__ = ()

    def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
        if not isinstance(bounds, str) or bounds != "fixed":
            bounds = np.atleast_2d(bounds)
            if n_elements > 1:  # vector-valued parameter
                if bounds.shape[0] == 1:
                    bounds = np.repeat(bounds, n_elements, 0)
                elif bounds.shape[0] != n_elements:
                    raise ValueError(
                        "Bounds on %s should have either 1 or "
                        "%d dimensions. Given are %d"
                        % (name, n_elements, bounds.shape[0])
                    )

        if fixed is None:
            fixed = isinstance(bounds, str) and bounds == "fixed"
        return super(Hyperparameter, cls).__new__(
            cls, name, value_type, bounds, n_elements, fixed
        )

    # This is mainly a testing utility to check that two hyperparameters
    # are equal.
    def __eq__(self, other):
        return (
            self.name == other.name
            and self.value_type == other.value_type
            and np.all(self.bounds == other.bounds)
            and self.n_elements == other.n_elements
            and self.fixed == other.fixed
        )


class Kernel(metaclass=ABCMeta):
    """Base class for all kernels.

.. versionadded:: 0.18

Examples
--------
>>> from sklearn.gaussian_process.kernels import Kernel, RBF
>>> import numpy as np
>>> class CustomKernel(Kernel):
...     def __init__(self, length_scale=1.0):
...         self.length_scale = length_scale
...     def __call__(self, X, Y=None):
...         if Y is None:
...             Y = X
...         return np.inner(X, X if Y is None else Y) ** 2
...     def diag(self, X):
...         return np.ones(X.shape[0])
...     def is_stationary(self):
...         return True
>>> kernel = CustomKernel(length_scale=2.0)
>>> X = np.array([[1, 2], [3, 4]])
>>> print(kernel(X))
[[ 25 121]
 [121 625]]
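
A further sketch building on the example above: since ``CustomKernel``
declares no ``hyperparameter_*`` attributes, its log-transformed parameter
vector is empty:

>>> kernel.theta
array([], dtype=float64)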
    """

    def get_params(self, deep=True):
        """Get parameters of this kernel.

        Parameters
        ----------
        deep : bool, default=True
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        params = dict()

        # introspect the constructor arguments to find the model parameters
        # to represent
        cls = self.__class__
        init = getattr(cls.__init__, "deprecated_original", cls.__init__)
        init_sign = signature(init)
        args, varargs = [], []
        for parameter in init_sign.parameters.values():
            if parameter.kind != parameter.VAR_KEYWORD and parameter.name != "self":
                args.append(parameter.name)
            if parameter.kind == parameter.VAR_POSITIONAL:
                varargs.append(parameter.name)

        if len(varargs) != 0:
            raise RuntimeError(
                "scikit-learn kernels should always "
                "specify their parameters in the signature "
                "of their __init__ (no varargs)."
                " %s doesn't follow this convention." % (cls,)
            )
        for arg in args:
            params[arg] = getattr(self, arg)

        return params

    def set_params(self, **params):
        """Set the parameters of this kernel.

        The method works on simple kernels as well as on nested kernels.
        The latter have parameters of the form ``<component>__<parameter>``
        so that it's possible to update each component of a nested object.

        Returns
        -------
        self
        """
        if not params:
            # Simple optimization to gain speed (inspect is slow)
            return self
        valid_params = self.get_params(deep=True)
        for key, value in params.items():
            split = key.split("__", 1)
            if len(split) > 1:
                # nested objects case
                name, sub_name = split
                if name not in valid_params:
                    raise ValueError(
                        "Invalid parameter %s for kernel %s. "
                        "Check the list of available parameters "
                        "with `kernel.get_params().keys()`." % (name, self)
                    )
                sub_object = valid_params[name]
                sub_object.set_params(**{sub_name: value})
            else:
                # simple objects case
                if key not in valid_params:
                    raise ValueError(
                        "Invalid parameter %s for kernel %s. "
                        "Check the list of available parameters "
                        "with `kernel.get_params().keys()`."
                        % (key, self.__class__.__name__)
                    )
                setattr(self, key, value)
        return self

    def clone_with_theta(self, theta):
        """Returns a clone of self with given hyperparameters theta.

        Parameters
        ----------
        theta : ndarray of shape (n_dims,)
            The hyperparameters
        """
        cloned = clone(self)
        cloned.theta = theta
        return cloned

    @property
    def n_dims(self):
        """Returns the number of non-fixed hyperparameters of the kernel."""
        return self.theta.shape[0]

    @property
    def hyperparameters(self):
        """Returns a list of all hyperparameter specifications."""
        r = [
            getattr(self, attr)
            for attr in dir(self)
            if attr.startswith("hyperparameter_")
        ]
        return r

    @property
    def theta(self):
        """Returns the (flattened, log-transformed) non-fixed hyperparameters.

        Note that theta are typically the log-transformed values of the
        kernel's hyperparameters as this representation of the search space
        is more amenable for hyperparameter search, as hyperparameters like
        length-scales naturally live on a log-scale.

        Returns
        -------
        theta : ndarray of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        """
        theta = []
        params = self.get_params()
        for hyperparameter in self.hyperparameters:
            if not hyperparameter.fixed:
                theta.append(params[hyperparameter.name])
        if len(theta) > 0:
            return np.log(np.hstack(theta))
        else:
            return np.array([])

    @theta.setter
    def theta(self, theta):
        """Sets the (flattened, log-transformed) non-fixed hyperparameters.

        Parameters
        ----------
        theta : ndarray of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        """
        params = self.get_params()
        i = 0
        for hyperparameter in self.hyperparameters:
            if hyperparameter.fixed:
                continue
            if hyperparameter.n_elements > 1:
                # vector-valued parameter
                params[hyperparameter.name] = np.exp(
                    theta[i : i + hyperparameter.n_elements]
                )
                i += hyperparameter.n_elements
            else:
                params[hyperparameter.name] = np.exp(theta[i])
                i += 1

        if i != len(theta):
            raise ValueError(
                "theta has not the correct number of entries."
                " Should be %d; given are %d" % (i, len(theta))
            )
        self.set_params(**params)

    @property
    def bounds(self):
        """Returns the log-transformed bounds on the theta.

        Returns
        -------
        bounds : ndarray of shape (n_dims, 2)
            The log-transformed bounds on the kernel's hyperparameters theta
        """
        bounds = [
            hyperparameter.bounds
            for hyperparameter in self.hyperparameters
            if not hyperparameter.fixed
        ]
        if len(bounds) > 0:
            return np.log(np.vstack(bounds))
        else:
            return np.array([])

    def __add__(self, b):
        if not isinstance(b, Kernel):
            return Sum(self, ConstantKernel(b))
        return Sum(self, b)

    def __radd__(self, b):
        if not isinstance(b, Kernel):
            return Sum(ConstantKernel(b), self)
        return Sum(b, self)

    def __mul__(self, b):
        if not isinstance(b, Kernel):
            return Product(self, ConstantKernel(b))
        return Product(self, b)

    def __rmul__(self, b):
        if not isinstance(b, Kernel):
            return Product(ConstantKernel(b), self)
        return Product(b, self)

    def __pow__(self, b):
        return Exponentiation(self, b)

    def __eq__(self, b):
        if type(self) != type(b):
            return False
        params_a = self.get_params()
        params_b = b.get_params()
        for key in set(list(params_a.keys()) + list(params_b.keys())):
            if np.any(params_a.get(key, None) != params_b.get(key, None)):
                return False
        return True

    def __repr__(self):
        return "{0}({1})".format(
            self.__class__.__name__, ", ".join(map("{0:.3g}".format, self.theta))
        )

    @abstractmethod
    def __call__(self, X, Y=None, eval_gradient=False):
        """Evaluate the kernel."""

    @abstractmethod
    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : array-like of shape (n_samples,)
            Left argument of the returned kernel k(X, Y)

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """

    @abstractmethod
    def is_stationary(self):
        """Returns whether the kernel is stationary."""

    @property
    def requires_vector_input(self):
        """Returns whether the kernel is defined on fixed-length feature
        vectors or generic objects. Defaults to True for backward
        compatibility."""
        return True

    def _check_bounds_params(self):
        """Called after fitting to warn if bounds may have been too tight."""
        list_close = np.isclose(self.bounds, np.atleast_2d(self.theta).T)
        idx = 0
        for hyp in self.hyperparameters:
            if hyp.fixed:
                continue
            for dim in range(hyp.n_elements):
                if list_close[idx, 0]:
                    warnings.warn(
                        "The optimal value found for "
                        "dimension %s of parameter %s is "
                        "close to the specified lower "
                        "bound %s. Decreasing the bound and"
                        " calling fit again may find a "
                        "better value." % (dim, hyp.name, hyp.bounds[dim][0]),
                        ConvergenceWarning,
                    )
                elif list_close[idx, 1]:
                    warnings.warn(
                        "The optimal value found for "
                        "dimension %s of parameter %s is "
                        "close to the specified upper "
                        "bound %s. Increasing the bound and"
                        " calling fit again may find a "
                        "better value." % (dim, hyp.name, hyp.bounds[dim][1]),
                        ConvergenceWarning,
                    )
                idx += 1


class NormalizedKernelMixin:
    """Mixin for kernels which are normalized: k(X, X)=1.

    .. versionadded:: 0.18
    """

    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """
        return np.ones(X.shape[0])


class StationaryKernelMixin:
    """Mixin for kernels which are stationary: k(X, Y)= f(X-Y).

    .. versionadded:: 0.18
    """

    def is_stationary(self):
        """Returns whether the kernel is stationary."""
        return True


class GenericKernelMixin:
    """Mixin for kernels which operate on generic objects such as variable-
    length sequences, trees, and graphs.

    .. versionadded:: 0.22
    """

    @property
    def requires_vector_input(self):
        """Whether the kernel works only on fixed-length feature vectors."""
        return False


class CompoundKernel(Kernel):
    """Kernel which is composed of a set of other kernels.

.. versionadded:: 0.18

Parameters
----------
kernels : list of Kernels
    The other kernels

Examples
--------
>>> from sklearn.gaussian_process.kernels import WhiteKernel
>>> from sklearn.gaussian_process.kernels import RBF
>>> from sklearn.gaussian_process.kernels import CompoundKernel
>>> kernel = CompoundKernel(
...     [WhiteKernel(noise_level=3.0), RBF(length_scale=2.0)])
>>> print(kernel.bounds)
[[-11.51292546  11.51292546]
 [-11.51292546  11.51292546]]
>>> print(kernel.n_dims)
2
>>> print(kernel.theta)
[1.09861229 0.69314718]
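
As an illustrative sketch of the stacking convention described above, the
diagonal carries one column per wrapped kernel (input values arbitrary):

>>> import numpy as np
>>> kernel.diag(np.array([[1.0, 2.0], [3.0, 4.0]])).shape
(2, 2)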
    """

    def __init__(self, kernels):
        self.kernels = kernels

    def get_params(self, deep=True):
        """Get parameters of this kernel.

        Parameters
        ----------
        deep : bool, default=True
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        return dict(kernels=self.kernels)

    @property
    def theta(self):
        """Returns the (flattened, log-transformed) non-fixed hyperparameters.

        Note that theta are typically the log-transformed values of the
        kernel's hyperparameters as this representation of the search space
        is more amenable for hyperparameter search, as hyperparameters like
        length-scales naturally live on a log-scale.

        Returns
        -------
        theta : ndarray of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        """
        return np.hstack([kernel.theta for kernel in self.kernels])

    @theta.setter
    def theta(self, theta):
        """Sets the (flattened, log-transformed) non-fixed hyperparameters.

        Parameters
        ----------
        theta : array of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        """
        k_dims = self.k1.n_dims
        for i, kernel in enumerate(self.kernels):
            kernel.theta = theta[i * k_dims : (i + 1) * k_dims]

    @property
    def bounds(self):
        """Returns the log-transformed bounds on the theta.

        Returns
        -------
        bounds : array of shape (n_dims, 2)
            The log-transformed bounds on the kernel's hyperparameters theta
        """
        return np.vstack([kernel.bounds for kernel in self.kernels])

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Note that this compound kernel returns the results of all simple kernel
        stacked along an additional axis.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object, \
            default=None
            Left argument of the returned kernel k(X, Y)

        Y : array-like of shape (n_samples_X, n_features) or list of object, \
            default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of the
            kernel hyperparameter is computed.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y, n_kernels)
            Kernel k(X, Y)

        K_gradient : ndarray of shape \
                (n_samples_X, n_samples_X, n_dims, n_kernels), optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        if eval_gradient:
            K = []
            K_grad = []
            for kernel in self.kernels:
                K_single, K_grad_single = kernel(X, Y, eval_gradient)
                K.append(K_single)
                K_grad.append(K_grad_single[..., np.newaxis])
            return np.dstack(K), np.concatenate(K_grad, 3)
        else:
            return np.dstack([kernel(X, Y, eval_gradient) for kernel in self.kernels])

    def __eq__(self, b):
        if type(self) != type(b) or len(self.kernels) != len(b.kernels):
            return False
        return np.all(
            [self.kernels[i] == b.kernels[i] for i in range(len(self.kernels))]
        )

    def is_stationary(self):
        """Returns whether the kernel is stationary."""
        return np.all([kernel.is_stationary() for kernel in self.kernels])

    @property
    def requires_vector_input(self):
        """Returns whether the kernel is defined on discrete structures."""
        return np.any([kernel.requires_vector_input for kernel in self.kernels])

    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to `np.diag(self(X))`; however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Argument to the kernel.

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X, n_kernels)
            Diagonal of kernel k(X, X)
        """
        return np.vstack([kernel.diag(X) for kernel in self.kernels]).T


class KernelOperator(Kernel):
    """Base class for all kernel operators.

    .. versionadded:: 0.18
    """

    def __init__(self, k1, k2):
        self.k1 = k1
        self.k2 = k2

    def get_params(self, deep=True):
        """Get parameters of this kernel.

        Parameters
        ----------
        deep : bool, default=True
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        params = dict(k1=self.k1, k2=self.k2)
        if deep:
            deep_items = self.k1.get_params().items()
            params.update(("k1__" + k, val) for k, val in deep_items)
            deep_items = self.k2.get_params().items()
            params.update(("k2__" + k, val) for k, val in deep_items)

        return params

    @property
    def hyperparameters(self):
        """Returns a list of all hyperparameter."""
        r = [
            Hyperparameter(
                "k1__" + hyperparameter.name,
                hyperparameter.value_type,
                hyperparameter.bounds,
                hyperparameter.n_elements,
            )
            for hyperparameter in self.k1.hyperparameters
        ]

        for hyperparameter in self.k2.hyperparameters:
            r.append(
                Hyperparameter(
                    "k2__" + hyperparameter.name,
                    hyperparameter.value_type,
                    hyperparameter.bounds,
                    hyperparameter.n_elements,
                )
            )
        return r

    @property
    def theta(self):
        """Returns the (flattened, log-transformed) non-fixed hyperparameters.

        Note that theta are typically the log-transformed values of the
        kernel's hyperparameters as this representation of the search space
        is more amenable for hyperparameter search, as hyperparameters like
        length-scales naturally live on a log-scale.

        Returns
        -------
        theta : ndarray of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        """
        return np.append(self.k1.theta, self.k2.theta)

    @theta.setter
    def theta(self, theta):
        """Sets the (flattened, log-transformed) non-fixed hyperparameters.

        Parameters
        ----------
        theta : ndarray of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        """
        k1_dims = self.k1.n_dims
        self.k1.theta = theta[:k1_dims]
        self.k2.theta = theta[k1_dims:]

    @property
    def bounds(self):
        """Returns the log-transformed bounds on the theta.

        Returns
        -------
        bounds : ndarray of shape (n_dims, 2)
            The log-transformed bounds on the kernel's hyperparameters theta
        """
        if self.k1.bounds.size == 0:
            return self.k2.bounds
        if self.k2.bounds.size == 0:
            return self.k1.bounds
        return np.vstack((self.k1.bounds, self.k2.bounds))

    def __eq__(self, b):
        if type(self) != type(b):
            return False
        return (self.k1 == b.k1 and self.k2 == b.k2) or (
            self.k1 == b.k2 and self.k2 == b.k1
        )

    def is_stationary(self):
        """Returns whether the kernel is stationary."""
        return self.k1.is_stationary() and self.k2.is_stationary()

    @property
    def requires_vector_input(self):
        """Returns whether the kernel is defined on discrete structures."""
        return self.k1.requires_vector_input or self.k2.requires_vector_input


class Sum(KernelOperator):
    """The `Sum` kernel takes two kernels :math:`k_1` and :math:`k_2`

.. math::
    k_{sum}(X, Y) = k_1(X, Y) + k_2(X, Y)

Note that the `__add__` magic method is overridden, so
`Sum(RBF(), RBF())` is equivalent to using the + operator
with `RBF() + RBF()`.


Read more in the :ref:`User Guide <gp_kernels>`.

.. versionadded:: 0.18

Parameters
----------
k1 : Kernel
    The first base-kernel of the sum-kernel

k2 : Kernel
    The second base-kernel of the sum-kernel

Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import RBF, Sum, ConstantKernel
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = Sum(ConstantKernel(2), RBF())
>>> gpr = GaussianProcessRegressor(kernel=kernel,
...         random_state=0).fit(X, y)
>>> gpr.score(X, y)
1.0
>>> kernel
1.41**2 + RBF(length_scale=1)
Nc                     U(       a?  U R                  XSS9u  pEU R                  XSS9u  pgXF-   [        R                  " XW45      4$ U R                  X5      U R                  X5      -   $ )aY  Return the kernel k(X, Y) and optionally its gradient.

Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
    Left argument of the returned kernel k(X, Y)

Y : array-like of shape (n_samples_X, n_features) or list of object,                default=None
    Right argument of the returned kernel k(X, Y). If None, k(X, X)
    is evaluated instead.

eval_gradient : bool, default=False
    Determines whether the gradient with respect to the log of
    the kernel hyperparameter is computed.

Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
    Kernel k(X, Y)

K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),                optional
    The gradient of the kernel k(X, X) with respect to the log of the
    hyperparameter of the kernel. Only returned when `eval_gradient`
    is True.
Tr   )r   r  r   r   r5   r   r   r   K1K1_gradientK2K2_gradients           r   r   Sum.__call__A  sc    8 "gga$g?OB"gga$g?OB7BII{&@AAA771=4771=00r   c                 p    U R                   R                  U5      U R                  R                  U5      -   $ )a  Returns the diagonal of the kernel k(X, X).

The result of this method is identical to `np.diag(self(X))`; however,
it can be evaluated more efficiently since only the diagonal is
evaluated.

Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
    Argument to the kernel.

Returns
-------
K_diag : ndarray of shape (n_samples_X,)
    Diagonal of kernel k(X, X)
r   r   r  r   s     r   r   Sum.diagd  '    " ww||Aa00r   c                 N    SR                  U R                  U R                  5      $ )Nz	{0} + {1}r   r   r  ro   s    r   r   Sum.__repr__w      !!$''47733r   r"   r   	r9   r:   r;   r<   r=   r   r   r   r?   r"   r   r   r   r     s    $L!1F1&4r   r   c                   .    \ rS rSrSrSS jrS rS rSrg)	r   i{  a  The `Product` kernel takes two kernels :math:`k_1` and :math:`k_2`
and combines them via

.. math::
    k_{prod}(X, Y) = k_1(X, Y) * k_2(X, Y)

Note that the `__mul__` magic method is overridden, so
`Product(RBF(), RBF())` is equivalent to using the * operator
with `RBF() * RBF()`.

Read more in the :ref:`User Guide <gp_kernels>`.

.. versionadded:: 0.18

Parameters
----------
k1 : Kernel
    The first base-kernel of the product-kernel

k2 : Kernel
    The second base-kernel of the product-kernel


Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import (RBF, Product,
...            ConstantKernel)
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = Product(ConstantKernel(2), RBF())
>>> gpr = GaussianProcessRegressor(kernel=kernel,
...         random_state=0).fit(X, y)
>>> gpr.score(X, y)
1.0
>>> kernel
1.41**2 * RBF(length_scale=1)
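
As a sketch, both factors' hyperparameters are exposed under the ``k1__``
and ``k2__`` prefixes:

>>> sorted(kernel.get_params().keys())
['k1', 'k1__constant_value', 'k1__constant_value_bounds', 'k2', 'k2__length_scale', 'k2__length_scale_bounds']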
    """

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Left argument of the returned kernel k(X, Y)

        Y : array-like of shape (n_samples_Y, n_features) or list of object, \
            default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
                optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        if eval_gradient:
            K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
            K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
            return K1 * K2, np.dstack(
                (K1_gradient * K2[:, :, np.newaxis], K2_gradient * K1[:, :, np.newaxis])
            )
        else:
            return self.k1(X, Y) * self.k2(X, Y)

    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Argument to the kernel.

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """
        return self.k1.diag(X) * self.k2.diag(X)

    def __repr__(self):
        return "{0} * {1}".format(self.k1, self.k2)


class Exponentiation(Kernel):
    """
)r   i  a
  The Exponentiation kernel takes one base kernel and a scalar parameter
:math:`p` and combines them via

.. math::
    k_{exp}(X, Y) = k(X, Y) ^p

Note that the `__pow__` magic method is overridden, so
`Exponentiation(RBF(), 2)` is equivalent to using the ** operator
with `RBF() ** 2`.


Read more in the :ref:`User Guide <gp_kernels>`.

.. versionadded:: 0.18

Parameters
----------
kernel : Kernel
    The base kernel

exponent : float
    The exponent for the base kernel


Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import (RationalQuadratic,
...            Exponentiation)
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = Exponentiation(RationalQuadratic(), exponent=2)
>>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
...         random_state=0).fit(X, y)
>>> gpr.score(X, y)
0.419
>>> gpr.predict(X[:1,:], return_std=True)
(array([635.5]), array([0.559]))
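
A minimal sketch of the exponent's effect: since the base kernel is
normalized, exponentiation leaves the diagonal unchanged:

>>> float(RationalQuadratic().diag(X[:1])[0]) == float(kernel.diag(X[:1])[0]) == 1.0
True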
    """

    def __init__(self, kernel, exponent):
        self.kernel = kernel
        self.exponent = exponent

    def get_params(self, deep=True):
        """Get parameters of this kernel.

        Parameters
        ----------
        deep : bool, default=True
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        params = dict(kernel=self.kernel, exponent=self.exponent)
        if deep:
            deep_items = self.kernel.get_params().items()
            params.update(("kernel__" + k, val) for k, val in deep_items)
        return params

    @property
    def hyperparameters(self):
        """Returns a list of all hyperparameter."""
        r = []
        for hyperparameter in self.kernel.hyperparameters:
            r.append(
                Hyperparameter(
                    "kernel__" + hyperparameter.name,
                    hyperparameter.value_type,
                    hyperparameter.bounds,
                    hyperparameter.n_elements,
                )
            )
        return r

    @property
    def theta(self):
        """Returns the (flattened, log-transformed) non-fixed hyperparameters.

        Returns
        -------
        theta : ndarray of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        """
        return self.kernel.theta

    @theta.setter
    def theta(self, theta):
        """Sets the (flattened, log-transformed) non-fixed hyperparameters.

        Parameters
        ----------
        theta : ndarray of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        """
        self.kernel.theta = theta

    @property
    def bounds(self):
        """Returns the log-transformed bounds on the theta.

        Returns
        -------
        bounds : ndarray of shape (n_dims, 2)
            The log-transformed bounds on the kernel's hyperparameters theta
        """
        return self.kernel.bounds

    def __eq__(self, b):
        if type(self) != type(b):
            return False
        return self.kernel == b.kernel and self.exponent == b.exponent

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Left argument of the returned kernel k(X, Y)

        Y : array-like of shape (n_samples_Y, n_features) or list of object, \
            default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
                optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        if eval_gradient:
            K, K_gradient = self.kernel(X, Y, eval_gradient=True)
            K_gradient *= self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
            return K**self.exponent, K_gradient
        else:
            K = self.kernel(X, Y, eval_gradient=False)
            return K**self.exponent

    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Argument to the kernel.

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """
        return self.kernel.diag(X) ** self.exponent

    def __repr__(self):
        return "{0} ** {1}".format(self.kernel, self.exponent)

    def is_stationary(self):
        """Returns whether the kernel is stationary."""
        return self.kernel.is_stationary()

    @property
    def requires_vector_input(self):
        """Returns whether the kernel is defined on discrete structures."""
        return self.kernel.requires_vector_input


class ConstantKernel(StationaryKernelMixin, GenericKernelMixin, Kernel):
    """Constant kernel.

Can be used as part of a product-kernel where it scales the magnitude of
the other factor (kernel) or as part of a sum-kernel, where it modifies
the mean of the Gaussian process.

.. math::
    k(x_1, x_2) = constant\_value \;\forall\; x_1, x_2

Adding a constant kernel is equivalent to adding a constant::

        kernel = RBF() + ConstantKernel(constant_value=2)

is the same as::

        kernel = RBF() + 2


Read more in the :ref:`User Guide <gp_kernels>`.

.. versionadded:: 0.18

Parameters
----------
constant_value : float, default=1.0
    The constant value which defines the covariance:
    k(x_1, x_2) = constant_value

constant_value_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
    The lower and upper bound on `constant_value`.
    If set to "fixed", `constant_value` cannot be changed during
    hyperparameter tuning.

Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import RBF, ConstantKernel
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = RBF() + ConstantKernel(constant_value=2)
>>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
...         random_state=0).fit(X, y)
>>> gpr.score(X, y)
0.3696
>>> gpr.predict(X[:1,:], return_std=True)
(array([606.1]), array([0.248]))
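
A short sketch (inputs arbitrary): the kernel value equals the constant
regardless of its arguments:

>>> import numpy as np
>>> ConstantKernel(constant_value=2.0)(np.array([[0.0], [1.0]]))
array([[2., 2.],
       [2., 2.]])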
    """

    def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
        self.constant_value = constant_value
        self.constant_value_bounds = constant_value_bounds

    @property
    def hyperparameter_constant_value(self):
        return Hyperparameter("constant_value", "numeric", self.constant_value_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Left argument of the returned kernel k(X, Y)

        Y : array-like of shape (n_samples_X, n_features) or list of object, \
            default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
            optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when eval_gradient
            is True.
        """
        if Y is None:
            Y = X
        elif eval_gradient:
            raise ValueError("Gradient can only be evaluated when Y is None.")

        K = np.full(
            (_num_samples(X), _num_samples(Y)),
            self.constant_value,
            dtype=np.array(self.constant_value).dtype,
        )
        if eval_gradient:
            if not self.hyperparameter_constant_value.fixed:
                return (
                    K,
                    np.full(
                        (_num_samples(X), _num_samples(X), 1),
                        self.constant_value,
                        dtype=np.array(self.constant_value).dtype,
                    ),
                )
            else:
                return K, np.empty((_num_samples(X), _num_samples(X), 0))
        else:
            return K

    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Argument to the kernel.

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """
        return np.full(
            _num_samples(X),
            self.constant_value,
            dtype=np.array(self.constant_value).dtype,
        )

    def __repr__(self):
        return "{0:.3g}**2".format(np.sqrt(self.constant_value))


class WhiteKernel(StationaryKernelMixin, GenericKernelMixin, Kernel):
    """White kernel.

The main use-case of this kernel is as part of a sum-kernel where it
explains the noise of the signal as independently and identically
normally-distributed. The parameter noise_level equals the variance of this
noise.

.. math::
    k(x_1, x_2) = noise\_level \text{ if } x_i == x_j \text{ else } 0


Read more in the :ref:`User Guide <gp_kernels>`.

.. versionadded:: 0.18

Parameters
----------
noise_level : float, default=1.0
    Parameter controlling the noise level (variance)

noise_level_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
    The lower and upper bound on 'noise_level'.
    If set to "fixed", 'noise_level' cannot be changed during
    hyperparameter tuning.

Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = DotProduct() + WhiteKernel(noise_level=0.5)
>>> gpr = GaussianProcessRegressor(kernel=kernel,
...         random_state=0).fit(X, y)
>>> gpr.score(X, y)
0.3680
>>> gpr.predict(X[:2,:], return_std=True)
(array([653.0, 592.1 ]), array([316.6, 316.6]))
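
A brief sketch (inputs arbitrary): the noise term contributes only on the
diagonal of k(X, X):

>>> import numpy as np
>>> WhiteKernel(noise_level=0.5)(np.array([[1.0], [2.0]]))
array([[0.5, 0. ],
       [0. , 0.5]])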
    """

    def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
        self.noise_level = noise_level
        self.noise_level_bounds = noise_level_bounds

    @property
    def hyperparameter_noise_level(self):
        return Hyperparameter("noise_level", "numeric", self.noise_level_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Left argument of the returned kernel k(X, Y)

        Y : array-like of shape (n_samples_X, n_features) or list of object, \
            default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
            optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when eval_gradient
            is True.
        """
        if Y is not None and eval_gradient:
            raise ValueError("Gradient can only be evaluated when Y is None.")

        if Y is None:
            K = self.noise_level * np.eye(_num_samples(X))
            if eval_gradient:
                if not self.hyperparameter_noise_level.fixed:
                    return (
                        K,
                        self.noise_level * np.eye(_num_samples(X))[:, :, np.newaxis],
                    )
                else:
                    return K, np.empty((_num_samples(X), _num_samples(X), 0))
            else:
                return K
        else:
            return np.zeros((_num_samples(X), _num_samples(Y)))

    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Argument to the kernel.

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """
        return np.full(
            _num_samples(X), self.noise_level, dtype=np.array(self.noise_level).dtype
        )

    def __repr__(self):
        return "{0}(noise_level={1:.3g})".format(
            self.__class__.__name__, self.noise_level
        )


class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
    """Radial basis function kernel (aka squared-exponential kernel).

The RBF kernel is a stationary kernel. It is also known as the
"squared exponential" kernel. It is parameterized by a length scale
parameter :math:`l>0`, which can either be a scalar (isotropic variant
of the kernel) or a vector with the same number of dimensions as the inputs
X (anisotropic variant of the kernel). The kernel is given by:

.. math::
    k(x_i, x_j) = \exp\left(- \frac{d(x_i, x_j)^2}{2l^2} \right)

where :math:`l` is the length scale of the kernel and
:math:`d(\cdot,\cdot)` is the Euclidean distance.
For advice on how to set the length scale parameter, see e.g. [1]_.

This kernel is infinitely differentiable, which implies that GPs with this
kernel as covariance function have mean square derivatives of all orders,
and are thus very smooth.
See [2]_, Chapter 4, Section 4.2, for further details of the RBF kernel.

Read more in the :ref:`User Guide <gp_kernels>`.

.. versionadded:: 0.18

Parameters
----------
length_scale : float or ndarray of shape (n_features,), default=1.0
    The length scale of the kernel. If a float, an isotropic kernel is
    used. If an array, an anisotropic kernel is used where each dimension
    of l defines the length-scale of the respective feature dimension.

length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
    The lower and upper bound on 'length_scale'.
    If set to "fixed", 'length_scale' cannot be changed during
    hyperparameter tuning.

References
----------
.. [1] `David Duvenaud (2014). "The Kernel Cookbook:
    Advice on Covariance functions".
    <https://www.cs.toronto.edu/~duvenaud/cookbook/>`_

.. [2] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
    "Gaussian Processes for Machine Learning". The MIT Press.
    <http://www.gaussianprocess.org/gpml/>`_

Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.gaussian_process import GaussianProcessClassifier
>>> from sklearn.gaussian_process.kernels import RBF
>>> X, y = load_iris(return_X_y=True)
>>> kernel = 1.0 * RBF(1.0)
>>> gpc = GaussianProcessClassifier(kernel=kernel,
...         random_state=0).fit(X, y)
>>> gpc.score(X, y)
0.9866
>>> gpc.predict_proba(X[:2,:])
array([[0.8354, 0.03228, 0.1322],
       [0.7906, 0.0652, 0.1441]])
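
A small sketch of the isotropic/anisotropic distinction described above:

>>> RBF(length_scale=1.0).anisotropic
False
>>> RBF(length_scale=[1.0, 2.0]).anisotropic
True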
    """

    def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
        self.length_scale = length_scale
        self.length_scale_bounds = length_scale_bounds

    @property
    def anisotropic(self):
        return np.iterable(self.length_scale) and len(self.length_scale) > 1

    @property
    def hyperparameter_length_scale(self):
        if self.anisotropic:
            return Hyperparameter(
                "length_scale",
                "numeric",
                self.length_scale_bounds,
                len(self.length_scale),
            )
        return Hyperparameter("length_scale", "numeric", self.length_scale_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
                optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        X = np.atleast_2d(X)
        length_scale = _check_length_scale(X, self.length_scale)
        if Y is None:
            dists = pdist(X / length_scale, metric="sqeuclidean")
            K = np.exp(-0.5 * dists)
            # convert from upper-triangular matrix to square matrix
            K = squareform(K)
            np.fill_diagonal(K, 1)
        else:
            if eval_gradient:
                raise ValueError("Gradient can only be evaluated when Y is None.")
            dists = cdist(X / length_scale, Y / length_scale, metric="sqeuclidean")
            K = np.exp(-0.5 * dists)

        if eval_gradient:
            if self.hyperparameter_length_scale.fixed:
                # Hyperparameter l kept fixed
                return K, np.empty((X.shape[0], X.shape[0], 0))
            elif not self.anisotropic or length_scale.shape[0] == 1:
                K_gradient = (K * squareform(dists))[:, :, np.newaxis]
                return K, K_gradient
            elif self.anisotropic:
                # We need to recompute the pairwise dimension-wise distances
                K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / (
                    length_scale**2
                )
                K_gradient *= K[..., np.newaxis]
                return K, K_gradient
        else:
            return K

    def __repr__(self):
        if self.anisotropic:
            return "{0}(length_scale=[{1}])".format(
                self.__class__.__name__,
                ", ".join(map("{0:.3g}".format, self.length_scale)),
            )
        else:  # isotropic
            return "{0}(length_scale={1:.3g})".format(
                self.__class__.__name__, np.ravel(self.length_scale)[0]
            )


class Matern(RBF):
    """Matern kernel.

The class of Matern kernels is a generalization of the :class:`RBF`.
It has an additional parameter :math:`\nu` which controls the
smoothness of the resulting function. The smaller :math:`\nu`,
the less smooth the approximated function is.
As :math:`\nu\rightarrow\infty`, the kernel becomes equivalent to
the :class:`RBF` kernel. When :math:`\nu = 1/2`, the Matérn kernel
becomes identical to the absolute exponential kernel.
Important intermediate values are
:math:`\nu=1.5` (once differentiable functions)
and :math:`\nu=2.5` (twice differentiable functions).

The kernel is given by:

.. math::
     k(x_i, x_j) = \frac{1}{\Gamma(\nu)2^{\nu-1}}\Bigg(
     \frac{\sqrt{2\nu}}{l} d(x_i, x_j)
     \Bigg)^\nu K_\nu\Bigg(
     \frac{\sqrt{2\nu}}{l} d(x_i, x_j)\Bigg)

where :math:`d(\cdot,\cdot)` is the Euclidean distance,
:math:`K_{\nu}(\cdot)` is a modified Bessel function and
:math:`\Gamma(\cdot)` is the gamma function.
See [1]_, Chapter 4, Section 4.2, for details regarding the different
variants of the Matern kernel.

Read more in the :ref:`User Guide <gp_kernels>`.

.. versionadded:: 0.18

Parameters
----------
length_scale : float or ndarray of shape (n_features,), default=1.0
    The length scale of the kernel. If a float, an isotropic kernel is
    used. If an array, an anisotropic kernel is used where each dimension
    of l defines the length-scale of the respective feature dimension.

length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
    The lower and upper bound on 'length_scale'.
    If set to "fixed", 'length_scale' cannot be changed during
    hyperparameter tuning.

nu : float, default=1.5
    The parameter nu controlling the smoothness of the learned function.
    The smaller nu, the less smooth the approximated function is.
    For nu=inf, the kernel becomes equivalent to the RBF kernel and for
    nu=0.5 to the absolute exponential kernel. Important intermediate
    values are nu=1.5 (once differentiable functions) and nu=2.5
    (twice differentiable functions). Note that values of nu not in
    [0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
    (approx. 10 times higher), since they require evaluating the modified
    Bessel function. Furthermore, in contrast to l, nu is kept fixed at
    its initial value and not optimized.

References
----------
.. [1] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
    "Gaussian Processes for Machine Learning". The MIT Press.
    <http://www.gaussianprocess.org/gpml/>`_

Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.gaussian_process import GaussianProcessClassifier
>>> from sklearn.gaussian_process.kernels import Matern
>>> X, y = load_iris(return_X_y=True)
>>> kernel = 1.0 * Matern(length_scale=1.0, nu=1.5)
>>> gpc = GaussianProcessClassifier(kernel=kernel,
...         random_state=0).fit(X, y)
>>> gpc.score(X, y)
0.9866
>>> gpc.predict_proba(X[:2,:])
array([[0.8513, 0.0368, 0.1117],
        [0.8086, 0.0693, 0.1220]])
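
A minimal sketch (added for illustration) of the limit noted above: for
``nu=np.inf`` the Matern kernel coincides with the RBF kernel.

>>> import numpy as np
>>> from sklearn.gaussian_process.kernels import Matern, RBF
>>> X = np.array([[0.0], [0.5], [1.0]])
>>> bool(np.allclose(Matern(length_scale=1.0, nu=np.inf)(X),
...                  RBF(length_scale=1.0)(X)))
True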
    """

    def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5), nu=1.5):
        super().__init__(length_scale, length_scale_bounds)
        self.nu = nu

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        X = np.atleast_2d(X)
        length_scale = _check_length_scale(X, self.length_scale)
        if Y is None:
            dists = pdist(X / length_scale, metric="euclidean")
        else:
            if eval_gradient:
                raise ValueError("Gradient can only be evaluated when Y is None.")
            dists = cdist(X / length_scale, Y / length_scale, metric="euclidean")

        if self.nu == 0.5:
            K = np.exp(-dists)
        elif self.nu == 1.5:
            K = dists * math.sqrt(3)
            K = (1.0 + K) * np.exp(-K)
        elif self.nu == 2.5:
            K = dists * math.sqrt(5)
            K = (1.0 + K + K**2 / 3.0) * np.exp(-K)
        elif self.nu == np.inf:
            K = np.exp(-(dists**2) / 2.0)
        else:  # general case; expensive to evaluate
            K = dists
            K[K == 0.0] += np.finfo(float).eps  # strict zeros result in nan
            tmp = math.sqrt(2 * self.nu) * K
            K.fill((2 ** (1.0 - self.nu)) / gamma(self.nu))
            K *= tmp**self.nu
            K *= kv(self.nu, tmp)

        if Y is None:
            # convert from upper-triangular matrix to square matrix
            K = squareform(K)
            np.fill_diagonal(K, 1)

        if eval_gradient:
            if self.hyperparameter_length_scale.fixed:
                # Hyperparameter l kept fixed
                K_gradient = np.empty((X.shape[0], X.shape[0], 0))
                return K, K_gradient

            # We need to recompute the pairwise dimension-wise distances
            if self.anisotropic:
                D = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / (
                    length_scale**2
                )
            else:
                D = squareform(dists**2)[:, :, np.newaxis]

            if self.nu == 0.5:
                denominator = np.sqrt(D.sum(axis=2))[:, :, np.newaxis]
                divide_result = np.zeros_like(D)
                np.divide(
                    D,
                    denominator,
                    out=divide_result,
                    where=denominator != 0,
                )
                K_gradient = K[..., np.newaxis] * divide_result
            elif self.nu == 1.5:
                K_gradient = 3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
            elif self.nu == 2.5:
                tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
                K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
            elif self.nu == np.inf:
                K_gradient = D * K[..., np.newaxis]
            else:
                # approximate gradient numerically
                def f(theta):  # helper function
                    return self.clone_with_theta(theta)(X, Y)

                return K, _approx_fprime(self.theta, f, 1e-10)

            if not self.anisotropic:
                return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
            else:
                return K, K_gradient
        else:
            return K

    def __repr__(self):
        if self.anisotropic:
            return "{0}(length_scale=[{1}], nu={2:.3g})".format(
                self.__class__.__name__,
                ", ".join(map("{0:.3g}".format, self.length_scale)),
                self.nu,
            )
        else:
            return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
                self.__class__.__name__, np.ravel(self.length_scale)[0], self.nu
            )


class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
    r"""Rational Quadratic kernel.

The RationalQuadratic kernel can be seen as a scale mixture (an infinite
sum) of RBF kernels with different characteristic length scales. It is
parameterized by a length scale parameter :math:`l>0` and a scale
mixture parameter :math:`\alpha>0`. Only the isotropic variant
where length_scale :math:`l` is a scalar is supported at the moment.
The kernel is given by:

.. math::
    k(x_i, x_j) = \left(
    1 + \frac{d(x_i, x_j)^2 }{ 2\alpha  l^2}\right)^{-\alpha}

where :math:`\alpha` is the scale mixture parameter, :math:`l` is
the length scale of the kernel and :math:`d(\cdot,\cdot)` is the
Euclidean distance.
For advice on how to set the parameters, see e.g. [1]_.

Read more in the :ref:`User Guide <gp_kernels>`.

.. versionadded:: 0.18

Parameters
----------
length_scale : float > 0, default=1.0
    The length scale of the kernel.

alpha : float > 0, default=1.0
    Scale mixture parameter

length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
    The lower and upper bound on 'length_scale'.
    If set to "fixed", 'length_scale' cannot be changed during
    hyperparameter tuning.

alpha_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
    The lower and upper bound on 'alpha'.
    If set to "fixed", 'alpha' cannot be changed during
    hyperparameter tuning.

References
----------
.. [1] `David Duvenaud (2014). "The Kernel Cookbook:
    Advice on Covariance functions".
    <https://www.cs.toronto.edu/~duvenaud/cookbook/>`_

Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.gaussian_process import GaussianProcessClassifier
>>> from sklearn.gaussian_process.kernels import RationalQuadratic
>>> X, y = load_iris(return_X_y=True)
>>> kernel = RationalQuadratic(length_scale=1.0, alpha=1.5)
>>> gpc = GaussianProcessClassifier(kernel=kernel,
...         random_state=0).fit(X, y)
>>> gpc.score(X, y)
0.9733
>>> gpc.predict_proba(X[:2,:])
array([[0.8881, 0.0566, 0.05518],
        [0.8678, 0.0707, 0.0614]])
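
A minimal sketch (added for illustration) of the scale-mixture view: for
large ``alpha`` the RationalQuadratic kernel approaches an RBF kernel with
the same length scale.

>>> import numpy as np
>>> from sklearn.gaussian_process.kernels import RationalQuadratic, RBF
>>> X = np.array([[0.0], [1.0]])
>>> rq = RationalQuadratic(length_scale=1.0, alpha=1e8)
>>> bool(np.allclose(rq(X), RBF(length_scale=1.0)(X), atol=1e-6))
True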
    """

    def __init__(
        self,
        length_scale=1.0,
        alpha=1.0,
        length_scale_bounds=(1e-5, 1e5),
        alpha_bounds=(1e-5, 1e5),
    ):
        self.length_scale = length_scale
        self.alpha = alpha
        self.length_scale_bounds = length_scale_bounds
        self.alpha_bounds = alpha_bounds

    @property
    def hyperparameter_length_scale(self):
        return Hyperparameter("length_scale", "numeric", self.length_scale_bounds)

    @property
    def hyperparameter_alpha(self):
        return Hyperparameter("alpha", "numeric", self.alpha_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
    Left argument of the returned kernel k(X, Y)

Y : ndarray of shape (n_samples_Y, n_features), default=None
    Right argument of the returned kernel k(X, Y). If None, k(X, X)
    is evaluated instead.

eval_gradient : bool, default=False
    Determines whether the gradient with respect to the log of
    the kernel hyperparameter is computed.
    Only supported when Y is None.

Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
    Kernel k(X, Y)

K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims)
    The gradient of the kernel k(X, X) with respect to the log of the
    hyperparameter of the kernel. Only returned when eval_gradient
    is True.
        """
        if len(np.atleast_1d(self.length_scale)) > 1:
            raise AttributeError(
                "RationalQuadratic kernel only supports isotropic version, "
                "please use a single scalar for length_scale"
            )
        X = np.atleast_2d(X)
        if Y is None:
            dists = squareform(pdist(X, metric="sqeuclidean"))
            tmp = dists / (2 * self.alpha * self.length_scale**2)
            base = 1 + tmp
            K = base**-self.alpha
            np.fill_diagonal(K, 1)
        else:
            if eval_gradient:
                raise ValueError("Gradient can only be evaluated when Y is None.")
            dists = cdist(X, Y, metric="sqeuclidean")
            K = (1 + dists / (2 * self.alpha * self.length_scale**2)) ** -self.alpha

        if eval_gradient:
            # gradient with respect to length_scale
            if not self.hyperparameter_length_scale.fixed:
                length_scale_gradient = dists * K / (self.length_scale**2 * base)
                length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
            else:  # l is kept fixed
                length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))

            # gradient with respect to alpha
            if not self.hyperparameter_alpha.fixed:
                alpha_gradient = K * (
                    -self.alpha * np.log(base)
                    + dists / (2 * self.length_scale**2 * base)
                )
                alpha_gradient = alpha_gradient[:, :, np.newaxis]
            else:  # alpha is kept fixed
                alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))

            return K, np.dstack((alpha_gradient, length_scale_gradient))
        else:
            return K
 	
r   )r  r  r   r  r  r  r  r  r   )r9   r:   r;   r<   r=   rJ   r   r  r  r   r   r?   r"   r   r   r  r    sS    <@ ' 
) S S E EAF
r   r  c                   Z    \ rS rSrSr    S
S jr\S 5       r\S 5       rSS jr	S r
S	rg)ExpSineSquaredi  a  Exp-Sine-Squared kernel (aka periodic kernel).

The ExpSineSquared kernel allows one to model functions which repeat
themselves exactly. It is parameterized by a length scale
parameter :math:`l>0` and a periodicity parameter :math:`p>0`.
Only the isotropic variant where :math:`l` is a scalar is
supported at the moment. The kernel is given by:

.. math::
    k(x_i, x_j) = \text{exp}\left(-
    \frac{ 2\sin^2(\pi d(x_i, x_j)/p) }{ l^2} \right)

where :math:`l` is the length scale of the kernel, :math:`p` the
periodicity of the kernel and :math:`d(\cdot,\cdot)` is the
Euclidean distance.

Read more in the :ref:`User Guide <gp_kernels>`.

.. versionadded:: 0.18

Parameters
----------

length_scale : float > 0, default=1.0
    The length scale of the kernel.

periodicity : float > 0, default=1.0
    The periodicity of the kernel.

length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
    The lower and upper bound on 'length_scale'.
    If set to "fixed", 'length_scale' cannot be changed during
    hyperparameter tuning.

periodicity_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
    The lower and upper bound on 'periodicity'.
    If set to "fixed", 'periodicity' cannot be changed during
    hyperparameter tuning.

Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import ExpSineSquared
>>> X, y = make_friedman2(n_samples=50, noise=0, random_state=0)
>>> kernel = ExpSineSquared(length_scale=1, periodicity=1)
>>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
...         random_state=0).fit(X, y)
>>> gpr.score(X, y)
0.0144
>>> gpr.predict(X[:2,:], return_std=True)
(array([425.6, 457.5]), array([0.3894, 0.3467]))
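
A minimal sketch (added for illustration) of exact periodicity: inputs that
are an integer number of periods apart have kernel value 1.

>>> import numpy as np
>>> from sklearn.gaussian_process.kernels import ExpSineSquared
>>> k = ExpSineSquared(length_scale=1.0, periodicity=2.0)
>>> X = np.array([[0.0], [2.0], [4.0]])
>>> bool(np.allclose(k(X), 1.0))
True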
    """

    def __init__(
        self,
        length_scale=1.0,
        periodicity=1.0,
        length_scale_bounds=(1e-5, 1e5),
        periodicity_bounds=(1e-5, 1e5),
    ):
        self.length_scale = length_scale
        self.periodicity = periodicity
        self.length_scale_bounds = length_scale_bounds
        self.periodicity_bounds = periodicity_bounds

    @property
    def hyperparameter_length_scale(self):
        """Returns the length scale"""
        return Hyperparameter("length_scale", "numeric", self.length_scale_bounds)

    @property
    def hyperparameter_periodicity(self):
        return Hyperparameter("periodicity", "numeric", self.periodicity_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.

        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the log of
            the kernel hyperparameter is computed.
            Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional
            The gradient of the kernel k(X, X) with respect to the log of the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        X = np.atleast_2d(X)
        if Y is None:
            dists = squareform(pdist(X, metric="euclidean"))
            arg = np.pi * dists / self.periodicity
            sin_of_arg = np.sin(arg)
            K = np.exp(-2 * (sin_of_arg / self.length_scale) ** 2)
        else:
            if eval_gradient:
                raise ValueError("Gradient can only be evaluated when Y is None.")
            dists = cdist(X, Y, metric="euclidean")
            K = np.exp(
                -2 * (np.sin(np.pi / self.periodicity * dists) / self.length_scale) ** 2
            )

        if eval_gradient:
            cos_of_arg = np.cos(arg)
            # gradient with respect to length_scale
            if not self.hyperparameter_length_scale.fixed:
                length_scale_gradient = 4 / self.length_scale**2 * sin_of_arg**2 * K
                length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
            else:  # length_scale is kept fixed
                length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
            # gradient with respect to p
            if not self.hyperparameter_periodicity.fixed:
                periodicity_gradient = (
                    4 * arg / self.length_scale**2 * cos_of_arg * sin_of_arg * K
                )
                periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
            else:  # p is kept fixed
                periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))

            return K, np.dstack((length_scale_gradient, periodicity_gradient))
        else:
            return K

    def __repr__(self):
        return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
            self.__class__.__name__, self.length_scale, self.periodicity
        )


class DotProduct(Kernel):
    r"""Dot-Product kernel.

The DotProduct kernel is non-stationary and can be obtained from linear
regression by putting :math:`N(0, 1)` priors on the coefficients
of :math:`x_d (d = 1, . . . , D)` and a prior of :math:`N(0, \sigma_0^2)`
on the bias. The DotProduct kernel is invariant to a rotation of
the coordinates about the origin, but not translations.
It is parameterized by a parameter sigma_0 :math:`\sigma_0`
which controls the inhomogeneity of the kernel. For :math:`\sigma_0^2 = 0`,
the kernel is called the homogeneous linear kernel, otherwise
it is inhomogeneous. The kernel is given by

.. math::
    k(x_i, x_j) = \sigma_0^2 + x_i \cdot x_j

The DotProduct kernel is commonly combined with exponentiation.

See [1]_, Chapter 4, Section 4.2, for further details regarding the
DotProduct kernel.

Read more in the :ref:`User Guide <gp_kernels>`.

.. versionadded:: 0.18

Parameters
----------
sigma_0 : float >= 0, default=1.0
    Parameter controlling the inhomogeneity of the kernel. If sigma_0=0,
    the kernel is homogeneous.

sigma_0_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
    The lower and upper bound on 'sigma_0'.
    If set to "fixed", 'sigma_0' cannot be changed during
    hyperparameter tuning.

References
----------
.. [1] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
    "Gaussian Processes for Machine Learning". The MIT Press.
    <http://www.gaussianprocess.org/gpml/>`_

Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = DotProduct() + WhiteKernel()
>>> gpr = GaussianProcessRegressor(kernel=kernel,
...         random_state=0).fit(X, y)
>>> gpr.score(X, y)
0.3680
>>> gpr.predict(X[:2,:], return_std=True)
(array([653.0, 592.1]), array([316.6, 316.6]))
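
A minimal sketch (added for illustration) of the definition
:math:`k(x_i, x_j) = \sigma_0^2 + x_i \cdot x_j`:

>>> import numpy as np
>>> from sklearn.gaussian_process.kernels import DotProduct
>>> k = DotProduct(sigma_0=1.0)
>>> X = np.array([[1.0, 2.0], [3.0, 4.0]])
>>> float(k(X)[0, 1])  # 1**2 + (1*3 + 2*4)
12.0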
    """

    def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
        self.sigma_0 = sigma_0
        self.sigma_0_bounds = sigma_0_bounds

    @property
    def hyperparameter_sigma_0(self):
        return Hyperparameter("sigma_0", "numeric", self.sigma_0_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
    Left argument of the returned kernel k(X, Y)

Y : ndarray of shape (n_samples_Y, n_features), default=None
    Right argument of the returned kernel k(X, Y). If None, k(X, X)
    is evaluated instead.

eval_gradient : bool, default=False
    Determines whether the gradient with respect to the log of
    the kernel hyperparameter is computed.
    Only supported when Y is None.

Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
    Kernel k(X, Y)

K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional
    The gradient of the kernel k(X, X) with respect to the log of the
    hyperparameter of the kernel. Only returned when `eval_gradient`
    is True.
        """
        X = np.atleast_2d(X)
        if Y is None:
            K = np.inner(X, X) + self.sigma_0**2
        else:
            if eval_gradient:
                raise ValueError("Gradient can only be evaluated when Y is None.")
            K = np.inner(X, Y) + self.sigma_0**2

        if eval_gradient:
            if not self.hyperparameter_sigma_0.fixed:
                K_gradient = np.empty((K.shape[0], K.shape[1], 1))
                K_gradient[..., 0] = 2 * self.sigma_0**2
                return K, K_gradient
            else:
                return K, np.empty((K.shape[0], K.shape[1], 0))
        else:
            return K

    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.

Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
    Left argument of the returned kernel k(X, Y).

Returns
-------
K_diag : ndarray of shape (n_samples_X,)
    Diagonal of kernel k(X, X).
zij,ij->ir   )r   einsumr	  r   s     r   r   DotProduct.diag  s"    " yyQ*T\\1_<<r   c                     g)r   Fr"   ro   s    r   r   DotProduct.is_stationary  s    r   c                 b    SR                  U R                  R                  U R                  5      $ )Nz{0}(sigma_0={1:.3g}))r   r0   r9   r	  ro   s    r   r   DotProduct.__repr__  s#    %,,T^^-D-DdllSSr   r  r  r   )r9   r:   r;   r<   r=   rJ   r   r  r   r   r   r   r?   r"   r   r   r  r  1  s9    6p- I I,\=&Tr   r  c                 l   U" U 4U-   6 n[         R                  " UR                  S   UR                  S   [        U 5      4[        5      n[         R                  " [        U 5      4[        5      n[        [        U 5      5       H,  nSXg'   X&-  nU" X-   4U-   6 U-
  X   -  US S 2S S 2U4'   SXg'   M.     U$ )Nr   r   r  r  )r   r  r   rQ   r   r   )	xkr  epsilonrW   f0gradeir  ds	            r   r  r    s    	
bUT\	B88RXXa["((1+s2w7?D	3r7*e	$B3r7^LbfY-/"4<Q1W	 
 Kr   c                   V    \ rS rSrSr    SS jr\S 5       rSS jrS r	S r
S	 rS
rg)PairwiseKerneli  a<  Wrapper for kernels in sklearn.metrics.pairwise.

A thin wrapper around the functionality of the kernels in
sklearn.metrics.pairwise.

Note: Evaluation of eval_gradient is not analytic but numeric and all
      kernels support only isotropic distances. The parameter gamma is
      considered to be a hyperparameter and may be optimized. The other
      kernel parameters are set directly at initialization and are kept
      fixed.

.. versionadded:: 0.18

Parameters
----------
gamma : float, default=1.0
    Parameter gamma of the pairwise kernel specified by metric. It should
    be positive.

gamma_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
    The lower and upper bound on 'gamma'.
    If set to "fixed", 'gamma' cannot be changed during
    hyperparameter tuning.

metric : {"linear", "additive_chi2", "chi2", "poly", "polynomial", "rbf",
        "laplacian", "sigmoid", "cosine"} or callable, default="linear"
    The metric to use when calculating kernel between instances in a
    feature array. If metric is a string, it must be one of the metrics
    in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
    If metric is "precomputed", X is assumed to be a kernel matrix.
    Alternatively, if metric is a callable function, it is called on each
    pair of instances (rows) and the resulting value recorded. The callable
    should take two arrays from X as input and return a value indicating
    the distance between them.

pairwise_kernels_kwargs : dict, default=None
    All entries of this dict (if any) are passed as keyword arguments to
    the pairwise kernel function.

Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.gaussian_process import GaussianProcessClassifier
>>> from sklearn.gaussian_process.kernels import PairwiseKernel
>>> X, y = load_iris(return_X_y=True)
>>> kernel = PairwiseKernel(metric='rbf')
>>> gpc = GaussianProcessClassifier(kernel=kernel,
...         random_state=0).fit(X, y)
>>> gpc.score(X, y)
0.9733
>>> gpc.predict_proba(X[:2,:])
array([[0.8880, 0.05663, 0.05532],
       [0.8676, 0.07073, 0.06165]])
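
A minimal sketch (added for illustration): with ``metric="rbf"`` the pairwise
kernel exp(-gamma * d(x, y)^2) matches an RBF kernel whenever
gamma = 1 / (2 * length_scale**2).

>>> import numpy as np
>>> from sklearn.gaussian_process.kernels import PairwiseKernel, RBF
>>> X = np.array([[0.0], [1.0]])
>>> pk = PairwiseKernel(gamma=0.5, metric="rbf")
>>> bool(np.allclose(pk(X), RBF(length_scale=1.0)(X)))
True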
Nc                 4    Xl         X l        X0l        X@l        g r3   r
   gamma_boundsr  pairwise_kernels_kwargs)r5   r
   r%  r  r&  s        r   rJ   PairwiseKernel.__init__	  s     
('>$r   c                 0    [        SSU R                  5      $ )Nr
   ru  )r    r%  ro   s    r   hyperparameter_gamma#PairwiseKernel.hyperparameter_gamma	  r  r   c                   ^ ^^^ T R                   mT R                   c  0 m[        R                  " T5      m[        TT4T R                  T R
                  SS.TD6nU(       ar  T R                  R                  (       a5  U[        R                  " TR                  S   TR                  S   S45      4$ UUUU 4S jnU[        T R                  US5      4$ U$ )r  Tr  r
   filter_paramsr   c                 b   > [        TT4TR                  [        R                  " U 5      SS.TD6$ )NTr,  )r   r  r   r   )r
   r   r   r&  r5   s    r   r  "PairwiseKernel.__call__.<locals>.f?	  s<    +  ${{ ffUm&* 2 r   r  )r&  r   r'   r   r  r
   r)  r$   r}  r   r  rj   )r5   r   r   r   r   r  r&  s   ```   @r   r   PairwiseKernel.__call__	  s    8 #'">">''/&(#MM!
 ;;**
 &
 ((.."((AGGAJ
A#>???  .Q>>>Hr   c                 N    [         R                  " U SU5      R                  5       $ )r   r   )r   apply_along_axisr  r   s     r   r   PairwiseKernel.diagM	  s"    $ ""4A.4466r   c                      U R                   S;   $ )r   )rbfr  ro   s    r   r   PairwiseKernel.is_stationarya	  s    {{g%%r   c                 x    SR                  U R                  R                  U R                  U R                  5      $ )Nz{0}(gamma={1}, metric={2}))r   r0   r9   r
   r  ro   s    r   r   PairwiseKernel.__repr__e	  s.    +22NN##TZZ
 	
r   r$  )r  r  linearNr   )r9   r:   r;   r<   r=   rJ   r   r)  r   r   r   r   r?   r"   r   r   r"  r"    sF    6t   $
? E E:x7(&
r   r"  )r"   )/r=   r  r   abcr   r   collectionsr   inspectr   numpyr   scipy.spatial.distancer   r   r	   scipy.specialr
   r   r  r   
exceptionsr   metrics.pairwiser   utils.validationr   r   r    rC   r   r   r   r   r  r   r   r   r   r  r  r  r  r  r  r  r"  r"   r   r   <module>rC     sk   X.   ' "   ; ; #  + / +	`
Q`
Ftw tn	# #4 
 
aHV aHHsNV sNl^4. ^4Ba4n a4H|1V |1~GA*,> GATx
');V x
vV
!6 VrES EPY
-/Df Y
xN
*,A6 N
bGT GTV	`
V `
r   