
"""Gaussian processes classification."""

from numbers import Integral
from operator import itemgetter

import numpy as np
import scipy.optimize
from scipy.linalg import cho_solve, cholesky, solve
from scipy.special import erf, expit

from ..base import BaseEstimator, ClassifierMixin, _fit_context, clone
from ..multiclass import OneVsOneClassifier, OneVsRestClassifier
from ..preprocessing import LabelEncoder
from ..utils import check_random_state
from ..utils._param_validation import Interval, StrOptions
from ..utils.optimize import _check_optimize_result
from ..utils.validation import check_is_fitted, validate_data
from .kernels import RBF, CompoundKernel, Kernel
from .kernels import ConstantKernel as C

# Values required for approximating the logistic sigmoid by
# error functions. coefs are obtained via:
# x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])
# b = logistic(x)
# A = (erf(np.dot(x, self.lambdas)) + 1) / 2
# coefs = lstsq(A, b)[0]
LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis]
COEFS = np.array(
    [-1854.8214151, 3516.89893646, 221.29346712, 128.12323805, -2010.49422654]
)[:, np.newaxis]


class _BinaryGaussianProcessClassifierLaplace(BaseEstimator):
    """Binary Gaussian process classification based on Laplace approximation.

The implementation is based on Algorithm 3.1, 3.2, and 5.1 from [RW2006]_.

Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.

Currently, the implementation is restricted to using the logistic link
function.
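
That is, the likelihood of a label :math:`y \in \{0, 1\}` given the latent
function value :math:`f(x)` is modeled as

.. math::
    p(y = 1 \mid f(x)) = \sigma(f(x)) = \frac{1}{1 + e^{-f(x)}}.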

.. versionadded:: 0.18

Parameters
----------
kernel : kernel instance, default=None
    The kernel specifying the covariance function of the GP. If None is
    passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
    the kernel's hyperparameters are optimized during fitting.

optimizer : 'fmin_l_bfgs_b' or callable, default='fmin_l_bfgs_b'
    Can either be one of the internally supported optimizers for optimizing
    the kernel's parameters, specified by a string, or an externally
    defined optimizer passed as a callable. If a callable is passed, it
    must have the signature::

        def optimizer(obj_func, initial_theta, bounds):
            # * 'obj_func' is the objective function to be maximized, which
            #   takes the hyperparameters theta as parameter and an
            #   optional flag eval_gradient, which determines if the
            #   gradient is returned additionally to the function value
            # * 'initial_theta': the initial value for theta, which can be
            #   used by local optimizers
            # * 'bounds': the bounds on the values of theta
            ....
            # Returned are the best found hyperparameters theta and
            # the corresponding value of the target function.
            return theta_opt, func_min

    By default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize
    is used. If None is passed, the kernel's parameters are kept fixed.
    Available internal optimizers are::

        'fmin_l_bfgs_b'

n_restarts_optimizer : int, default=0
    The number of restarts of the optimizer for finding the kernel's
    parameters which maximize the log-marginal likelihood. The first run
    of the optimizer is performed from the kernel's initial parameters,
    the remaining ones (if any) from thetas sampled log-uniform randomly
    from the space of allowed theta-values. If greater than 0, all bounds
    must be finite. Note that n_restarts_optimizer=0 implies that one
    run is performed.

max_iter_predict : int, default=100
    The maximum number of iterations in Newton's method for approximating
    the posterior during predict. Smaller values will reduce computation
    time at the cost of worse results.

warm_start : bool, default=False
    If warm-starts are enabled, the solution of the last Newton iteration
    on the Laplace approximation of the posterior mode is used as
    initialization for the next call of _posterior_mode(). This can speed
    up convergence when _posterior_mode is called several times on similar
    problems as in hyperparameter optimization. See :term:`the Glossary
    <warm_start>`.

copy_X_train : bool, default=True
    If True, a persistent copy of the training data is stored in the
    object. Otherwise, just a reference to the training data is stored,
    which might cause predictions to change if the data is modified
    externally.

random_state : int, RandomState instance or None, default=None
    Determines random number generation used to draw the initial
    hyperparameter values for the optimizer restarts.
    Pass an int for reproducible results across multiple function calls.
    See :term:`Glossary <random_state>`.

Attributes
----------
X_train_ : array-like of shape (n_samples, n_features) or list of object
    Feature vectors or other representations of training data (also
    required for prediction).

y_train_ : array-like of shape (n_samples,)
    Target values in training data (also required for prediction).

classes_ : array-like of shape (n_classes,)
    Unique class labels.

kernel_ : kernel instance
    The kernel used for prediction. The structure of the kernel is the
    same as the one passed as parameter but with optimized hyperparameters.

L_ : array-like of shape (n_samples, n_samples)
    Lower-triangular Cholesky decomposition of the kernel in X_train_

pi_ : array-like of shape (n_samples,)
    The probabilities of the positive class for the training points
    X_train_

W_sr_ : array-like of shape (n_samples,)
    Square root of W, the negative Hessian of the log-likelihood of the
    latent function values for the observed labels. Since W is diagonal,
    only the diagonal of sqrt(W) is stored.

log_marginal_likelihood_value_ : float
    The log-marginal-likelihood of ``self.kernel_.theta``

References
----------
.. [RW2006] `Carl E. Rasmussen and Christopher K.I. Williams,
   "Gaussian Processes for Machine Learning",
   MIT Press 2006 <https://www.gaussianprocess.org/gpml/chapters/RW.pdf>`_
Nfmin_l_bfgs_br   d   FT)	optimizern_restarts_optimizermax_iter_predict
warm_startcopy_X_trainrandom_statec                X    Xl         X l        X0l        X@l        XPl        X`l        Xpl        g Nkernelr"   r#   r$   r%   r&   r'   )selfr+   r"   r#   r$   r%   r&   r'   s           P/var/www/html/venv/lib/python3.13/site-packages/sklearn/gaussian_process/_gpc.py__init__0_BinaryGaussianProcessClassifierLaplace.__init__   s+     "$8! 0$((    c           	      R  ^  T R                   c  [        SSS9[        SSS9-  T l        O[	        T R                   5      T l        [        T R                  5      T l        T R                  (       a  [        R                  " U5      OUT l        [        5       nUR                  U5      T l        UR                  T l        T R                  R                   S:  a0  [#        T R$                  R&                  < ST R                  < 35      eT R                  R                   S:X  aC  [#        S	R)                  T R$                  R&                  T R                  R                   5      5      eT R*                  Gb  T R                  R,                  S
:  Ga  SU 4S jjnT R/                  UT R                  R0                  T R                  R2                  5      /nT R4                  S
:  a  [        R6                  " T R                  R2                  5      R9                  5       (       d  [#        S5      eT R                  R2                  n[;        T R4                  5       Hb  n[        R<                  " T R                  R?                  USS2S
4   USS2S4   5      5      nURA                  T R/                  XHU5      5        Md     [C        [E        [G        S5      U5      5      n	U[        RH                  " U	5         S
   T R                  l        T R                  RK                  5         [        RL                  " U	5      * T l'        O*T RQ                  T R                  R0                  5      T l'        T R                  T R                  5      n
T RS                  U
SS9u  nu  T l*        T l+        T l,          nT $ )aA  Fit Gaussian process classification model.

Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
    Feature vectors or other representations of training data.

y : array-like of shape (n_samples,)
    Target values, must be binary.

Returns
-------
self : returns an instance of self.
        """
        if self.kernel is None:  # Use an RBF kernel as default
            self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF(
                1.0, length_scale_bounds="fixed"
            )
        else:
            self.kernel_ = clone(self.kernel)

        self.rng = check_random_state(self.random_state)

        self.X_train_ = np.copy(X) if self.copy_X_train else X

        # Encode class labels and check that it is a binary classification
        # problem
        label_encoder = LabelEncoder()
        self.y_train_ = label_encoder.fit_transform(y)
        self.classes_ = label_encoder.classes_
        if self.classes_.size > 2:
            raise ValueError(
                "%s supports only binary classification. y contains classes %s"
                % (self.__class__.__name__, self.classes_)
            )
        elif self.classes_.size == 1:
            raise ValueError(
                "{0:s} requires 2 classes; got {1:d} class".format(
                    self.__class__.__name__, self.classes_.size
                )
            )

        if self.optimizer is not None and self.kernel_.n_dims > 0:
            # Choose hyperparameters based on maximizing the log-marginal
            # likelihood (potentially starting from several initial values)
            def obj_func(theta, eval_gradient=True):
                if eval_gradient:
                    lml, grad = self.log_marginal_likelihood(
                        theta, eval_gradient=True, clone_kernel=False
                    )
                    return -lml, -grad
                else:
                    return -self.log_marginal_likelihood(theta, clone_kernel=False)

            # First optimize starting from theta specified in kernel
            optima = [
                self._constrained_optimization(
                    obj_func, self.kernel_.theta, self.kernel_.bounds
                )
            ]

            # Additional runs are performed from log-uniform chosen initial
            # theta
            if self.n_restarts_optimizer > 0:
                if not np.isfinite(self.kernel_.bounds).all():
                    raise ValueError(
                        "Multiple optimizer restarts (n_restarts_optimizer>0) "
                        "requires that all bounds are finite."
                    )
                bounds = self.kernel_.bounds
                for iteration in range(self.n_restarts_optimizer):
                    theta_initial = np.exp(
                        self.rng.uniform(bounds[:, 0], bounds[:, 1])
                    )
                    optima.append(
                        self._constrained_optimization(obj_func, theta_initial, bounds)
                    )
            # Select result from run with minimal (negative) log-marginal
            # likelihood
            lml_values = list(map(itemgetter(1), optima))
            self.kernel_.theta = optima[np.argmin(lml_values)][0]
            self.kernel_._check_bounds_params()

            self.log_marginal_likelihood_value_ = -np.min(lml_values)
        else:
            self.log_marginal_likelihood_value_ = self.log_marginal_likelihood(
                self.kernel_.theta
            )

        # Precompute quantities required for predictions which are independent
        # of actual query points
        K = self.kernel_(self.X_train_)

        _, (self.pi_, self.W_sr_, self.L_, _, _) = self._posterior_mode(
            K, return_temporaries=True
        )

        return self

    def predict(self, X):
        """Perform classification on an array of test vectors X.

Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
    Query points where the GP is evaluated for classification.

Returns
-------
C : ndarray of shape (n_samples,)
    Predicted target values for X, values are from ``classes_``.
        """
        check_is_fitted(self)

        # As discussed in Section 3.4.2 of GPML, for making hard binary
        # decisions it is enough to compute the MAP of the posterior and
        # pass it through the link function
        K_star = self.kernel_(self.X_train_, X)  # K_star = k(x_star)
        f_star = K_star.T.dot(self.y_train_ - self.pi_)  # Line 4 (Algorithm 3.2)

        return np.where(f_star > 0, self.classes_[1], self.classes_[0])

    def predict_proba(self, X):
        """Return probability estimates for the test vector X.

Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
    Query points where the GP is evaluated for classification.

Returns
-------
C : array-like of shape (n_samples, n_classes)
    Returns the probability of the samples for each class in
    the model. The columns correspond to the classes in sorted
    order, as they appear in the attribute ``classes_``.
        """
        check_is_fitted(self)

        # Compute the mean and variance of the latent function
        # (Lines 4 and 6 in Algorithm 3.2 of GPML)
        latent_mean, latent_var = self.latent_mean_and_variance(X)

        # Line 7: Integrate the logistic sigmoid against the latent Gaussian.
        # The sigmoid is approximated by a linear combination of five error
        # functions (Williams & Barber, "Bayesian Classification with Gaussian
        # Processes", Appendix A), for which the Gaussian integral is analytic.
        alpha = 1 / (2 * latent_var)
        gamma = LAMBDAS * latent_mean
        integrals = (
            np.sqrt(np.pi / alpha)
            * erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2)))
            / (2 * np.sqrt(latent_var * 2 * np.pi))
        )
        pi_star = (COEFS * integrals).sum(axis=0) + 0.5 * COEFS.sum()

        return np.vstack((1 - pi_star, pi_star)).T

    def log_marginal_likelihood(
        self, theta=None, eval_gradient=False, clone_kernel=True
    ):
        """Returns log-marginal likelihood of theta for training data.

Parameters
----------
theta : array-like of shape (n_kernel_params,), default=None
    Kernel hyperparameters for which the log-marginal likelihood is
    evaluated. If None, the precomputed log_marginal_likelihood
    of ``self.kernel_.theta`` is returned.

eval_gradient : bool, default=False
    If True, the gradient of the log-marginal likelihood with respect
    to the kernel hyperparameters at position theta is returned
    additionally. If True, theta must not be None.

clone_kernel : bool, default=True
    If True, the kernel attribute is copied. If False, the kernel
    attribute is modified, but may result in a performance improvement.

Returns
-------
log_likelihood : float
    Log-marginal likelihood of theta for training data.

log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
    Gradient of the log-marginal likelihood with respect to the kernel
    hyperparameters at position theta.
    Only returned when `eval_gradient` is True.
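
For illustration, with a hypothetical fitted estimator ``clf``, the
quantities that ``obj_func`` negates during hyperparameter optimization
can be obtained via::

    lml, grad = clf.log_marginal_likelihood(clf.kernel_.theta, eval_gradient=True)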
        """
        if theta is None:
            if eval_gradient:
                raise ValueError("Gradient can only be evaluated for theta!=None")
            return self.log_marginal_likelihood_value_

        if clone_kernel:
            kernel = self.kernel_.clone_with_theta(theta)
        else:
            kernel = self.kernel_
            kernel.theta = theta

        if eval_gradient:
            K, K_gradient = kernel(self.X_train_, eval_gradient=True)
        else:
            K = kernel(self.X_train_)

        # Compute log-marginal-likelihood Z and also store some temporaries
        # which can be reused for computing Z's gradient
        Z, (pi, W_sr, L, b, a) = self._posterior_mode(K, return_temporaries=True)

        if not eval_gradient:
            return Z

        # Compute gradient based on Algorithm 5.1 of GPML
        d_Z = np.empty(theta.shape[0])
        # XXX: Get rid of the np.diag() in the next line
        R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr))  # Line 7
        C = solve(L, W_sr[:, np.newaxis] * K)  # Line 8
        # Line 9: (use einsum to compute np.diag(C.T.dot(C)))
        s_2 = (
            -0.5
            * (np.diag(K) - np.einsum("ij, ij -> j", C, C))
            * (pi * (1 - pi) * (1 - 2 * pi))
        )  # third derivative

        for j in range(d_Z.shape[0]):
            C = K_gradient[:, :, j]  # Line 11
            # Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C)))
            s_1 = 0.5 * a.T.dot(C).dot(a) - 0.5 * R.T.ravel().dot(C.ravel())

            b = C.dot(self.y_train_ - pi)  # Line 13
            s_3 = b - K.dot(R.dot(b))  # Line 14

            d_Z[j] = s_1 + s_2.T.dot(s_3)  # Line 15

        return Z, d_Z

    def latent_mean_and_variance(self, X):
        """Compute the mean and variance of the latent function values.

Based on algorithm 3.2 of [RW2006]_, this function returns the latent
mean (Line 4) and variance (Line 6) of the Gaussian process
classification model.

Note that this function is only supported for binary classification.

Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
    Query points where the GP is evaluated for classification.

Returns
-------
latent_mean : array-like of shape (n_samples,)
    Mean of the latent function values at the query points.

latent_var : array-like of shape (n_samples,)
    Variance of the latent function values at the query points.
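
Notes
-----
As a summary of the computation (Algorithm 3.2 of [RW2006]_), writing
:math:`K_* = k(X_{\mathrm{train}}, X_*)`, the returned quantities are

.. math::
    \bar{f}_* = K_*^\top (y - \pi), \qquad
    v = L^{-1} W^{1/2} K_*, \qquad
    \mathrm{Var}[f_*] = \mathrm{diag}(k(X_*, X_*)) - \mathrm{diag}(v^\top v),

where :math:`\pi`, :math:`W^{1/2}` and :math:`L` are the training-time
quantities stored in ``pi_``, ``W_sr_`` and ``L_``.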
Nzij,ij->j)r   rB   rF   rn   ro   rH   r_   r   ra   r`   rD   r   r   r   )r,   rb   rq   r   vr   s         r-   rx   @_BinaryGaussianProcessClassifierLaplace.latent_mean_and_variance  s    , 	 dmmQ/hhll4==488#;<$''4::am4v=>\\&&q)BIIj!,GG
&&r0   c           	      |   U R                   (       aL  [        U S5      (       a;  U R                  R                  U R                  R                  :X  a  U R                  nO-[
        R                  " U R                  [
        R                  S9n[
        R                  * n[        U R                  5       GHp  n[        U5      nUSU-
  -  n[
        R                  " U5      nUSS2[
        R                  4   U-  n	[
        R                  " UR                  S   5      X-  -   n
[        U
SS9nXs-  U R                  U-
  -   nX[!        US4U	R#                  U5      5      -  -
  nUR#                  U5      nSUR$                  R#                  U5      -  [
        R&                  " [
        R(                  " U R                  S	-  S-
  * U-  5      5      R+                  5       -
  [
        R,                  " [
        R.                  " U5      5      R+                  5       -
  nX-
  S
:  a    OUnGMs     X0l        U(       a	  UWWWWW44$ U$ )a   Mode-finding for binary Laplace GPC and fixed kernel.

This approximates the posterior of the latent function values for given
inputs and target observations with a Gaussian approximation and uses
Newton's iteration to find the mode of this approximation.
f_cached)dtyper   Nr   T)lowerr   r
   g|=)r%   hasattrr   r   rH   rD   
zeros_likefloat64infrT   r$   r	   rz   r   eyer   r   ro   rn   log1prU   r}   logr   )r,   ri   r@   fr9   rj   r{   Wr   W_sr_KBr   r   r   r;   s                  r-   r^   7_BinaryGaussianProcessClassifierLaplace._posterior_mode  s    OOj))##t}}':'::Admm2::>A $&66't,,-AqBa"fA771:D!RZZ-(1,Fqwwqz"V]2A$'A+,A9aY

1>>>AaA
 qsswwqz!((266DMMA$5$9":Q">?@DDFG&&$((*+  ,u4&)#9 .< *Rq!Q,???**r0   c                 <   U R                   S:X  aF  [        R                  R                  XSSUS9n[	        SU5        UR
                  UR                  peXV4$ [        U R                   5      (       a  U R                  XUS9u  pVXV4$ [        SU R                   -  5      e)Nr    zL-BFGS-BT)methodjacrQ   lbfgs)rQ   zUnknown optimizer %s.)	r"   scipyoptimizeminimizer   xfuncallablerK   )r,   r=   initial_thetarQ   opt_res	theta_optfunc_mins          r-   rP   A_BinaryGaussianProcessClassifierLaplace._constrained_optimization  s    >>_,nn--
V . G #7G4")))W[[x "" dnn%%"&..QW."XI "" 4t~~EFFr0   )ra   r`   rF   rI   r&   r   r+   rB   r]   r$   r#   r"   r_   r'   rC   r%   rH   r)   NFT)F)rM   
__module____qualname____firstlineno____doc__r.   rk   rs   r   r9   rx   r^   rP   __static_attributes__ r0   r-   r   r   $   s]    qj ) ")&`DH.%3P =AM^'B8+t#r0   r   c                       \ rS rSr% Sr\S/\" S15      \S/\" \	SSSS9/\" \	SSSS9/S	/S	/S
/\" SS15      /\	S/S.	r
\\S'    SSSSSSSSSS.S jjr\" SS9S 5       rS rS r\S 5       r SS jrS rSrg)GaussianProcessClassifieri  aC  Gaussian process classification (GPC) based on Laplace approximation.

The implementation is based on Algorithm 3.1, 3.2, and 5.1 from [RW2006]_.

Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.

Currently, the implementation is restricted to using the logistic link
function. For multi-class classification, several binary one-versus-rest
classifiers are fitted. Note that this class thus does not implement
a true multi-class Laplace approximation.

Read more in the :ref:`User Guide <gaussian_process>`.

.. versionadded:: 0.18

Parameters
----------
kernel : kernel instance, default=None
    The kernel specifying the covariance function of the GP. If None is
    passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
    the kernel's hyperparameters are optimized during fitting. Also kernel
    cannot be a `CompoundKernel`.

optimizer : 'fmin_l_bfgs_b', callable or None, default='fmin_l_bfgs_b'
    Can either be one of the internally supported optimizers for optimizing
    the kernel's parameters, specified by a string, or an externally
    defined optimizer passed as a callable. If a callable is passed, it
    must have the signature::

        def optimizer(obj_func, initial_theta, bounds):
            # * 'obj_func' is the objective function to be maximized, which
            #   takes the hyperparameters theta as parameter and an
            #   optional flag eval_gradient, which determines if the
            #   gradient is returned additionally to the function value
            # * 'initial_theta': the initial value for theta, which can be
            #   used by local optimizers
            # * 'bounds': the bounds on the values of theta
            ....
            # Returned are the best found hyperparameters theta and
            # the corresponding value of the target function.
            return theta_opt, func_min

    By default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize
    is used. If None is passed, the kernel's parameters are kept fixed.
    Available internal optimizers are::

        'fmin_l_bfgs_b'
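
    For illustration, a minimal callable satisfying the contract above,
    built on :func:`scipy.optimize.minimize` (the name ``my_optimizer`` is
    only an example, not part of the API), could look like::

        from scipy.optimize import minimize

        def my_optimizer(obj_func, initial_theta, bounds):
            # obj_func returns (-lml, -grad) when eval_gradient=True,
            # so a gradient-based minimizer can be used directly.
            res = minimize(obj_func, initial_theta, method="L-BFGS-B",
                           jac=True, bounds=bounds)
            return res.x, res.fun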

n_restarts_optimizer : int, default=0
    The number of restarts of the optimizer for finding the kernel's
    parameters which maximize the log-marginal likelihood. The first run
    of the optimizer is performed from the kernel's initial parameters,
    the remaining ones (if any) from thetas sampled log-uniform randomly
    from the space of allowed theta-values. If greater than 0, all bounds
    must be finite. Note that n_restarts_optimizer=0 implies that one
    run is performed.

max_iter_predict : int, default=100
    The maximum number of iterations in Newton's method for approximating
    the posterior during predict. Smaller values will reduce computation
    time at the cost of worse results.

warm_start : bool, default=False
    If warm-starts are enabled, the solution of the last Newton iteration
    on the Laplace approximation of the posterior mode is used as
    initialization for the next call of _posterior_mode(). This can speed
    up convergence when _posterior_mode is called several times on similar
    problems as in hyperparameter optimization. See :term:`the Glossary
    <warm_start>`.

copy_X_train : bool, default=True
    If True, a persistent copy of the training data is stored in the
    object. Otherwise, just a reference to the training data is stored,
    which might cause predictions to change if the data is modified
    externally.

random_state : int, RandomState instance or None, default=None
    Determines random number generation used to draw the initial
    hyperparameter values for the optimizer restarts.
    Pass an int for reproducible results across multiple function calls.
    See :term:`Glossary <random_state>`.

multi_class : {'one_vs_rest', 'one_vs_one'}, default='one_vs_rest'
    Specifies how multi-class classification problems are handled.
    Supported are 'one_vs_rest' and 'one_vs_one'. In 'one_vs_rest',
    one binary Gaussian process classifier is fitted for each class, which
    is trained to separate this class from the rest. In 'one_vs_one', one
    binary Gaussian process classifier is fitted for each pair of classes,
    which is trained to separate these two classes. The predictions of
    these binary predictors are combined into multi-class predictions.
    Note that 'one_vs_one' does not support predicting probability
    estimates.
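
    As a hypothetical three-class illustration (as noted above, probability
    estimates are unavailable in this mode)::

        gpc = GaussianProcessClassifier(multi_class="one_vs_one").fit(X, y)
        gpc.predict(X)         # combines the pairwise binary predictions
        # gpc.predict_proba(X) would raise a ValueError in "one_vs_one" mode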

n_jobs : int, default=None
    The number of jobs to use for the computation: the specified
    multiclass problems are computed in parallel.
    ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
    ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
    for more details.

Attributes
----------
base_estimator_ : ``Estimator`` instance
    The estimator instance that defines the likelihood function
    using the observed data.

kernel_ : kernel instance
    The kernel used for prediction. In case of binary classification,
    the structure of the kernel is the same as the one passed as parameter
    but with optimized hyperparameters. In case of multi-class
    classification, a CompoundKernel is returned which consists of the
    different kernels used in the one-versus-rest classifiers.

log_marginal_likelihood_value_ : float
    The log-marginal-likelihood of ``self.kernel_.theta``

classes_ : array-like of shape (n_classes,)
    Unique class labels.

n_classes_ : int
    The number of classes in the training data

n_features_in_ : int
    Number of features seen during :term:`fit`.

    .. versionadded:: 0.24

feature_names_in_ : ndarray of shape (`n_features_in_`,)
    Names of features seen during :term:`fit`. Defined only when `X`
    has feature names that are all strings.

    .. versionadded:: 1.0

See Also
--------
GaussianProcessRegressor : Gaussian process regression (GPR).

References
----------
.. [RW2006] `Carl E. Rasmussen and Christopher K.I. Williams,
   "Gaussian Processes for Machine Learning",
   MIT Press 2006 <https://www.gaussianprocess.org/gpml/chapters/RW.pdf>`_

Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.gaussian_process import GaussianProcessClassifier
>>> from sklearn.gaussian_process.kernels import RBF
>>> X, y = load_iris(return_X_y=True)
>>> kernel = 1.0 * RBF(1.0)
>>> gpc = GaussianProcessClassifier(kernel=kernel,
...         random_state=0).fit(X, y)
>>> gpc.score(X, y)
0.9866...
>>> gpc.predict_proba(X[:2,:])
array([[0.83548752, 0.03228706, 0.13222543],
       [0.79064206, 0.06525643, 0.14410151]])

For a comparison of the GaussianProcessClassifier with other classifiers see:
:ref:`sphx_glr_auto_examples_classification_plot_classification_probability.py`.
    """

    _parameter_constraints: dict = {
        "kernel": [Kernel, None],
        "optimizer": [StrOptions({"fmin_l_bfgs_b"}), callable, None],
        "n_restarts_optimizer": [Interval(Integral, 0, None, closed="left")],
        "max_iter_predict": [Interval(Integral, 1, None, closed="left")],
        "warm_start": ["boolean"],
        "copy_X_train": ["boolean"],
        "random_state": ["random_state"],
        "multi_class": [StrOptions({"one_vs_rest", "one_vs_one"})],
        "n_jobs": [Integral, None],
    }

    def __init__(
        self,
        kernel=None,
        *,
        optimizer="fmin_l_bfgs_b",
        n_restarts_optimizer=0,
        max_iter_predict=100,
        warm_start=False,
        copy_X_train=True,
        random_state=None,
        multi_class="one_vs_rest",
        n_jobs=None,
    ):
        self.kernel = kernel
        self.optimizer = optimizer
        self.n_restarts_optimizer = n_restarts_optimizer
        self.max_iter_predict = max_iter_predict
        self.warm_start = warm_start
        self.copy_X_train = copy_X_train
        self.random_state = random_state
        self.multi_class = multi_class
        self.n_jobs = n_jobs

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y):
        """Fit Gaussian process classification model.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or list of object
            Feature vectors or other representations of training data.

        y : array-like of shape (n_samples,)
            Target values, must be binary.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        if isinstance(self.kernel, CompoundKernel):
            raise ValueError("kernel cannot be a CompoundKernel")

        if self.kernel is None or self.kernel.requires_vector_input:
            X, y = validate_data(
                self, X, y, multi_output=False, ensure_2d=True, dtype="numeric"
            )
        else:
            X, y = validate_data(
                self, X, y, multi_output=False, ensure_2d=False, dtype=None
            )

        self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(
            kernel=self.kernel,
            optimizer=self.optimizer,
            n_restarts_optimizer=self.n_restarts_optimizer,
            max_iter_predict=self.max_iter_predict,
            warm_start=self.warm_start,
            copy_X_train=self.copy_X_train,
            random_state=self.random_state,
        )

        self.classes_ = np.unique(y)
        self.n_classes_ = self.classes_.size
        if self.n_classes_ == 1:
            raise ValueError(
                "GaussianProcessClassifier requires 2 or more distinct "
                "classes; got %d class (only class %s is present)"
                % (self.n_classes_, self.classes_[0])
            )
        if self.n_classes_ > 2:
            if self.multi_class == "one_vs_rest":
                self.base_estimator_ = OneVsRestClassifier(
                    self.base_estimator_, n_jobs=self.n_jobs
                )
            elif self.multi_class == "one_vs_one":
                self.base_estimator_ = OneVsOneClassifier(
                    self.base_estimator_, n_jobs=self.n_jobs
                )
            else:
                raise ValueError("Unknown multi-class mode %s" % self.multi_class)

        self.base_estimator_.fit(X, y)

        if self.n_classes_ > 2:
            self.log_marginal_likelihood_value_ = np.mean(
                [
                    estimator.log_marginal_likelihood()
                    for estimator in self.base_estimator_.estimators_
                ]
            )
        else:
            self.log_marginal_likelihood_value_ = (
                self.base_estimator_.log_marginal_likelihood()
            )

        return self

    def predict(self, X):
        """Perform classification on an array of test vectors X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or list of object
            Query points where the GP is evaluated for classification.

        Returns
        -------
        C : ndarray of shape (n_samples,)
            Predicted target values for X, values are from ``classes_``.
        """
        check_is_fitted(self)

        if self.kernel is None or self.kernel.requires_vector_input:
            X = validate_data(self, X, ensure_2d=True, dtype="numeric", reset=False)
        else:
            X = validate_data(self, X, ensure_2d=False, dtype=None, reset=False)

        return self.base_estimator_.predict(X)

    def predict_proba(self, X):
        """Return probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or list of object
            Query points where the GP is evaluated for classification.

        Returns
        -------
        C : array-like of shape (n_samples, n_classes)
            Returns the probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute :term:`classes_`.
        """
        check_is_fitted(self)
        if self.n_classes_ > 2 and self.multi_class == "one_vs_one":
            raise ValueError(
                "one_vs_one multi-class mode does not support "
                "predicting probability estimates. Use one_vs_rest mode instead."
            )

        if self.kernel is None or self.kernel.requires_vector_input:
            X = validate_data(self, X, ensure_2d=True, dtype="numeric", reset=False)
        else:
            X = validate_data(self, X, ensure_2d=False, dtype=None, reset=False)

        return self.base_estimator_.predict_proba(X)

    @property
    def kernel_(self):
        """Return the kernel of the base estimator."""
        if self.n_classes_ == 2:
            return self.base_estimator_.kernel_
        else:
            return CompoundKernel(
                [estimator.kernel_ for estimator in self.base_estimator_.estimators_]
            )

    def log_marginal_likelihood(
        self, theta=None, eval_gradient=False, clone_kernel=True
    ):
        """Return log-marginal likelihood of theta for training data.

        In the case of multi-class classification, the mean log-marginal
        likelihood of the one-versus-rest classifiers is returned.

        Parameters
        ----------
        theta : array-like of shape (n_kernel_params,), default=None
            Kernel hyperparameters for which the log-marginal likelihood is
            evaluated. In the case of multi-class classification, theta may
            be the hyperparameters of the compound kernel or of an individual
            kernel. In the latter case, all individual kernels get assigned
            the same theta values. If None, the precomputed
            log_marginal_likelihood of ``self.kernel_.theta`` is returned.

        eval_gradient : bool, default=False
            If True, the gradient of the log-marginal likelihood with respect
            to the kernel hyperparameters at position theta is returned
            additionally. Note that gradient computation is not supported
            for non-binary classification. If True, theta must not be None.

        clone_kernel : bool, default=True
            If True, the kernel attribute is copied. If False, the kernel
            attribute is modified, but may result in a performance improvement.

        Returns
        -------
        log_likelihood : float
            Log-marginal likelihood of theta for training data.

        log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
            Gradient of the log-marginal likelihood with respect to the kernel
            hyperparameters at position theta.
            Only returned when `eval_gradient` is True.
        """
        check_is_fitted(self)

        if theta is None:
            if eval_gradient:
                raise ValueError("Gradient can only be evaluated for theta!=None")
            return self.log_marginal_likelihood_value_

        theta = np.asarray(theta)
        if self.n_classes_ == 2:
            return self.base_estimator_.log_marginal_likelihood(
                theta, eval_gradient, clone_kernel=clone_kernel
            )
        else:
            if eval_gradient:
                raise NotImplementedError(
                    "Gradient of log-marginal-likelihood not implemented for "
                    "multi-class GPC."
                )
            estimators = self.base_estimator_.estimators_
            n_dims = estimators[0].kernel_.n_dims
            if theta.shape[0] == n_dims:  # use same theta for all sub-kernels
                return np.mean(
                    [
                        estimator.log_marginal_likelihood(
                            theta, clone_kernel=clone_kernel
                        )
                        for i, estimator in enumerate(estimators)
                    ]
                )
            elif theta.shape[0] == n_dims * self.classes_.shape[0]:
                # theta for compound kernel
                return np.mean(
                    [
                        estimator.log_marginal_likelihood(
                            theta[n_dims * i : n_dims * (i + 1)],
                            clone_kernel=clone_kernel,
                        )
                        for i, estimator in enumerate(estimators)
                    ]
                )
            else:
                raise ValueError(
                    "Shape of theta must be either %d or %d. "
                    "Obtained theta with shape %d."
                    % (n_dims, n_dims * self.classes_.shape[0], theta.shape[0])
                )

    def latent_mean_and_variance(self, X):
        """Compute the mean and variance of the latent function.

        Based on algorithm 3.2 of [RW2006]_, this function returns the latent
        mean (Line 4) and variance (Line 6) of the Gaussian process
        classification model.

        Note that this function is only supported for binary classification.

        .. versionadded:: 1.7

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or list of object
            Query points where the GP is evaluated for classification.

        Returns
        -------
        latent_mean : array-like of shape (n_samples,)
            Mean of the latent function values at the query points.

        latent_var : array-like of shape (n_samples,)
            Variance of the latent function values at the query points.
        """
        if self.n_classes_ > 2:
            raise ValueError(
                "Returning the mean and variance of the latent function f "
                "is only supported for binary classification, received "
                f"{self.n_classes_} classes."
            )
        check_is_fitted(self)

        if self.kernel is None or self.kernel.requires_vector_input:
            X = validate_data(self, X, ensure_2d=True, dtype="numeric", reset=False)
        else:
            X = validate_data(self, X, ensure_2d=False, dtype=None, reset=False)

        return self.base_estimator_.latent_mean_and_variance(X)