
    -i̧                     V   S SK r S SKJrJr  S SKJrJr  S SKrS SK	J
r  SSKJrJrJr  SSKJrJr  SSKJr  SSKJrJrJrJr  SS	KJrJr  SS
KJr  SSKJ r   SSK!J"r"J#r#  SSK$J%r%J&r&J'r'J(r(J)r)J*r*  SSK+J,r-  SSK+J.r/  SSK+J0r1  / SQr2S r3 " S S\\S9r4 " S S\\4\S9r5S r6     SS jr7g)    N)ABCMetaabstractmethod)IntegralReal   )BaseEstimatorClassifierMixin_fit_context)ConvergenceWarningNotFittedError)LabelEncoder)check_arraycheck_random_statecolumn_or_1dcompute_class_weight)Interval
StrOptions)safe_sparse_dot)available_if)_ovr_decision_functioncheck_classification_targets)_check_large_sparse_check_sample_weight_num_samplescheck_consistent_lengthcheck_is_fittedvalidate_data   )
_liblinear)_libsvm)_libsvm_sparse)c_svcnu_svc	one_classepsilon_svrnu_svrc           	         U R                   S   S-   n/ n[        R                  " [        R                  " S/U/5      5      n[	        U5       H  nX%U   XVS-      2SS24   n[	        US-   U5       H\  nX%U   XXS-      2SS24   n	XS-
  XV   XVS-      24   n
XXX   XXS-      24   nUR                  [        X5      [        X5      -   5        M^     M     U$ )zyGenerate primal coefficients from dual coefficients
for the one-vs-one multi class LibSVM in the case
of a linear kernel.r   r   N)shapenpcumsumhstackrangeappendr   )	dual_coef	n_supportsupport_vectorsn_classcoefsv_locsclass1sv1class2sv2alpha1alpha2s               D/var/www/html/venv/lib/python3.13/site-packages/sklearn/svm/_base.py_one_vs_one_coefr;   %   s     ooa 1$G Dii		A3	"234G.fo
0CCQFGFQJ0F!&/GQJ4G"G"JKC z7?WaZ=P+PPQFw!9L'LLMF KK4v7SST 1 ! K    c                     ^  \ rS rSr% Sr\" 1 Sk5      \/\" \SSSS9/\" SS	15      \" \	S
SSS9/\" \	SSSS9/\" \	S
SSS9/\" \	S
SSS9/\" \	S
SSS9/\" \	S
SSS9/S/S/\" \	SSSS9/\" S15      \
S/S/\" \SSSS9/S/S.r\
\S'   / SQr\S 5       rU 4S jr\" SS9S+S j5       rS rS rS rS rS rS  rS! rS" rS# rS$ rS% rS& r\S' 5       r S( r!\S) 5       r"S*r#U =r$$ ),
BaseLibSVME   zBase class for estimators that use libsvm as backing library.

This implements support vector machine classification and regression.

Parameter documentation is in the derived `SVC` class.
>   rbfpolylinearsigmoidprecomputedr   Nleft)closedscaleauto        neitherright      ?booleanbalancedverboserandom_statekerneldegreegammacoef0tolCnuepsilon	shrinkingprobability
cache_sizeclass_weightrO   max_iterrQ   _parameter_constraints)rB   rA   r@   rC   rD   c                 $   U R                   [        ;  a"  [        S[        < SU R                   < S35      eXl        X l        X0l        X@l        XPl        X`l        Xpl	        Xl
        Xl        Xl        Xl        Xl        Xl        Xl        Xl        g )Nzimpl should be one of z, z
 was given)_implLIBSVM_IMPL
ValueErrorrS   rT   rU   rV   rW   rX   rY   rZ   r[   r\   r]   r^   rO   r_   rQ   )selfrS   rT   rU   rV   rW   rX   rY   rZ   r[   r\   r]   r^   rO   r_   rQ   s                   r:   __init__BaseLibSVM.__init__j   sy    & ::[(<GT  

"&$( (r<   c                    > [         TU ]  5       nU R                  S:H  UR                  l        U R                  S:g  UR                  l        U$ NrD   )super__sklearn_tags__rS   
input_tagspairwisesparsere   tags	__class__s     r:   rk   BaseLibSVM.__sklearn_tags__   s?    w')#';;-#? !%!=r<   T)prefer_skip_nested_validationc           
      V
   [        U R                  5      n[        R                  " U5      nU(       a  U R                  S:X  a  [        S5      eU=(       a    [        U R                  5      (       + U l        [        U R                  5      (       a  [        X5        O[        U UU[        R                  SSSS9u  pU R                  U5      n[        R                  " Uc  / OU[        R                  S9n[        R                  U R                   5      n[#        U5      nUS:w  a6  XrR$                  S	   :w  a$  ['        S
SU< SUR$                  S	   < S3-   5      eU R                  S:X  aG  XqR$                  S   :w  a5  ['        SR)                  UR$                  S	   UR$                  S   5      5      eUR$                  S	   S	:  a;  UR$                  S	   U:w  a(  ['        SUR$                  < SUR$                  < S35      e[        U R                  5      (       a  SOU R                  nUS:X  a  SU l        O[-        U R.                  [0        5      (       a  U R.                  S:X  am  U(       a3  UR3                  U5      R5                  5       UR5                  5       S-  -
  OUR7                  5       n	U	S	:w  a  SUR$                  S   U	-  -  OSU l        OXU R.                  S:X  a  SUR$                  S   -  U l        O0[-        U R.                  [8        5      (       a  U R.                  U l        U R                  (       a  U R:                  OU R<                  n
U R>                  (       a
  [A        SSS9  URC                  [        RD                  " S5      RF                  5      nU
" XX6XS9  [I        US5      (       a  UR$                  OU4U l%        U RL                  RO                  5       U l(        U RR                  U l*        U R                   S;   a@  [W        U RX                  5      S:X  a'  U =RL                  S-  sl&        U RR                  * U l)        U R                  (       a  U RT                  RZ                  OU RT                  n[        R\                  " U RP                  5      R_                  5       n[        R\                  " U5      R_                  5       nU(       a  U(       d  ['        S5      eU R                   S;   a  U R`                  U l1        U $ U R`                  Re                  5       U l1        U $ ) a  Fit the SVM model according to the given training data.

Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)                 or (n_samples, n_samples)
    Training vectors, where `n_samples` is the number of samples
    and `n_features` is the number of features.
    For kernel="precomputed", the expected shape of X is
    (n_samples, n_samples).

y : array-like of shape (n_samples,)
    Target values (class labels in classification, real numbers in
    regression).

sample_weight : array-like of shape (n_samples,), default=None
    Per-sample weights. Rescale C per sample. Higher weights
    force the classifier to put more emphasis on these points.

Returns
-------
self : object
    Fitted estimator.

Notes
-----
If X and y are not C-ordered and contiguous arrays of np.float64 and
X is not a scipy.sparse.csr_matrix, X and/or y may be copied.

If X is a dense array, then the other methods will not support sparse
matrices as input.
rD   z-Sparse precomputed kernels are not supported.rX   csrF)dtypeorderaccept_sparseaccept_large_sparserv   r   r   z"X and y have incompatible shapes.
zX has z samples, but y has .r   zDPrecomputed matrix must be a square matrix. Input is a {}x{} matrix.z.sample_weight and X have incompatible shapes: z vs zT
Note: Sparse matrices cannot be indexed w/boolean masks (use `indices=True` in CV).rI   rG   rL   rH   z[LibSVM] endi)random_seedr(   r"   r#   rP   zxThe dual coefficients or intercepts are not finite. The input data may contain large values and need to be preprocessed.)3r   rQ   spissparserS   	TypeErrorcallable_sparser   r   r)   float64_validate_targetsasarrayrc   indexrb   r   r(   rd   format_gamma
isinstancerU   strmultiplymeanvarr   _sparse_fit
_dense_fitrO   printrandintiinfomaxhasattr
shape_fit_
intercept_copy_intercept_
dual_coef__dual_coef_lenclasses_dataisfiniteall	_num_itern_iter_item)re   Xysample_weightrndrn   solver_type	n_samplesrS   X_varfitseedr.   intercept_finitenessdual_coef_finitenesss                  r:   r   BaseLibSVM.fit   s   D !!2!23Qdkk]2KLL;ht{{&;";DKK  #A) jj#$)DA ""1%

'B]"**
 "''

3 !O	!	WWQZ 757@!''!*MN 
 ;;-'I,C,,2F1771:qwwqz,J 
 q!A%-*=*=a*@I*M
 !&&	1  #+4;;"7"7T[[]" DK

C((zzW$DJA,,.!&&(q@PQPUPUPW<AQJcQWWQZ%%78Cv%!AGGAJ.

D))**DK"&,,dDOO<<*"%{{288C=,,-A-fG &-Q%8%8!''yl
  ??//1??::,,T]]1Cq1HOOr!O#.DO-1\\D$$))t?O?O	!{{4+;+;<@@B!{{9599;$)=!  ::,,>>DL   >>..0DLr<   c                 N    [        USS9R                  [        R                  SS9$ )zhValidation of y and class_weight.

Default implementation for SVR and one-class; overridden in BaseSVC.
TwarnF)r   )r   astyper)   r   )re   r   s     r:   r   BaseLibSVM._validate_targets'  s%    
 AD)00%0HHr<   c                     U R                   S;   d   eU R                   S:X  a)  [        R                  " SU R                  -  [        5        g g )Nr   r   r   znSolver terminated early (max_iter=%i).  Consider pre-processing your data with StandardScaler or MinMaxScaler.)fit_status_warningsr   r_   r   re   s    r:   _warn_from_fit_status BaseLibSVM._warn_from_fit_status.  sL    6)))q MM359]]C #	 !r<   c                 0   [        U R                  5      (       aB  Xl        U R                  U5      nUR                  S   UR                  S   :w  a  [        S5      e[        R                  " U R                  5        [        R                  " UU40 SU_SU_S[        U S[        R                  " S5      5      _SU_S	U R                  _S
U R                  _SU R                  _SU R                   _SU R"                  _SU R$                  _SU R&                  _SU R(                  _SU R*                  _SU R,                  _SU R.                  _SU_6u	  U l        U l        U l        U l        U l        U l        U l        U l        U l         U RC                  5         g )Nr   r   z(X.shape[0] should be equal to X.shape[1]svm_typer   r^   class_weight_rS   rX   rY   r\   rT   r[   rW   r]   rV   rU   rZ   r_   r   )"r   rS   _BaseLibSVM__Xfit_compute_kernelr(   rd   libsvmset_verbosity_wraprO   r   getattrr)   emptyrX   rY   r\   rT   r[   rW   r]   rV   r   rZ   r_   support_support_vectors_
_n_supportr   r   _probA_probBr   r   r   )re   r   r   r   r   rS   r   s          r:   r   BaseLibSVM._dense_fit8  s   DKK   K$$Q'AwwqzQWWQZ' !KLL!!$,,/ JJ
 !
 (	

 !D
 
 ff
 ww
 ((
 ;;
 nn
 
 
 **
 ++
  LL!
" ]]#
$ $%

	
M!OOOKKN, 	""$r<   c                 4   [         R                  " UR                  [         R                  SS9Ul        UR	                  5         U R
                  R                  U5      n[        R                  " U R                  5        [        R                  " UR                  S   UR                  UR                  UR                  UUUU R                  U R                  U R                   U R"                  U R$                  ['        U S[         R(                  " S5      5      UU R*                  U R,                  U R.                  [1        U R2                  5      [1        U R4                  5      U R6                  U5      u	  U l        U l        nU l        U l        U l         U l!        U l"        U l#        U RI                  5         [K        U S5      (       a  [M        U RN                  5      S-
  n	OSn	U R:                  R                  S   n
[         RP                  " [         RR                  " U
5      U	5      nU
(       d  [T        RV                  " / 5      U l,        g [         RR                  " SURZ                  S-   URZ                  U	-  5      n[T        RV                  " XU4X45      U l,        g )NrX   rv   rw   r   r   r   r   ).r)   r   r   r   sort_indices_sparse_kernelsr   libsvm_sparser   rO   libsvm_sparse_trainr(   indicesindptrrT   r   rV   rW   rX   r   r   rY   r]   rZ   intr[   r\   r_   r   r   r   r   r   r   r   r   r   r   r   r   tilearanger   
csr_matrixr   size)re   r   r   r   r   rS   r   kernel_typedual_coef_datar1   n_SVdual_coef_indicesdual_coef_indptrs                r:   r   BaseLibSVM._sparse_fitg  s   AFF"**C@	**008((6 --GGAJFFIIHHKKKKJJHHFFD/288A;7GGOOLL  !MM+

	
M!OOKKN2 	""$4$$$--(1,GG$$**1-GGBIIdOW= mmB/DO!yy$))A-/@/E/E/O  !mm4DEDOr<   c                     U R                  U5      nU R                  (       a  U R                  OU R                  nU" U5      $ )ap  Perform regression on samples in X.

For an one-class model, +1 (inlier) or -1 (outlier) is returned.

Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
    For kernel="precomputed", the expected shape of X is
    (n_samples_test, n_samples_train).

Returns
-------
y_pred : ndarray of shape (n_samples,)
    The predicted values.
)_validate_for_predictr   _sparse_predict_dense_predict)re   r   predicts      r:   r   BaseLibSVM.predict  s7      &&q)*.,,$&&D<O<Oqzr<   c                    U R                  U5      nUR                  S:X  a  [        USSS9nU R                  n[	        U R                  5      (       aL  SnUR
                  S   U R                  S   :w  a*  [        SUR
                  S   U R                  S   4-  5      e[        R                  U R                  5      n[        R                  " UU R                  U R                  U R                  U R                   U R"                  U R$                  U R&                  UUU R(                  U R*                  U R,                  U R.                  S9$ )	Nr   rX   F)rw   ry   rD   r   MX.shape[1] = %d should be equal to %d, the number of samples at training time)r   rS   rT   rV   rU   r]   )r   ndimr   rS   r   r(   r   rd   rc   r   rb   r   r   r   r   r   r   r   r   r   rT   rV   r   r]   )re   r   rS   r   s       r:   r   BaseLibSVM._dense_predict  s     #66Q;ASeDADKK  "FwwqzT__Q// =wwqz4??1#567  $$TZZ0~~MM!!OOKKKK;;**++
 	
r<   c                    U R                   n[        U5      (       a  SnU R                  R                  U5      nSn[        R
                  " UR                  UR                  UR                  U R                  R                  U R                  R                  U R                  R                  U R                  R                  U R                  [        R                  U R                  5      UU R                  U R                  U R                   U R"                  U[%        U S[&        R(                  " S5      5      U R*                  U R,                  U R.                  U R0                  U R2                  U R4                  U R6                  5      $ )NrD   rI   r   r   )rS   r   r   r   r   libsvm_sparse_predictr   r   r   r   r   r   rc   rb   rT   r   rV   rW   r   r)   r   rY   rZ   r[   r\   r   r   r   )re   r   rS   r   rX   s        r:   r   BaseLibSVM._sparse_predict  s   F"F**00822FFIIHH!!&&!!))!!((!!djj)KKKKJJHHD/288A;7GGLLNNOOKKKK/
 	
r<   c                    [        U R                  5      (       aj  U R                  XR                  5      n[        R                  " U5      (       a  UR                  5       n[        R                  " U[        R                  SS9nU$ )z0Return the data transformed by a callable kernelrX   r   )	r   rS   r   r   r   toarrayr)   r   r   re   r   rS   s      r:   r   BaseLibSVM._compute_kernel  s[    DKK   [[KK0F{{6"")

63?Ar<   c                 &   U R                  U5      nU R                  U5      nU R                  (       a  U R                  U5      nOU R	                  U5      nU R
                  S;   a*  [        U R                  5      S:X  a  UR                  5       * $ U$ )a  Evaluates the decision function for the samples in X.

Parameters
----------
X : array-like of shape (n_samples, n_features)

Returns
-------
X : array-like of shape (n_samples, n_class * (n_class-1) / 2)
    Returns the decision function of the sample for each class
    in the model.
r   r   )	r   r   r   _sparse_decision_function_dense_decision_functionrb   r   r   ravel)re   r   dec_funcs      r:   _decision_functionBaseLibSVM._decision_function	  s     &&q)  #<<55a8H44Q7H ::,,T]]1Cq1HNN$$$r<   c                    [        U[        R                  SSS9nU R                  n[	        U5      (       a  Sn[
        R                  " UU R                  U R                  U R                  U R                  U R                  U R                  U R                  [        R                  U R                   5      UU R"                  U R$                  U R&                  U R(                  S9$ )NrX   F)rv   rw   ry   rD   r   rS   rT   r]   rV   rU   )r   r)   r   rS   r   r   decision_functionr   r   r   r   r   r   r   rc   r   rb   rT   r]   rV   r   r   s      r:   r   #BaseLibSVM._dense_decision_function'  s    3ERF"F''MM!!OOKKKK &&tzz2;;**++
 	
r<   c                    [         R                  " UR                  [         R                  SS9Ul        U R                  n[        US5      (       a  SnU R                  R                  U5      n[        R                  " UR                  UR                  UR                  U R                  R                  U R                  R                  U R                  R                  U R                  R                  U R                  [        R                  U R                   5      UU R"                  U R$                  U R&                  U R(                  U R*                  [-        U S[         R.                  " S5      5      U R0                  U R2                  U R4                  U R6                  U R8                  U R:                  U R<                  5      $ )NrX   r   __call__rD   r   r   )r)   r   r   r   rS   r   r   r   r   libsvm_sparse_decision_functionr   r   r   r   r   rc   rb   rT   r   rV   rW   rX   r   r   rY   rZ   r[   r\   r   r   r   re   r   rS   r   s       r:   r   $BaseLibSVM._sparse_decision_function?  s;   AFF"**C@6:&&"F**008<<FFIIHH!!&&!!))!!((!!djj)KKKKJJHHFFD/288A;7GGLLNNOOKKKK/
 	
r<   c           
         [        U 5        [        U R                  5      (       d  [        U US[        R
                  SSSS9nU R                  (       a1  [        R                  " U5      (       d  [        R                  " U5      nU R                  (       a  UR                  5         [        R                  " U5      (       aL  U R                  (       d;  [        U R                  5      (       d!  [        S[        U 5      R                  -  5      eU R                  S:X  aJ  UR                  S   U R                  S   :w  a*  [        S	UR                  S   U R                  S   4-  5      eU R                   nU R                  (       d^  UR"                  S:  aN  U R$                  R'                  5       UR                  S   :w  a#  [        S
U R(                  R                   S35      eU$ )Nru   rX   F)rx   rv   rw   ry   resetz3cannot use sparse input in %r trained on dense datarD   r   r   r   zThe internal representation of z was altered)r   r   rS   r   r)   r   r   r   r   r   r   rd   type__name__r(   r   r   r   
n_support_sumrq   )re   r   svs      r:   r    BaseLibSVM._validate_for_predictb  sp   $$#jj$)A <<Aa A<<NN;;q>>$,,x7L7LEt*%%& 
 ;;-'wwqzT__Q// =wwqz4??1#567  ""||!0C0C0ERS0T1$..2I2I1J,W  r<   c                     U R                   S:w  a  [        S5      eU R                  5       n[        R                  " U5      (       a  SUR
                  R                  l        U$ SUR                  l        U$ )zsWeights assigned to the features when `kernel="linear"`.

Returns
-------
ndarray of shape (n_features, n_classes)
rB   z2coef_ is only available when using a linear kernelF)rS   AttributeError	_get_coefr   r   r   flags	writeablere   r2   s     r:   coef_BaseLibSVM.coef_  sc     ;;(" !UVV~~ ;;t(-DIIOO%  $)DJJ r<   c                 B    [        U R                  U R                  5      $ N)r   r   r   r   s    r:   r  BaseLibSVM._get_coef  s    t//1F1FGGr<   c                      [        U 5        [        R	                  U R
                  5      nUS;   a  U R                  $ [        R                  " U R                  S   /5      $ ! [         a    [        ef = f)z)Number of support vectors for each class.r   r   )	r   r   r  rc   r   rb   r   r)   array)re   r   s     r:   r  BaseLibSVM.n_support_  sk    	!D! $$TZZ0v??" 88T__Q/011  	!  	!s   A" "A3)rX   __Xfitr   r   r   r   r   r   r   r   r]   r^   rV   rT   r   rZ   r   rU   r   rS   r_   r   rY   r\   rQ   r   r[   r   r   rW   rO   r  )%r  
__module____qualname____firstlineno____doc__r   r   r   r   r   dictr`   __annotations__r   r   rf   rk   r
   r   r   r   r   r   r   r   r   r   r   r   r   r   propertyr  r  r  __static_attributes____classcell__rq   s   @r:   r>   r>   E   s    JK
 Haf=>()T3V4
 4tI>?sD;<tS$w78c3w78T3V<=[!{ai@A#ZL14>;hD@A'(+$D 6 JO%) %)N 5K 6KZI-%^;z( 
D"
H	<
0!
F'R  ,H 2 2r<   r>   )	metaclassc                   D  ^  \ rS rSr% Sr0 \R                  E\" SS15      /S/S.Er\\	S'   S H  r
\R                  \
5        M     \U 4S	 j5       rS
 rS rU 4S jrS r\" \5      S 5       r\" \5      S 5       rS rS rS r\S 5       r\S 5       rU 4S jrSrU =r$ )BaseSVCi  z!ABC for LibSVM-based classifiers.ovrovorM   )decision_function_shape
break_tiesr`   )rZ   rY   c                 V   > Xl         UU l        [        TU ]  UUUUUUUSUU	U
UUUUS9  g )NrI   rR   )r(  r)  rj   rf   )re   rS   rT   rU   rV   rW   rX   rY   r[   r\   r]   r^   rO   r_   r(  rQ   r)  rq   s                    r:   rf   BaseSVC.__init__  sQ    ( (?$$#!%% 	 	
r<   c                 ,   [        USS9n[        U5        [        R                  " USS9u  p1[	        U R
                  X2S9U l        [        U5      S:  a  [        S[        U5      -  5      eX0l	        [        R                  " U[        R                  SS9$ )	NTr   )return_inverse)classesr   r   z>The number of classes has to be greater than one; got %d classrX   r   )r   r   r)   uniquer   r^   r   r   rd   r   r   r   )re   r   y_clss       r:   r   BaseSVC._validate_targets  s    !$'$Q'2d31$2C2CSWs8a<Pc( 
 zz!2::S99r<   c                     U R                  U5      nU R                  S:X  a=  [        U R                  5      S:  a$  [	        US:  U* [        U R                  5      5      $ U$ )a  Evaluate the decision function for the samples in X.

Parameters
----------
X : array-like of shape (n_samples, n_features)
    The input samples.

Returns
-------
X : ndarray of shape (n_samples, n_classes * (n_classes-1) / 2)
    Returns the decision function of the sample for each class
    in the model.
    If decision_function_shape='ovr', the shape is (n_samples,
    n_classes).

Notes
-----
If decision_function_shape='ovo', the function values are proportional
to the distance of the samples X to the separating hyperplane. If the
exact distances are required, divide the function values by the norm of
the weight vector (``coef_``). See also `this question
<https://stats.stackexchange.com/questions/14876/
interpreting-distance-from-hyperplane-in-svm>`_ for further details.
If decision_function_shape='ovr', the decision function is a monotonic
transformation of ovo decision function.
r&  r   r   )r   r(  r   r   r   )re   r   decs      r:   r   BaseSVC.decision_function  sU    6 %%a(''50S5G!5K)#'C4T]]9KLL
r<   c                   > [        U 5        U R                  (       a  U R                  S:X  a  [        S5      eU R                  (       aN  U R                  S:X  a>  [	        U R
                  5      S:  a%  [        R                  " U R                  U5      SS9nO[        TU ])  U5      nU R
                  R                  [        R                  " U[        R                  S95      $ )a  Perform classification on samples in X.

For an one-class model, +1 or -1 is returned.

Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or                 (n_samples_test, n_samples_train)
    For kernel="precomputed", the expected shape of X is
    (n_samples_test, n_samples_train).

Returns
-------
y_pred : ndarray of shape (n_samples,)
    Class labels for samples in X.
r'  z>break_ties must be False when decision_function_shape is 'ovo'r&  r   r   )axisrz   )r   r)  r(  rd   r   r   r)   argmaxr   rj   r   taker   intp)re   r   r   rq   s      r:   r   BaseSVC.predict  s    " 	??t;;uDP 
 OO,,5DMM"Q&		$003!<A"A}}!!"**Qbgg">??r<   c                 r    U R                   (       d  [        S5      eU R                  S;  a  [        S5      eg)Nz5predict_proba is not available when probability=Falser   z0predict_proba only implemented for SVC and NuSVCT)r\   r  rb   r   s    r:   _check_probaBaseSVC._check_proba=  s9     G  ::00 !STTr<   c                    U R                  U5      nU R                  R                  S:X  d  U R                  R                  S:X  a  [	        S5      eU R
                  (       a  U R                  OU R                  nU" U5      $ )a$  Compute probabilities of possible outcomes for samples in X.

The model needs to have probability information computed at training
time: fit with attribute `probability` set to True.

Parameters
----------
X : array-like of shape (n_samples, n_features)
    For kernel="precomputed", the expected shape of X is
    (n_samples_test, n_samples_train).

Returns
-------
T : ndarray of shape (n_samples, n_classes)
    Returns the probability of the sample for each class in
    the model. The columns correspond to the classes in sorted
    order, as they appear in the attribute :term:`classes_`.

Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
r   zApredict_proba is not available when fitted with probability=False)r   probA_r   probB_r   r   _sparse_predict_proba_dense_predict_proba)re   r   
pred_probas      r:   predict_probaBaseSVC.predict_probaF  sq    6 &&q);;q DKK$4$4$9 S  +/,,D&&D<U<U 	 !}r<   c                 L    [         R                  " U R                  U5      5      $ )ab  Compute log probabilities of possible outcomes for samples in X.

The model need to have probability information computed at training
time: fit with attribute `probability` set to True.

Parameters
----------
X : array-like of shape (n_samples, n_features) or                 (n_samples_test, n_samples_train)
    For kernel="precomputed", the expected shape of X is
    (n_samples_test, n_samples_train).

Returns
-------
T : ndarray of shape (n_samples, n_classes)
    Returns the log-probabilities of the sample for each class in
    the model. The columns correspond to the classes in sorted
    order, as they appear in the attribute :term:`classes_`.

Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
)r)   logrE  )re   r   s     r:   predict_log_probaBaseSVC.predict_log_probak  s    8 vvd((+,,r<   c                    U R                  U5      nU R                  n[        U5      (       a  Sn[        R	                  U R
                  5      n[        R                  " UU R                  U R                  U R                  U R                  U R                  U R                  U R                  UUU R                  U R                   U R"                  U R$                  S9nU$ )NrD   r   )r   rS   r   rc   r   rb   r   rE  r   r   r   r   r   r   r   rT   r]   rV   r   )re   r   rS   r   pprobs        r:   rC  BaseSVC._dense_predict_proba  s      #F"F$$TZZ0$$MM!!OOKKKK;;**++
" r<   c                    [         R                  " UR                  [         R                  SS9Ul        U R                  n[        U5      (       a  SnU R                  R                  U5      n[        R                  " UR                  UR                  UR                  U R                  R                  U R                  R                  U R                  R                  U R                  R                  U R                  [        R                  U R                   5      UU R"                  U R$                  U R&                  U R(                  U R*                  [-        U S[         R.                  " S5      5      U R0                  U R2                  U R4                  U R6                  U R8                  U R:                  U R<                  5      $ )NrX   r   rD   r   r   )r)   r   r   r   rS   r   r   r   r   libsvm_sparse_predict_probar   r   r   r   r   rc   rb   rT   r   rV   rW   rX   r   r   rY   rZ   r[   r\   r   r   r   r   s       r:   rB  BaseSVC._sparse_predict_proba  s9   AFF"**C@F"F**00888FFIIHH!!&&!!))!!((!!djj)KKKKJJHHFFD/288A;7GGLLNNOOKKKK/
 	
r<   c                    U R                   R                  S   S:X  a"  [        U R                   U R                  5      nU$ [	        U R                   U R
                  U R                  5      n[        R                  " US   5      (       a&  [        R                  " U5      R                  5       nU$ [        R                  " U5      nU$ )Nr   r   )r   r(   r   r   r;   r   r   r   vstacktocsrr)   r  s     r:   r  BaseSVC._get_coef  s    ??  #q("4??D4I4IJD  $$2G2GD {{47##yy,,.  yyr<   c                     U R                   $ zParameter learned in Platt scaling when `probability=True`.

Returns
-------
ndarray of shape  (n_classes * (n_classes - 1) / 2)
)r   r   s    r:   r@  BaseSVC.probA_       {{r<   c                     U R                   $ rV  )r   r   s    r:   rA  BaseSVC.probB_  rX  r<   c                 `   > [         TU ]  5       nU R                  S:g  UR                  l        U$ ri   )rj   rk   rS   rl   rn   ro   s     r:   rk   BaseSVC.__sklearn_tags__  s*    w')!%!=r<   )r)  r   r   r(  )r  r  r  r  r  r>   r`   r   r  r  unused_parampopr   rf   r   r   r   r=  r   rE  rI  rC  rB  r  r  r@  rA  rk   r   r!  r"  s   @r:   r%  r%    s    +$

+
+$$.u~$>#? k$D 
 *""<0 * %
 %
N:@@J ,"  "H ,-  -:6!
F      r<   r%  c           
         SS0SSS.S.SSS	00SS
0SSS.S.SSS00SSSS.0SS.nU S:X  a  X@   $ U S:w  a  [        SU -  5      eUR                  US5      nUc  SU-  nOGUR                  US5      nUc  SU< SU< S3nO&UR                  US5      nUc  SU< SU< SU< 3nOU$ [        SU< SU< SU< SU< 35      e)zFind the liblinear magic number for the solver.

This number depends on the values of the following attributes:
  - multi_class
  - penalty
  - loss
  - dual

The same number is also internally used by LibLinear to determine
which solver to use.
F   r      )FT)l1l2rc  T      r   r               )logistic_regressionhingesquared_hingeepsilon_insensitivesquared_epsilon_insensitivecrammer_singerro  r&  z<`multi_class` must be one of `ovr`, `crammer_singer`, got %rNzloss='%s' is not supportedzThe combination of penalty='z' and loss='z' is not supportedz' are not supported when dual=zUnsupported set of arguments: z, Parameters: penalty=z, loss=z, dual=)rd   get)	multi_classpenaltylossdual_solver_type_dict_solver_penerror_string_solver_dual
solver_nums	            r:   _get_liblinear_solver_typerz    s&   " (-aj8KLq	"!&
!12EF $tRj1(,b.C'D && --		J[X
 	
 $''d3K3d:"w5 D" 
 &))$5J! CJ4QUW 
 "!
$	. r<   c                    US;  aS  [        5       nUR                  U5      nUR                  n[        U5      S:  a  [	        SUS   -  5      e[        UUXS9nO%[        R                  " S[        R                  S9nUn[        R                  " U5        [        U5      nU(       a
  [        SSS	9  S
nU(       a  US::  a  [	        SU-  5      eUn[        R                  " U5        [        R                  " U5        [        R                  " U5        [        R                   " U 5      (       a  [#        U 5        [        R$                  " U[        R                  S9R'                  5       n[        R(                  " USS9n[+        X[        R                  S9n[-        XX5      n[        R.                  " U U[        R                   " U 5      UU
UUUU	UR1                  [        R2                  " S5      R4                  5      UU5      u  nn[5        U5      nUU	:  a  [6        R8                  " S[:        5        U(       a  USS2SS24   nUUSS2S4   -  nOUnSnUUU4$ )a  Used by Logistic Regression (and CV) and LinearSVC/LinearSVR.

Preprocessing is done in this function before supplying it to liblinear.

Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
    Training vector, where `n_samples` is the number of samples and
    `n_features` is the number of features.

y : array-like of shape (n_samples,)
    Target vector relative to X

C : float
    Inverse of cross-validation parameter. The lower the C, the higher
    the penalization.

fit_intercept : bool
    Whether or not to fit an intercept. If set to True, the feature vector
    is extended to include an intercept term: ``[x_1, ..., x_n, 1]``, where
    1 corresponds to the intercept. If set to False, no intercept will be
    used in calculations (i.e. data is expected to be already centered).

intercept_scaling : float
    Liblinear internally penalizes the intercept, treating it like any
    other term in the feature vector. To reduce the impact of the
    regularization on the intercept, the `intercept_scaling` parameter can
    be set to a value greater than 1; the higher the value of
    `intercept_scaling`, the lower the impact of regularization on it.
    Then, the weights become `[w_x_1, ..., w_x_n,
    w_intercept*intercept_scaling]`, where `w_x_1, ..., w_x_n` represent
    the feature weights and the intercept weight is scaled by
    `intercept_scaling`. This scaling allows the intercept term to have a
    different regularization behavior compared to the other features.

class_weight : dict or 'balanced', default=None
    Weights associated with classes in the form ``{class_label: weight}``.
    If not given, all classes are supposed to have weight one. For
    multi-output problems, a list of dicts can be provided in the same
    order as the columns of y.

    The "balanced" mode uses the values of y to automatically adjust
    weights inversely proportional to class frequencies in the input data
    as ``n_samples / (n_classes * np.bincount(y))``

penalty : {'l1', 'l2'}
    The norm of the penalty used in regularization.

dual : bool
    Dual or primal formulation,

verbose : int
    Set verbose to any positive number for verbosity.

max_iter : int
    Number of iterations.

tol : float
    Stopping condition.

random_state : int, RandomState instance or None, default=None
    Controls the pseudo random number generation for shuffling the data.
    Pass an int for reproducible output across multiple function calls.
    See :term:`Glossary <random_state>`.

multi_class : {'ovr', 'crammer_singer'}, default='ovr'
    `ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
    optimizes a joint objective over all classes.
    While `crammer_singer` is interesting from an theoretical perspective
    as it is consistent it is seldom used in practice and rarely leads to
    better accuracy and is more expensive to compute.
    If `crammer_singer` is chosen, the options loss, penalty and dual will
    be ignored.

loss : {'logistic_regression', 'hinge', 'squared_hinge',             'epsilon_insensitive', 'squared_epsilon_insensitive},             default='logistic_regression'
    The loss function used to fit the model.

epsilon : float, default=0.1
    Epsilon parameter in the epsilon-insensitive loss function. Note
    that the value of this parameter depends on the scale of the target
    variable y. If unsure, set epsilon=0.

sample_weight : array-like of shape (n_samples,), default=None
    Weights assigned to each sample.

Returns
-------
coef_ : ndarray of shape (n_features, n_features + 1)
    The coefficient vector got by minimizing the objective function.

intercept_ : float
    The intercept term added to the vector.

n_iter_ : array of int
    Number of iterations run across for each class.
)rm  rn  r   zeThis solver needs samples of at least 2 classes in the data, but the data contains only one class: %rr   )r.  r   r   rz   z[LibLinear]r|   r}   g      zqIntercept scaling is %r but needs to be greater than 0. To disable fitting an intercept, set fit_intercept=False.W)requirementsr   z@Liblinear failed to converge, increase the number of iterations.NrP   rI   )r   fit_transformr   r   rd   r   r)   r   r   	liblinearr   r   r   r   r   r   r   r   r   r   requirer   rz  
train_wrapr   r   r   r   r   r   )r   r   rX   fit_interceptintercept_scalingr^   rr  rt  rO   r_   rW   rQ   rq  rs  rZ   r   ency_indr   r   r   biasr   	raw_coef_r   
n_iter_maxr  r   s                               r:   _fit_liblinearr  *  s)   h IIn!!!$<<x=1'{+ 
 -(a
 "**5  )
\
*Cm$ D!,.?@  %D
g&$$W-  ) 
{{1~~A JJuBJJ/557EJJu3/E(LM,[4NK"--	
A	BHHSM%%&Iw$ WJXN	

 !SbS&!&1b5)99

*g%%r<   )Nr&  rj  g?N)8r   abcr   r   numbersr   r   numpyr)   scipy.sparsern   r   baser   r	   r
   
exceptionsr   r   preprocessingr   utilsr   r   r   r   utils._param_validationr   r   utils.extmathr   utils.metaestimatorsr   utils.multiclassr   r   utils.validationr   r   r   r   r   r   r|   r   r  r    r   r!   r   rc   r;   r>   r%  rz  r   r<   r:   <module>r     s     ' "   ? ? ; ( W W : + / S  &   -G@n	2' n	2bxozW xv	6J 	!D&r<   