"""Determination of parameter bounds"""

from numbers import Real

import numpy as np

from ..preprocessing import LabelBinarizer
from ..utils._param_validation import Interval, StrOptions, validate_params
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_array, check_consistent_length


@validate_params(
    {
        "X": ["array-like", "sparse matrix"],
        "y": ["array-like"],
        "loss": [StrOptions({"squared_hinge", "log"})],
        "fit_intercept": ["boolean"],
        "intercept_scaling": [Interval(Real, 0, None, closed="neither")],
    },
    prefer_skip_nested_validation=True,
)
def l1_min_c(X, y, *, loss="squared_hinge", fit_intercept=True, intercept_scaling=1.0):
    """Return the lowest bound for `C`.

    The lower bound for `C` is computed such that for `C` in `(l1_min_C, infinity)`
    the model is guaranteed not to be empty. This applies to l1 penalized
    classifiers, such as :class:`sklearn.svm.LinearSVC` with penalty='l1' and
    :class:`sklearn.linear_model.LogisticRegression` with penalty='l1'.

    This value is valid if the `class_weight` parameter in `fit()` is not set.

    For an example of how to use this function, see
    :ref:`sphx_glr_auto_examples_linear_model_plot_logistic_path.py`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training vector, where `n_samples` is the number of samples and
        `n_features` is the number of features.

    y : array-like of shape (n_samples,)
        Target vector relative to X.

    loss : {'squared_hinge', 'log'}, default='squared_hinge'
        Specifies the loss function.
        With 'squared_hinge' it is the squared hinge loss (a.k.a. L2 loss).
        With 'log' it is the loss of logistic regression models.

    fit_intercept : bool, default=True
        Specifies whether the intercept should be fitted by the model.
        It must match the fit() method parameter.

    intercept_scaling : float, default=1.0
        When fit_intercept is True, the instance vector x becomes
        [x, intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
        intercept_scaling is appended to the instance vector.
        It must match the fit() method parameter.

    Returns
    -------
    l1_min_c : float
        Minimum value for C.

    Examples
    --------
    >>> from sklearn.svm import l1_min_c
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(n_samples=100, n_features=20, random_state=42)
    >>> print(f"{l1_min_c(X, y, loss='squared_hinge', fit_intercept=True):.4f}")
    0.0044
    """
    X = check_array(X, accept_sparse="csc")
    check_consistent_length(X, y)

    # Encode the labels as a (n_classes, n_samples) matrix of +/-1 indicators.
    Y = LabelBinarizer(neg_label=-1).fit_transform(y).T
    # Maximum absolute value over classes and features.
    den = np.max(np.abs(safe_sparse_dot(Y, X)))
    if fit_intercept:
        bias = np.full(
            (np.size(y), 1), intercept_scaling, dtype=np.array(intercept_scaling).dtype
        )
        den = max(den, abs(np.dot(Y, bias)).max())

    if den == 0.0:
        raise ValueError(
            "Ill-posed l1_min_c calculation: l1 will always select zero "
            "coefficients for this data"
        )
    # w = 0 solves the l1-penalized problem iff C * ||grad loss(0)||_inf <= 1.
    # The gradient of the data term at w = 0 is -2 * (Y @ X) for the squared
    # hinge loss and -0.5 * (Y @ X) for the log loss, hence the two bounds.
    if loss == "squared_hinge":
        return 0.5 / den
    else:  # loss == 'log'
        return 2.0 / den