
"""Solvers for Ridge and LogisticRegression using SAG algorithm"""

import warnings

import numpy as np

from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.validation import _check_sample_weight
from ._base import make_dataset
from ._sag_fast import sag32, sag64


def get_auto_step_size(
    max_squared_sum, alpha_scaled, loss, fit_intercept, n_samples=None, is_saga=False
):
    """Compute automatic step size for SAG solver.

The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
the max sum of squares over all samples.
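
Concretely, in the implementation below, L is
0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled for the 'log'
and 'multinomial' losses, and max_squared_sum + int(fit_intercept) +
alpha_scaled for the 'squared' loss. SAG uses the step size 1. / L, while
SAGA uses 1. / (2 * L + min(2 * n_samples * alpha_scaled, L)).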

Parameters
----------
max_squared_sum : float
    Maximum squared sum of X over samples.

alpha_scaled : float
    Constant that multiplies the regularization term, scaled by
    1. / n_samples, the number of samples.

loss : {'log', 'squared', 'multinomial'}
    The loss function used in SAG solver.

fit_intercept : bool
    Specifies if a constant (a.k.a. bias or intercept) will be
    added to the decision function.

n_samples : int, default=None
    Number of rows in X. Useful if is_saga=True.

is_saga : bool, default=False
    Whether to return step size for the SAGA algorithm or the SAG
    algorithm.

Returns
-------
step_size : float
    Step size used in SAG solver.
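
Examples
--------
A minimal usage sketch with illustrative values (the import path assumes
this module's location within scikit-learn):

>>> from sklearn.linear_model._sag import get_auto_step_size
>>> step = get_auto_step_size(
...     max_squared_sum=4.0, alpha_scaled=0.01, loss='log', fit_intercept=True
... )
>>> 0 < step <= 1
True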

References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document

:arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
"SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives" <1407.0202>`
    """
    if loss in ("log", "multinomial"):
        L = 0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled
    elif loss == "squared":
        # inverse Lipschitz constant for the squared loss
        L = max_squared_sum + int(fit_intercept) + alpha_scaled
    else:
        raise ValueError(
            "Unknown loss function for SAG solver, got %s instead of 'log' or"
            " 'squared'" % loss
        )
    if is_saga:
        # SAGA step size: 1 / (2 * L + min(2 * n_samples * mu, L)), where
        # mu = alpha_scaled is the strong convexity brought by the L2 penalty.
        mun = min(2 * n_samples * alpha_scaled, L)
        step = 1.0 / (2 * L + mun)
    else:
        # SAG step size: 1 / L.
        step = 1.0 / L
    return step


def sag_solver(
    X,
    y,
    sample_weight=None,
    loss="log",
    alpha=1.0,
    beta=0.0,
    max_iter=1000,
    tol=0.001,
    verbose=0,
    random_state=None,
    check_input=True,
    max_squared_sum=None,
    warm_start_mem=None,
    is_saga=False,
):
    """SAG solver for Ridge and LogisticRegression.

SAG stands for Stochastic Average Gradient: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a constant learning rate.

IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
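
For example, a minimal sketch of the recommended scaling, using the public
estimator API rather than calling this solver directly:

>>> from sklearn.preprocessing import StandardScaler
>>> from sklearn.pipeline import make_pipeline
>>> from sklearn.linear_model import LogisticRegression
>>> scaled_clf = make_pipeline(StandardScaler(), LogisticRegression(solver='sag'))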

This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.

The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.

.. versionadded:: 0.17

Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
    Training data.

y : ndarray of shape (n_samples,)
    Target values. With loss='multinomial', y must be label encoded
    (see preprocessing.LabelEncoder). For loss='log' it must be in [0, 1].

sample_weight : array-like of shape (n_samples,), default=None
    Weights applied to individual samples (1. for unweighted).

loss : {'log', 'squared', 'multinomial'}, default='log'
    Loss function that will be optimized:
    -'log' is the binary logistic loss, as used in LogisticRegression.
    -'squared' is the squared loss, as used in Ridge.
    -'multinomial' is the multinomial logistic loss, as used in
     LogisticRegression.

    .. versionadded:: 0.18
       *loss='multinomial'*

alpha : float, default=1.
    L2 regularization term in the objective function
    ``(0.5 * alpha * || W ||_F^2)``.

beta : float, default=0.
    L1 regularization term in the objective function
    ``(beta * || W ||_1)``. Only applied if ``is_saga`` is set to True.

max_iter : int, default=1000
    The max number of passes over the training data if the stopping
    criterion is not reached.

tol : float, default=0.001
    The stopping criterion for the weights. The iterations will stop when
    max(change in weights) / max(weights) < tol.

verbose : int, default=0
    The verbosity level.

random_state : int, RandomState instance or None, default=None
    Used when shuffling the data. Pass an int for reproducible output
    across multiple function calls.
    See :term:`Glossary <random_state>`.

check_input : bool, default=True
    If False, the input arrays X and y will not be checked.

max_squared_sum : float, default=None
    Maximum squared sum of X over samples. If None, it will be computed,
    going through all the samples. The value should be precomputed
    to speed up cross validation.
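    A typical precomputation, matching what is done internally when it is
    left as None, is ``row_norms(X, squared=True).max()``, with ``row_norms``
    imported from ``sklearn.utils.extmath``.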

warm_start_mem : dict, default=None
    The initialization parameters used for warm starting. Warm starting is
    currently used in LogisticRegression but not in Ridge.
    It contains:
        - 'coef': the weight vector, with the intercept in the last row
            if the intercept is fitted.
        - 'gradient_memory': the scalar gradient for all seen samples.
        - 'sum_gradient': the sum of gradient over all seen samples,
            for each feature.
        - 'intercept_sum_gradient': the sum of gradient over all seen
            samples, for the intercept.
        - 'seen': array of boolean describing the seen samples.
        - 'num_seen': the number of seen samples.

is_saga : bool, default=False
    Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves
    better in the first epochs, and allows for L1 regularisation.

Returns
-------
coef_ : ndarray of shape (n_features,)
    Weight vector.

n_iter_ : int
    The number of full passes over all samples.

warm_start_mem : dict
    Contains a 'coef' key with the fitted result, and possibly the
    fitted intercept at the end of the array. It also contains other keys
    used for warm starting.

Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(n_samples, n_features)
>>> y = rng.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
Ridge(solver='sag')

>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
LogisticRegression(solver='sag')
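
The warm_start_mem dict returned as the third value can be passed back in
to warm start a subsequent call. A minimal sketch (it reuses ``X`` and ``y``
from above and calls this private solver directly, so treat it as
illustrative only):

>>> from sklearn.linear_model._sag import sag_solver
>>> coef, n_iter, mem = sag_solver(X, y, loss='squared', max_iter=5)
>>> coef, n_iter, mem = sag_solver(X, y, loss='squared', warm_start_mem=mem)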

References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document

:arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
"SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives" <1407.0202>`

See Also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR,
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
    """
    if warm_start_mem is None:
        warm_start_mem = {}
    # Ridge's default max_iter is None
    if max_iter is None:
        max_iter = 1000

    if check_input:
        _dtype = [np.float64, np.float32]
        X = check_array(X, dtype=_dtype, accept_sparse="csr", order="C")
        y = check_array(y, dtype=_dtype, ensure_2d=False, order="C")

    n_samples, n_features = X.shape[0], X.shape[1]
    # As in SGD, the regularization is scaled by n_samples.
    alpha_scaled = float(alpha) / n_samples
    beta_scaled = float(beta) / n_samples

    # for loss == 'multinomial', y is expected to be label encoded (see docstring)
    n_classes = int(y.max()) + 1 if loss == "multinomial" else 1

    # initialization
    sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)

    if "coef" in warm_start_mem.keys():
        coef_init = warm_start_mem["coef"]
    else:
        # assume fit_intercept is False
        coef_init = np.zeros((n_features, n_classes), dtype=X.dtype, order="C")

    # coef_init may contain the intercept as its last row
    fit_intercept = coef_init.shape[0] == (n_features + 1)
    if fit_intercept:
        intercept_init = coef_init[-1, :]
        coef_init = coef_init[:-1, :]
    else:
        intercept_init = np.zeros(n_classes, dtype=X.dtype)

    if "intercept_sum_gradient" in warm_start_mem.keys():
        intercept_sum_gradient = warm_start_mem["intercept_sum_gradient"]
    else:
        intercept_sum_gradient = np.zeros(n_classes, dtype=X.dtype)

    if "gradient_memory" in warm_start_mem.keys():
        gradient_memory_init = warm_start_mem["gradient_memory"]
    else:
        gradient_memory_init = np.zeros(
            (n_samples, n_classes), dtype=X.dtype, order="C"
        )

    if "sum_gradient" in warm_start_mem.keys():
        sum_gradient_init = warm_start_mem["sum_gradient"]
    else:
        sum_gradient_init = np.zeros((n_features, n_classes), dtype=X.dtype, order="C")

    if "seen" in warm_start_mem.keys():
        seen_init = warm_start_mem["seen"]
    else:
        seen_init = np.zeros(n_samples, dtype=np.int32, order="C")

    if "num_seen" in warm_start_mem.keys():
        num_seen_init = warm_start_mem["num_seen"]
    else:
        num_seen_init = 0

    dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)

    if max_squared_sum is None:
        max_squared_sum = row_norms(X, squared=True).max()
    step_size = get_auto_step_size(
        max_squared_sum,
        alpha_scaled,
        loss,
        fit_intercept,
        n_samples=n_samples,
        is_saga=is_saga,
    )
    if step_size * alpha_scaled == 1:
        raise ZeroDivisionError(
            "Current sag implementation does not handle "
            "the case step_size * alpha_scaled == 1"
        )

    sag = sag64 if X.dtype == np.float64 else sag32
    num_seen, n_iter_ = sag(
        dataset,
        coef_init,
        intercept_init,
        n_samples,
        n_features,
        n_classes,
        tol,
        max_iter,
        loss,
        step_size,
        alpha_scaled,
        beta_scaled,
        sum_gradient_init,
        gradient_memory_init,
        seen_init,
        num_seen_init,
        fit_intercept,
        intercept_sum_gradient,
        intercept_decay,
        is_saga,
        verbose,
    )

    if n_iter_ == max_iter:
        warnings.warn(
            "The max_iter was reached which means the coef_ did not converge",
            ConvergenceWarning,
        )

    if fit_intercept:
        coef_init = np.vstack((coef_init, intercept_init))

    warm_start_mem = {
        "coef": coef_init,
        "sum_gradient": sum_gradient_init,
        "intercept_sum_gradient": intercept_sum_gradient,
        "gradient_memory": gradient_memory_init,
        "seen": seen_init,
        "num_seen": num_seen,
    }

    if loss == "multinomial":
        coef_ = coef_init.T
    else:
        coef_ = coef_init[:, 0]

    return coef_, n_iter_, warm_start_mem