
    -i/                    8   S r SSKrSSKrSSKrSSKJrJr  SSKrSSK	J
r
  SSKJr  SSKJrJrJrJr  SSKJrJrJrJr  SS	KJrJrJrJr  SS
KJrJrJr  SSK J!r!J"r"J#r#  SSK$J%r%J&r&  SSK'J(r(J)r)  S r*SSSSSSSSSS.	S jr+\" S/S/SS/SS/\" 1 Sk5      /\" \SSSS9S/\" \SSSS9S/S/SS/\" \SSSS9/\S/S/S/S/S.SS9SSSSSSSSSSSSS.S j5       r,SSSSSSSSSSSS .S! jr-     S9S" jr.S# r/\" S/S/\" S$S%15      /\" \SSSS9/S&.SS9 S:SS'SSSS(SSSS%SSSSS)S*S+.S, jj5       r0\" S/\" S%S$15      /S/\" \SSSS9/S-.SS9S'S.S%SSSSSSSSSSS/.S0 j5       r1 " S1 S2\\5      r2 " S3 S4\2\5      r3 " S5 S6\2\5      r4 " S7 S8\2\5      r5g);zDictionary learning.    N)IntegralReal)effective_n_jobs)linalg   )BaseEstimatorClassNamePrefixFeaturesOutMixinTransformerMixin_fit_context)LarsLasso	LassoLarsorthogonal_mp_gram)check_arraycheck_random_stategen_batchesgen_even_slices)Interval
StrOptionsvalidate_params)_randomized_svd	row_normssvd_flip)Paralleldelayed)check_is_fittedvalidate_datac                 T    U(       a!  U S;   a  [        SR                  U 5      5      eg g )N)omplarsz9Positive constraint not supported for '{}' coding method.)
ValueErrorformatmethodpositives     W/var/www/html/venv/lib/python3.13/site-packages/sklearn/decomposition/_dict_learning.py_check_positive_codingr'      s.    Fo-GNNvV
 	
 .x    
lasso_larsT  F	gramcov	algorithmregularizationcopy_covinitmax_iterverboser%   c       	            U R                   u  pUR                   S   nUS:X  a{  [        U5      U-  n [        R                  " SS9n[	        USU	USU
US9nUR                  UR                  U R                  US9  UR                  n[        R                  " S0 UD6  GOUS:X  a  [        U5      U-  n[        USUUS	U
S
9nUb1  UR                  S   (       d  [        R                  " U5      nUUl        UR                  UR                  U R                  SS9  UR                  nGOUS:X  as   [        R                  " SS9n[        SU	U[        U5      SS9nUR                  UR                  U R                  US9  UR                  n[        R                  " S0 UD6  OUS:X  an  [        R                  " U5      [        R                  " [        R                  " U5      U-
  S5      -  R                  nU
(       a  [        R                   " USSUS9  O/US:X  a)  [#        UU[        U5      S[%        U S	S9US9R                  nWR'                  X5      $ ! [        R                  " S0 WD6  f = f! [        R                  " S0 WD6  f = f)a  Generic sparse coding with precomputed Gram and/or covariance matrices.

Each row of the result is the solution to a Lasso problem.

Parameters
----------
X : ndarray of shape (n_samples, n_features)
    Data matrix.

dictionary : ndarray of shape (n_components, n_features)
    The dictionary matrix against which to solve the sparse coding of
    the data. Some of the algorithms assume normalized rows.

gram : ndarray of shape (n_components, n_components), default=None
    Precomputed Gram matrix, `dictionary * dictionary'`
    gram can be `None` if method is 'threshold'.

cov : ndarray of shape (n_components, n_samples), default=None
    Precomputed covariance, `dictionary * X'`.

algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'},             default='lasso_lars'
    The algorithm used:

    * `'lars'`: uses the least angle regression method
      (`linear_model.lars_path`);
    * `'lasso_lars'`: uses Lars to compute the Lasso solution;
    * `'lasso_cd'`: uses the coordinate descent method to compute the
      Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if
      the estimated components are sparse;
    * `'omp'`: uses orthogonal matching pursuit to estimate the sparse
      solution;
    * `'threshold'`: squashes to zero all coefficients less than
      regularization from the projection `dictionary * data'`.

regularization : int or float, default=None
    The regularization parameter. It corresponds to alpha when
    algorithm is `'lasso_lars'`, `'lasso_cd'` or `'threshold'`.
    Otherwise it corresponds to `n_nonzero_coefs`.

init : ndarray of shape (n_samples, n_components), default=None
    Initialization value of the sparse code. Only used if
    `algorithm='lasso_cd'`.

max_iter : int, default=1000
    Maximum number of iterations to perform if `algorithm='lasso_cd'` or
    `'lasso_lars'`.

copy_cov : bool, default=True
    Whether to copy the precomputed covariance matrix; if `False`, it may
    be overwritten.

verbose : int, default=0
    Controls the verbosity; the higher, the more messages.

positive: bool, default=False
    Whether to enforce a positivity constraint on the sparse code.

    .. versionadded:: 0.20

Returns
-------
code : ndarray of shape (n_components, n_features)
    The sparse codes.
r   r)   ignore)allF)alphafit_interceptr3   
precomputefit_pathr%   r2   )Xylasso_cdT)r7   r8   r9   r2   
warm_startr%   N	WRITEABLE)check_inputr    )r8   r3   r9   n_nonzero_coefsr:   	thresholdoutr   )squared)Gramr;   r@   tolnorms_squaredcopy_Xy )shapefloatnpseterrr   fitTcoef_r   flagsarrayr   intsignmaximumabsclipr   r   reshape)X
dictionaryr,   r-   r.   r/   r0   r1   r2   r3   r%   	n_samples
n_featuresn_componentsr7   err_mgtr)   new_codeclfr    s                       r&   _sparse_encode_precomputedra   $   sU   ^ GGI##A&LL n%
2	!iiH-G ##!!J NN:<<N5!''HII  	j	 n%
2
 
  ::k*xx~CI
accu599	f		!iiH-G # #N 3D HHZ\\1333H/zzHII  	k	!GGCL2::bffSkN.JA#NNQQGGHa84	e	%/#At4
 ! 	 I44 II  \ II  s   AI /AI0 I-0Jz
array-like>   r   r    r<   rA   r)      leftclosedbooleanr3   rY   rZ   r,   r-   r.   r@   r7   r0   r1   r2   n_jobsr?   r3   r%   prefer_skip_nested_validation)r,   r-   r.   r@   r7   r0   r1   r2   rh   r?   r3   r%   c                   U(       ao  US:X  aS  [        US[        R                  [        R                  /S9n[        U S[        R                  [        R                  /S9n O[        U5      n[        U 5      n UR                  S   U R                  S   :w  a/  [        SR                  UR                  U R                  5      5      e[        XM5        [        U UUUUUUUUU	U
UUS9$ )a  Sparse coding.

Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::

    X ~= code * dictionary

Read more in the :ref:`User Guide <SparseCoder>`.

Parameters
----------
X : array-like of shape (n_samples, n_features)
    Data matrix.

dictionary : array-like of shape (n_components, n_features)
    The dictionary matrix against which to solve the sparse coding of
    the data. Some of the algorithms assume normalized rows for meaningful
    output.

gram : array-like of shape (n_components, n_components), default=None
    Precomputed Gram matrix, `dictionary * dictionary'`.

cov : array-like of shape (n_components, n_samples), default=None
    Precomputed covariance, `dictionary' * X`.

algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'},             default='lasso_lars'
    The algorithm used:

    * `'lars'`: uses the least angle regression method
      (`linear_model.lars_path`);
    * `'lasso_lars'`: uses Lars to compute the Lasso solution;
    * `'lasso_cd'`: uses the coordinate descent method to compute the
      Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if
      the estimated components are sparse;
    * `'omp'`: uses orthogonal matching pursuit to estimate the sparse
      solution;
    * `'threshold'`: squashes to zero all coefficients less than
      regularization from the projection `dictionary * data'`.

n_nonzero_coefs : int, default=None
    Number of nonzero coefficients to target in each column of the
    solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
    and is overridden by `alpha` in the `omp` case. If `None`, then
    `n_nonzero_coefs=int(n_features / 10)`.

alpha : float, default=None
    If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
    penalty applied to the L1 norm.
    If `algorithm='threshold'`, `alpha` is the absolute value of the
    threshold below which coefficients will be squashed to zero.
    If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
    the reconstruction error targeted. In this case, it overrides
    `n_nonzero_coefs`.
    If `None`, default to 1.

copy_cov : bool, default=True
    Whether to copy the precomputed covariance matrix; if `False`, it may
    be overwritten.

init : ndarray of shape (n_samples, n_components), default=None
    Initialization value of the sparse codes. Only used if
    `algorithm='lasso_cd'`.

max_iter : int, default=1000
    Maximum number of iterations to perform if `algorithm='lasso_cd'` or
    `'lasso_lars'`.

n_jobs : int, default=None
    Number of parallel jobs to run.
    ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
    ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
    for more details.

check_input : bool, default=True
    If `False`, the input arrays X and dictionary will not be checked.

verbose : int, default=0
    Controls the verbosity; the higher, the more messages.

positive : bool, default=False
    Whether to enforce positivity when finding the encoding.

    .. versionadded:: 0.20

Returns
-------
code : ndarray of shape (n_samples, n_components)
    The sparse codes.

See Also
--------
sklearn.linear_model.lars_path : Compute Least Angle Regression or Lasso
    path using LARS algorithm.
sklearn.linear_model.orthogonal_mp : Solves Orthogonal Matching Pursuit problems.
sklearn.linear_model.Lasso : Train Linear Model with L1 prior as regularizer.
SparseCoder : Find a sparse representation of data from a fixed precomputed
    dictionary.

Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import sparse_encode
>>> X = np.array([[-1, -1, -1], [0, 0, 3]])
>>> dictionary = np.array(
...     [[0, 1, 0],
...      [-1, -1, 2],
...      [1, 1, 1],
...      [0, 1, 1],
...      [0, 2, 1]],
...    dtype=np.float64
... )
>>> sparse_encode(X, dictionary, alpha=1e-10)
array([[ 0.,  0., -1.,  0.,  0.],
       [ 0.,  1.,  1.,  0.,  0.]])
r<   C)orderdtyperb   zRDictionary and X have different numbers of features:dictionary.shape: {} X.shape{}r,   r-   r.   r@   r7   r0   r1   r2   rh   r3   r%   )	r   rL   float64float32rJ   r!   r"   r'   _sparse_encoderg   s                 r&   sparse_encoders      s    v 
"$#bjj"**-EJ ASRZZ0HIA$Z0JAAaggaj(--3VJ4D4Dagg-N
 	

 9/	' r(   ro   c                  ^ ^^^^^^^	^^^ T R                   u  pTR                   S   nTS;   a  UmTc  [        [        US-  S5      U5      mOUmTc  SmTc'  TS:w  a!  [        R                  " TTR
                  5      mTc)  TS:w  a#  Sm[        R                  " TT R
                  5      m[        U
5      S:X  d  TS:X  a  [        T TTTTTTTT	TTS	9nU$ T R                   S   nTR                   S   n[        R                  " X45      n[        [        U[        U
5      5      5      n[        U
TS
9" U UUUUUUU	UUU4S jU 5       5      n[        UU5       H  u  nnUUU'   M     U$ )z1Sparse coding without input/parameter validation.r   )r    r   
   rb   g      ?rA   r<   Fr+   )rh   r3   c              3      >#    U  H9  n[        [        5      " TU   TTTb	  TS S 2U4   OS TTTTb  TU   OS T	TT
S9v   M;     g 7f)Nr+   )r   ra   ).0
this_slicerY   r.   r0   r-   rZ   r,   r1   r2   r%   r/   r3   s     r&   	<genexpr>!_sparse_encode.<locals>.<genexpr>  sg      : !J 	*+jM&)oAzM"4)%)%5j!4	
 !s   AA)rJ   minmaxrL   dotrO   r   ra   emptylistr   r   zip)rY   rZ   r,   r-   r.   r@   r7   r0   r1   r2   rh   r3   r%   r[   r\   r]   codeslices
code_viewsrx   	this_viewr/   s   `````  ``` ``        @r&   rr   rr     s{   $ GGI##A&LO#(! Z"_a!8,GN! N|	[0vvj*,,/
{yJ.ffZ%1$	[(@))
  
I##A&L88Y-.D/)-=f-EFGF9 : : !: J  "%VZ!8
I$Z "9Kr(   c           
         UR                   u  p[        U5      nUc  UR                  U-  nUc  UR                  U-  nSn
[        U	5       H  nX;U4   S:  a#  X==   USS2U4   X;   U -  -
  X;U4   -  -  ss'   O]XR	                  U5         nSUR                  5       =(       d    S-  nUR                  SU[        U5      S9nX-   X'   SUSS2U4'   U
S-  n
U(       a  [        R                  " X   SSX   S9  X==   [        [        R                  " X   5      S5      -  ss'   M     U(       a  U
S:  a  [        U
 S35        ggg)	aO  Update the dense dictionary factor in place.

Parameters
----------
dictionary : ndarray of shape (n_components, n_features)
    Value of the dictionary at the previous iteration.

Y : ndarray of shape (n_samples, n_features)
    Data matrix.

code : ndarray of shape (n_samples, n_components)
    Sparse coding of the data against which to optimize the dictionary.

A : ndarray of shape (n_components, n_components), default=None
    Together with `B`, sufficient stats of the online model to update the
    dictionary.

B : ndarray of shape (n_features, n_components), default=None
    Together with `A`, sufficient stats of the online model to update the
    dictionary.

verbose: bool, default=False
    Degree of output the procedure will print.

random_state : int, RandomState instance or None, default=None
    Used for randomly initializing the dictionary. Pass an int for
    reproducible results across multiple function calls.
    See :term:`Glossary <random_state>`.

positive : bool, default=False
    Whether to enforce positivity when finding the dictionary.

    .. versionadded:: 0.20
Nr   gư>g{Gz?rb   )sizerB   z unused atoms resampled.)rJ   r   rO   rangechoicestdnormallenrL   rW   r|   r   normprint)rZ   Yr   ABr3   random_stater%   r[   r]   n_unusedknewdnoise_levelnoises                  r&   _update_dictr     sK   X #jjI%l3LyFFTMyCC$JH< T7T>Ma1gz(99Q!tWDDM ((34D $((*/2K '';SY'GE LJMDAJMHGGJM1d
> 	V[[7;;) !, 8a<
234  wr(   c                   [         R                   " 5       nUb  Ub  [        R                  " USS9nUnOB[        R                  " U SS9u  nnn[        UU5      u  nnUSS2[        R                  4   U-  n[        U5      nUU::  a  USS2SU24   nUSU2SS24   nOr[        R                  U[        R                  " [        U5      UU-
  45      4   n[        R                  U[        R                  " UU-
  UR                  S   45      4   n[        R                  " U5      n/ n[        R                  nU
S:X  a
  [        SSS	9  S
n[        U5       GHd  n[         R                   " 5       U-
  nU
S:X  a>  [         R"                  R%                  S5        [         R"                  R'                  5         OU
(       a  [        SUUUS-  U4-  5        [)        U UUUUUUUU
S9	n[+        UU UU
UUS9  S[        R,                  " U UU-  -
  S-  5      -  U[        R,                  " [        R.                  " U5      5      -  -   nUR1                  U5        US:  a@  US   US
   -
  nUUUS
   -  :  a)  U
S:X  a  [        S5        OU
(       a  [        SU-  5          O'US-  S:X  d  GMN  U	c  GMT  U	" [3        5       5        GMg     U(       a	  UUUUS-   4$ UUU4$ )z"Main dictionary learning algorithmNF)rm   F)full_matricesrb   [dict_learning] )end.zCIteration % 3i (elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)<   )r.   r7   r1   rh   r%   r2   r3   r3   r   r%         ?r   r    z+--- Convergence reached after %d iterations   )timerL   rR   r   svdr   newaxisr   c_zerosr_rJ   asfortranarraynanr   r   sysstdoutwriteflushrs   r   sumrV   appendlocals)rY   r]   r7   r2   rF   r$   rh   	dict_init	code_initcallbackr3   r   return_n_iterpositive_dictpositive_codemethod_max_itert0r   rZ   Srerrorscurrent_costiidtdEs                             r&   _dict_learningr   )  s   ( 
B!6xx	-
$jj%@a#D*5jq"**}%
2
JAqA}}$%q 01
uuT288SYq0@$ABBCUU,"2J4D4DQ4G!HII

 "":.JF66L!|S) 
BHoYY[2a<JJS!JJUr27L12 "$

 	%"	
 RVVQ
):%:q$@AAEBFFFF4LM
 E
 
 	l#6fRj(BC&*$$a<"IG"LM6Q;8/VXg j Za//Z''r(   cdr    )rY   return_coder$   r   d      MbP?ru   )r7   r2   r   r   r   
batch_sizer3   shufflerh   r$   r   r   r   r   rF   max_no_improvementc                   SU-   n[        S0 SU_SU_SU_SU
_SU_SU_SU	_S	U_S
U_SU_SU_SU_SU_SU_SU_SU_SU_SU_6R                  U 5      nU(       d  UR                  $ UR                  U 5      nUUR                  4$ )a-  Solve a dictionary learning matrix factorization problem online.

Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::

    (U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
                 (U,V)
                 with || V_k ||_2 = 1 for all  0 <= k < n_components

where V is the dictionary and U is the sparse code. ||.||_Fro stands for
the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm
which is the sum of the absolute values of all the entries in the matrix.
This is accomplished by repeatedly iterating over mini-batches by slicing
the input data.

Read more in the :ref:`User Guide <DictionaryLearning>`.

Parameters
----------
X : array-like of shape (n_samples, n_features)
    Data matrix.

n_components : int or None, default=2
    Number of dictionary atoms to extract. If None, then ``n_components``
    is set to ``n_features``.

alpha : float, default=1
    Sparsity controlling parameter.

max_iter : int, default=100
    Maximum number of iterations over the complete dataset before
    stopping independently of any early stopping criterion heuristics.

    .. versionadded:: 1.1

return_code : bool, default=True
    Whether to also return the code U or just the dictionary `V`.

dict_init : ndarray of shape (n_components, n_features), default=None
    Initial values for the dictionary for warm restart scenarios.
    If `None`, the initial values for the dictionary are created
    with an SVD decomposition of the data via
    :func:`~sklearn.utils.extmath.randomized_svd`.

callback : callable, default=None
    A callable that gets invoked at the end of each iteration.

batch_size : int, default=256
    The number of samples to take in each batch.

    .. versionchanged:: 1.3
       The default value of `batch_size` changed from 3 to 256 in version 1.3.

verbose : bool, default=False
    To control the verbosity of the procedure.

shuffle : bool, default=True
    Whether to shuffle the data before splitting it in batches.

n_jobs : int, default=None
    Number of parallel jobs to run.
    ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
    ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
    for more details.

method : {'lars', 'cd'}, default='lars'
    * `'lars'`: uses the least angle regression method to solve the lasso
      problem (`linear_model.lars_path`);
    * `'cd'`: uses the coordinate descent method to compute the
      Lasso solution (`linear_model.Lasso`). Lars will be faster if
      the estimated components are sparse.

random_state : int, RandomState instance or None, default=None
    Used for initializing the dictionary when ``dict_init`` is not
    specified, randomly shuffling the data when ``shuffle`` is set to
    ``True``, and updating the dictionary. Pass an int for reproducible
    results across multiple function calls.
    See :term:`Glossary <random_state>`.

positive_dict : bool, default=False
    Whether to enforce positivity when finding the dictionary.

    .. versionadded:: 0.20

positive_code : bool, default=False
    Whether to enforce positivity when finding the code.

    .. versionadded:: 0.20

method_max_iter : int, default=1000
    Maximum number of iterations to perform when solving the lasso problem.

    .. versionadded:: 0.22

tol : float, default=1e-3
    Control early stopping based on the norm of the differences in the
    dictionary between 2 steps.

    To disable early stopping based on changes in the dictionary, set
    `tol` to 0.0.

    .. versionadded:: 1.1

max_no_improvement : int, default=10
    Control early stopping based on the consecutive number of mini batches
    that does not yield an improvement on the smoothed cost function.

    To disable convergence detection based on cost function, set
    `max_no_improvement` to None.

    .. versionadded:: 1.1

Returns
-------
code : ndarray of shape (n_samples, n_components),
    The sparse code (only returned if `return_code=True`).

dictionary : ndarray of shape (n_components, n_features),
    The solutions to the dictionary learning problem.

n_iter : int
    Number of iterations run. Returned only if `return_n_iter` is
    set to `True`.

See Also
--------
dict_learning : Solve a dictionary learning matrix factorization problem.
DictionaryLearning : Find a dictionary that sparsely encodes data.
MiniBatchDictionaryLearning : A faster, less accurate, version of the dictionary
    learning algorithm.
SparsePCA : Sparse Principal Components Analysis.
MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.

Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_sparse_coded_signal
>>> from sklearn.decomposition import dict_learning_online
>>> X, _, _ = make_sparse_coded_signal(
...     n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
...     random_state=42,
... )
>>> U, V = dict_learning_online(
...     X, n_components=15, alpha=0.2, max_iter=20, batch_size=3, random_state=42
... )

We can check the level of sparsity of `U`:

>>> np.mean(U == 0)
np.float64(0.53)

We can compare the average squared euclidean norm of the reconstruction
error of the sparse coded signal relative to the squared euclidean norm of
the original signal:

>>> X_hat = U @ V
>>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
np.float64(0.053)
lasso_r]   r7   r2   rh   fit_algorithmr   r   r   r   transform_algorithmtransform_alphar   r   transform_max_iterr3   r   rF   r   rI   )MiniBatchDictionaryLearningrN   components_	transform)rY   r]   r7   r2   r   r   r   r   r3   r   rh   r$   r   r   r   r   rF   r   r   estr   s                        r&   dict_learning_onliner     s   z #V+
% !  	
     " 0  $ $ +   !" #$ .%& 
c!f' * }}QS__$$r(   )rY   r$   r   r   :0yE>)r2   rF   r$   rh   r   r   r   r3   r   r   r   r   r   c                    [        UUUUUUUU	UU
UUUUS9R                  SS9nUR                  U 5      nU(       a$  UUR                  UR                  UR
                  4$ UUR                  UR                  4$ )a  Solve a dictionary learning matrix factorization problem.

Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::

    (U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
                 (U,V)
                with || V_k ||_2 = 1 for all  0 <= k < n_components

where V is the dictionary and U is the sparse code. ||.||_Fro stands for
the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm
which is the sum of the absolute values of all the entries in the matrix.

Read more in the :ref:`User Guide <DictionaryLearning>`.

Parameters
----------
X : array-like of shape (n_samples, n_features)
    Data matrix.

n_components : int
    Number of dictionary atoms to extract.

alpha : int or float
    Sparsity controlling parameter.

max_iter : int, default=100
    Maximum number of iterations to perform.

tol : float, default=1e-8
    Tolerance for the stopping condition.

method : {'lars', 'cd'}, default='lars'
    The method used:

    * `'lars'`: uses the least angle regression method to solve the lasso
       problem (`linear_model.lars_path`);
    * `'cd'`: uses the coordinate descent method to compute the
      Lasso solution (`linear_model.Lasso`). Lars will be faster if
      the estimated components are sparse.

n_jobs : int, default=None
    Number of parallel jobs to run.
    ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
    ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
    for more details.

dict_init : ndarray of shape (n_components, n_features), default=None
    Initial value for the dictionary for warm restart scenarios. Only used
    if `code_init` and `dict_init` are not None.

code_init : ndarray of shape (n_samples, n_components), default=None
    Initial value for the sparse code for warm restart scenarios. Only used
    if `code_init` and `dict_init` are not None.

callback : callable, default=None
    Callable that gets invoked every five iterations.

verbose : bool, default=False
    To control the verbosity of the procedure.

random_state : int, RandomState instance or None, default=None
    Used for randomly initializing the dictionary. Pass an int for
    reproducible results across multiple function calls.
    See :term:`Glossary <random_state>`.

return_n_iter : bool, default=False
    Whether or not to return the number of iterations.

positive_dict : bool, default=False
    Whether to enforce positivity when finding the dictionary.

    .. versionadded:: 0.20

positive_code : bool, default=False
    Whether to enforce positivity when finding the code.

    .. versionadded:: 0.20

method_max_iter : int, default=1000
    Maximum number of iterations to perform.

    .. versionadded:: 0.22

Returns
-------
code : ndarray of shape (n_samples, n_components)
    The sparse code factor in the matrix factorization.

dictionary : ndarray of shape (n_components, n_features),
    The dictionary factor in the matrix factorization.

errors : array
    Vector of errors at each iteration.

n_iter : int
    Number of iterations run. Returned only if `return_n_iter` is
    set to True.

See Also
--------
dict_learning_online : Solve a dictionary learning matrix factorization
    problem online.
DictionaryLearning : Find a dictionary that sparsely encodes data.
MiniBatchDictionaryLearning : A faster, less accurate version
    of the dictionary learning algorithm.
SparsePCA : Sparse Principal Components Analysis.
MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.

Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_sparse_coded_signal
>>> from sklearn.decomposition import dict_learning
>>> X, _, _ = make_sparse_coded_signal(
...     n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
...     random_state=42,
... )
>>> U, V, errors = dict_learning(X, n_components=15, alpha=0.1, random_state=42)

We can check the level of sparsity of `U`:

>>> np.mean(U == 0)
np.float64(0.62)

We can compare the average squared euclidean norm of the reconstruction
error of the sparse coded signal relative to the squared euclidean norm of
the original signal:

>>> X_hat = U @ V
>>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
np.float64(0.0192)
)r]   r7   r2   rF   r   rh   r   r   r   r3   r   r   r   r   default)r   )DictionaryLearning
set_outputfit_transformr   error_n_iter_)rY   r]   r7   r2   rF   r$   rh   r   r   r   r3   r   r   r   r   r   	estimatorr   s                     r&   dict_learningr   u  s    B #!!##* j9j%   ""1%D!!	
 	
 &&	(8(888r(   c                   6    \ rS rSrSrS rS rS rS rS r	Sr
g	)
_BaseSparseCodingi1  z>Base class from SparseCoder and DictionaryLearning algorithms.c                 X    Xl         X l        X0l        Xpl        X@l        XPl        X`l        g N)r   transform_n_nonzero_coefsr   r   
split_signrh   r   )selfr   r   r   r   rh   r   r   s           r&   __init___BaseSparseCoding.__init__4  s,     $7 )B&."4$*r(   c                    [        XSS9n[        U S5      (       a  U R                  c  U R                  nOU R                  n[	        UUU R
                  U R                  UU R                  U R                  U R                  S9nU R                  (       al  UR                  u  pV[        R                  " USU-  45      n[        R                  " US5      USS2SU24'   [        R                  " US5      * USS2US24'   UnU$ )OPrivate method allowing to accommodate both DictionaryLearning and
SparseCoder.F)resetr7   N)r.   r@   r7   r2   rh   r%   r   r   )r   hasattrr   r7   rs   r   r   r   rh   r   r   rJ   rL   r~   rU   minimum)r   rY   rZ   r   r   r[   r\   
split_codes           r&   
_transform_BaseSparseCoding._transformF  s     $/4!!d&:&:&B"jjO"22O.. ::!,,;;''	
 ??$(JJ!I9a*n"=>J)+D!)<Jq+:+~&*,**T1*=)=Jq*+~&Dr(   c                 N    [        U 5        U R                  XR                  5      $ )a  Encode the data as a sparse combination of the dictionary atoms.

Coding method is determined by the object parameter
`transform_algorithm`.

Parameters
----------
X : ndarray of shape (n_samples, n_features)
    Test data to be transformed, must have the same number of
    features as the data used to train the model.

Returns
-------
X_new : ndarray of shape (n_samples, n_components)
    Transformed data.
)r   r   r   r   rY   s     r&   r   _BaseSparseCoding.transforme  s!    " 	q"2"233r(   c                 B   [        U5      nUR                  S   nU R                  (       a  X3-  nUR                  S   U:X  d  [        SU SUR                  S    S35      eU R                  (       a*  UR                  u  pEUS-  nUSS2SU24   USS2US24   -
  nX-  $ )r   r   rb   zkThe number of components in the code is different from the number of components in the dictionary.Expected z, got r   r   N)r   rJ   r   r!   )r   r   rZ   expected_n_componentsr[   r\   s         r&   _inverse_transform$_BaseSparseCoding._inverse_transformy  s     4  * 0 0 3??!:!zz!} 5512&AqJ 
 ??$(JJ!I1J;J;'$q*+~*>>D  r(   c                 N    [        U 5        U R                  XR                  5      $ aC  Transform data back to its original space.

Parameters
----------
X : array-like of shape (n_samples, n_components)
    Data to be transformed back. Must have the same number of
    components as the data used to train the model.

Returns
-------
X_original : ndarray of shape (n_samples, n_features)
    Transformed data.
)r   r   r   r   s     r&   inverse_transform#_BaseSparseCoding.inverse_transform  s#     	&&q*:*:;;r(   )rh   r   r   r   r   r   r   N)__name__
__module____qualname____firstlineno____doc__r   r   r   r   r   __static_attributes__rI   r(   r&   r   r   1  s    H+$>4(!*<r(   r   c                      ^  \ rS rSrSrSSSSSSSS.U 4S jjrSS	 jrSU 4S
 jjrS rU 4S jr	\
S 5       r\
S 5       r\
S 5       rSrU =r$ )SparseCoderi  a  Sparse coding.

Finds a sparse representation of data against a fixed, precomputed
dictionary.

Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::

    X ~= code * dictionary

Read more in the :ref:`User Guide <SparseCoder>`.

Parameters
----------
dictionary : ndarray of shape (n_components, n_features)
    The dictionary atoms used for sparse coding. Lines are assumed to be
    normalized to unit norm.

transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp',             'threshold'}, default='omp'
    Algorithm used to transform the data:

    - `'lars'`: uses the least angle regression method
      (`linear_model.lars_path`);
    - `'lasso_lars'`: uses Lars to compute the Lasso solution;
    - `'lasso_cd'`: uses the coordinate descent method to compute the
      Lasso solution (linear_model.Lasso). `'lasso_lars'` will be faster if
      the estimated components are sparse;
    - `'omp'`: uses orthogonal matching pursuit to estimate the sparse
      solution;
    - `'threshold'`: squashes to zero all coefficients less than alpha from
      the projection ``dictionary * X'``.

transform_n_nonzero_coefs : int, default=None
    Number of nonzero coefficients to target in each column of the
    solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
    and is overridden by `alpha` in the `omp` case. If `None`, then
    `transform_n_nonzero_coefs=int(n_features / 10)`.

transform_alpha : float, default=None
    If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
    penalty applied to the L1 norm.
    If `algorithm='threshold'`, `alpha` is the absolute value of the
    threshold below which coefficients will be squashed to zero.
    If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
    the reconstruction error targeted. In this case, it overrides
    `n_nonzero_coefs`.
    If `None`, default to 1.

split_sign : bool, default=False
    Whether to split the sparse feature vector into the concatenation of
    its negative part and its positive part. This can improve the
    performance of downstream classifiers.

n_jobs : int, default=None
    Number of parallel jobs to run.
    ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
    ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
    for more details.

positive_code : bool, default=False
    Whether to enforce positivity when finding the code.

    .. versionadded:: 0.20

transform_max_iter : int, default=1000
    Maximum number of iterations to perform if `algorithm='lasso_cd'` or
    `lasso_lars`.

    .. versionadded:: 0.22

Attributes
----------
n_components_ : int
    Number of atoms.

n_features_in_ : int
    Number of features seen during :term:`fit`.

    .. versionadded:: 0.24

feature_names_in_ : ndarray of shape (`n_features_in_`,)
    Names of features seen during :term:`fit`. Defined only when `X`
    has feature names that are all strings.

    .. versionadded:: 1.0

See Also
--------
DictionaryLearning : Find a dictionary that sparsely encodes data.
MiniBatchDictionaryLearning : A faster, less accurate, version of the
    dictionary learning algorithm.
MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
SparsePCA : Sparse Principal Components Analysis.
sparse_encode : Sparse coding where each row of the result is the solution
    to a sparse coding problem.

Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import SparseCoder
>>> X = np.array([[-1, -1, -1], [0, 0, 3]])
>>> dictionary = np.array(
...     [[0, 1, 0],
...      [-1, -1, 2],
...      [1, 1, 1],
...      [0, 1, 1],
...      [0, 2, 1]],
...    dtype=np.float64
... )
>>> coder = SparseCoder(
...     dictionary=dictionary, transform_algorithm='lasso_lars',
...     transform_alpha=1e-10,
... )
>>> coder.transform(X)
array([[ 0.,  0., -1.,  0.,  0.],
       [ 0.,  1.,  1.,  0.,  0.]])
r   NFr*   )r   r   r   r   rh   r   r   c          	      <   > [         T	U ]  UUUUUUU5        Xl        g r   )superr   rZ   )
r   rZ   r   r   r   r   rh   r   r   	__class__s
            r&   r   SparseCoder.__init__  s.     	%	
 %r(   c                     U $ )ad  Do nothing and return the estimator unchanged.

This method is just there to implement the usual API and hence
work in pipelines.

Parameters
----------
X : Ignored
    Not used, present for API consistency by convention.

y : Ignored
    Not used, present for API consistency by convention.

Returns
-------
self : object
    Returns the instance itself.
rI   r   rY   ys      r&   rN   SparseCoder.fit/  s	    & r(   c                 6   > [         TU ]  XR                  5      $ )a  Encode the data as a sparse combination of the dictionary atoms.

Coding method is determined by the object parameter
`transform_algorithm`.

Parameters
----------
X : ndarray of shape (n_samples, n_features)
    Training vector, where `n_samples` is the number of samples
    and `n_features` is the number of features.

y : Ignored
    Not used, present for API consistency by convention.

Returns
-------
X_new : ndarray of shape (n_samples, n_components)
    Transformed data.
)r  r   rZ   )r   rY   r  r  s      r&   r   SparseCoder.transformD  s    ( w!!__55r(   c                 8    U R                  XR                  5      $ r   )r   rZ   r   s     r&   r   SparseCoder.inverse_transformZ  s     &&q//::r(   c                 X   > [         TU ]  5       nSUl        SS/UR                  l        U$ )NFrp   rq   )r  __sklearn_tags__requires_fittransformer_tagspreserves_dtyper   tagsr  s     r&   r  SparseCoder.__sklearn_tags__j  s0    w')!1:I0F-r(   c                 4    U R                   R                  S   $ )zNumber of atoms.r   rZ   rJ   r   s    r&   n_components_SparseCoder.n_components_p       $$Q''r(   c                 4    U R                   R                  S   $ )z%Number of features seen during `fit`.rb   r  r  s    r&   n_features_in_SparseCoder.n_features_in_u  r  r(   c                     U R                   $ )&Number of transformed output features.)r  r  s    r&   _n_features_outSparseCoder._n_features_outz  s     !!!r(   )rZ   r   )r   r   r   r  r  r   rN   r   r   r  propertyr  r!  r%  r  __classcell__r  s   @r&   r  r    s{    uv ""&% %.*6,;  ( ( ( ( " "r(   r  c                     ^  \ rS rSr% Sr0 S\" \SSSS9S/_S\" \S	SSS9/_S
\" \S	SSS9/_S\" \S	SSS9/_S\" SS15      /_S\" 1 Sk5      /_S\" \SSSS9S/_S\" \S	SSS9S/_S\S/_S\	R                  S/_S\	R                  S/_S\S/_SS/_SS/_SS/_SS/_SS/_S\" \S	SSS9/0Er\\S'    S,SSS SS!SSSSSSS"S"SS"S"SS#.U 4S$ jjjrS,S% jr\" S&S'9S,S( j5       r\S) 5       rU 4S* jrS+rU =r$ )-r   i  a  Dictionary learning.

Finds a dictionary (a set of atoms) that performs well at sparsely
encoding the fitted data.

Solves the optimization problem::

    (U^*,V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
                (U,V)
                with || V_k ||_2 <= 1 for all  0 <= k < n_components

||.||_Fro stands for the Frobenius norm and ||.||_1,1 stands for
the entry-wise matrix norm which is the sum of the absolute values
of all the entries in the matrix.

Read more in the :ref:`User Guide <DictionaryLearning>`.

Parameters
----------
n_components : int, default=None
    Number of dictionary elements to extract. If None, then ``n_components``
    is set to ``n_features``.

alpha : float, default=1.0
    Sparsity controlling parameter.

max_iter : int, default=1000
    Maximum number of iterations to perform.

tol : float, default=1e-8
    Tolerance for numerical error.

fit_algorithm : {'lars', 'cd'}, default='lars'
    * `'lars'`: uses the least angle regression method to solve the lasso
      problem (:func:`~sklearn.linear_model.lars_path`);
    * `'cd'`: uses the coordinate descent method to compute the
      Lasso solution (:class:`~sklearn.linear_model.Lasso`). Lars will be
      faster if the estimated components are sparse.

    .. versionadded:: 0.17
       *cd* coordinate descent method to improve speed.

transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp',             'threshold'}, default='omp'
    Algorithm used to transform the data:

    - `'lars'`: uses the least angle regression method
      (:func:`~sklearn.linear_model.lars_path`);
    - `'lasso_lars'`: uses Lars to compute the Lasso solution.
    - `'lasso_cd'`: uses the coordinate descent method to compute the
      Lasso solution (:class:`~sklearn.linear_model.Lasso`). `'lasso_lars'`
      will be faster if the estimated components are sparse.
    - `'omp'`: uses orthogonal matching pursuit to estimate the sparse
      solution.
    - `'threshold'`: squashes to zero all coefficients less than alpha from
      the projection ``dictionary * X'``.

    .. versionadded:: 0.17
       *lasso_cd* coordinate descent method to improve speed.

transform_n_nonzero_coefs : int, default=None
    Number of nonzero coefficients to target in each column of the
    solution. This is only used by `algorithm='lars'` and
    `algorithm='omp'`. If `None`, then
    `transform_n_nonzero_coefs=int(n_features / 10)`.

transform_alpha : float, default=None
    If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
    penalty applied to the L1 norm.
    If `algorithm='threshold'`, `alpha` is the absolute value of the
    threshold below which coefficients will be squashed to zero.
    If `None`, defaults to `alpha`.

    .. versionchanged:: 1.2
        When None, default value changed from 1.0 to `alpha`.

n_jobs : int or None, default=None
    Number of parallel jobs to run.
    ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
    ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
    for more details.

code_init : ndarray of shape (n_samples, n_components), default=None
    Initial value for the code, for warm restart. Only used if `code_init`
    and `dict_init` are not None.

dict_init : ndarray of shape (n_components, n_features), default=None
    Initial values for the dictionary, for warm restart. Only used if
    `code_init` and `dict_init` are not None.

callback : callable, default=None
    Callable that gets invoked every five iterations.

    .. versionadded:: 1.3

verbose : bool, default=False
    To control the verbosity of the procedure.

split_sign : bool, default=False
    Whether to split the sparse feature vector into the concatenation of
    its negative part and its positive part. This can improve the
    performance of downstream classifiers.

random_state : int, RandomState instance or None, default=None
    Used for initializing the dictionary when ``dict_init`` is not
    specified, randomly shuffling the data when ``shuffle`` is set to
    ``True``, and updating the dictionary. Pass an int for reproducible
    results across multiple function calls.
    See :term:`Glossary <random_state>`.

positive_code : bool, default=False
    Whether to enforce positivity when finding the code.

    .. versionadded:: 0.20

positive_dict : bool, default=False
    Whether to enforce positivity when finding the dictionary.

    .. versionadded:: 0.20

transform_max_iter : int, default=1000
    Maximum number of iterations to perform if `algorithm='lasso_cd'` or
    `'lasso_lars'`.

    .. versionadded:: 0.22

Attributes
----------
components_ : ndarray of shape (n_components, n_features)
    dictionary atoms extracted from the data

error_ : array
    vector of errors at each iteration

n_features_in_ : int
    Number of features seen during :term:`fit`.

    .. versionadded:: 0.24

feature_names_in_ : ndarray of shape (`n_features_in_`,)
    Names of features seen during :term:`fit`. Defined only when `X`
    has feature names that are all strings.

    .. versionadded:: 1.0

n_iter_ : int
    Number of iterations run.

See Also
--------
MiniBatchDictionaryLearning: A faster, less accurate, version of the
    dictionary learning algorithm.
MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
SparseCoder : Find a sparse representation of data from a fixed,
    precomputed dictionary.
SparsePCA : Sparse Principal Components Analysis.

References
----------

J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (https://www.di.ens.fr/~fbach/mairal_icml09.pdf)

Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_sparse_coded_signal
>>> from sklearn.decomposition import DictionaryLearning
>>> X, dictionary, code = make_sparse_coded_signal(
...     n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
...     random_state=42,
... )
>>> dict_learner = DictionaryLearning(
...     n_components=15, transform_algorithm='lasso_lars', transform_alpha=0.1,
...     random_state=42,
... )
>>> X_transformed = dict_learner.fit(X).transform(X)

We can check the level of sparsity of `X_transformed`:

>>> np.mean(X_transformed == 0)
np.float64(0.527)

We can compare the average squared euclidean norm of the reconstruction
error of the sparse coded signal relative to the squared euclidean norm of
the original signal:

>>> X_hat = X_transformed @ dict_learner.components_
>>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
np.float64(0.056)
r]   rb   Nrc   rd   r7   r   r2   rF   r   r    r   r   >   r   r    r<   rA   r)   r   r   rh   r   r   r   r3   r   rf   r   r   r   r   _parameter_constraintsr*   r   r   F)r7   r2   rF   r   r   r   r   rh   r   r   r   r3   r   r   r   r   r   c          	         > [         TU ]  UUUUU	UU5        Xl        X l        X0l        X@l        XPl        Xl        Xl        Xl	        Xl
        Xl        UU l        g r   )r  r   r]   r7   r2   rF   r   r   r   r   r3   r   r   )r   r]   r7   r2   rF   r   r   r   r   rh   r   r   r   r3   r   r   r   r   r   r  s                      r&   r   DictionaryLearning.__init__X  sf    , 	%	
 )
 *"" (*r(   c                 (    U R                  U5        U $ )[  Fit the model from data in X.

Parameters
----------
X : array-like of shape (n_samples, n_features)
    Training vector, where `n_samples` is the number of samples
    and `n_features` is the number of features.

y : Ignored
    Not used, present for API consistency by convention.

Returns
-------
self : object
    Returns the instance itself.
)r   r  s      r&   rN   DictionaryLearning.fit  s    " 	1r(   Tri   c                 &   [        U R                  U R                  S9  SU R                  -   n[        U R                  5      n[        X5      nU R                  c  UR                  S   nOU R                  n[        UUU R                  U R                  U R                  UU R                  U R                  U R                  U R                  U R                   U R"                  USU R$                  U R                  S9u  pgol        Xpl        Xl        U$ )a  Fit the model from data in X and return the transformed data.

Parameters
----------
X : array-like of shape (n_samples, n_features)
    Training vector, where `n_samples` is the number of samples
    and `n_features` is the number of features.

y : Ignored
    Not used, present for API consistency by convention.

Returns
-------
V : ndarray of shape (n_samples, n_components)
    Transformed data.
r#   r   rb   T)r7   rF   r2   r$   r   rh   r   r   r   r3   r   r   r   r   )r'   r   r   r   r   r   r]   rJ   r   r7   rF   r2   r   rh   r   r   r   r3   r   r   r   r   )	r   rY   r  r$   r   r]   VUEs	            r&   r    DictionaryLearning.fit_transform  s    $ 	d&8&84CUCUVD...)$*;*;<$"$771:L,,L .**]] 33;;nnnn]]LL%,,,,!!
a$ r(   c                 4    U R                   R                  S   $ r$  r   r   rJ   r  s    r&   r%  "DictionaryLearning._n_features_out       %%a((r(   c                 J   > [         TU ]  5       nSS/UR                  l        U$ Nrp   rq   r  r  r  r  r  s     r&   r  #DictionaryLearning.__sklearn_tags__  (    w')1:I0F-r(   )r7   r   r   r   r   r   r   r2   r]   r   r   r   rF   r3   r   )r   r   r   r  r  r   r   r   r   rL   ndarraycallabler+  dict__annotations__r   rN   r   r   r'  r%  r  r  r(  r)  s   @r&   r   r     s   ~@$(AtFCTJ$(4D89$ 	Xh4?@$ 	q$v67	$
 	*fd^45$ 	MN 
$ 	$hxD&PRV%W$ 	HT1d6BDI$ 	8T"$ 	bjj$'$ 	bjj$'$ 	Xt$$ 	I;$  	yk!$" 	(#$$ 	)%$& 	)'$( 	x!T&IJ)$D 2 )+ !"&))+ )+V( 52 62h ) ) r(   r   c                   :  ^  \ rS rSr% Sr0 S\" \SSSS9S/_S\" \S	SSS9/_S
\" \S	SSS9/_S\" SS15      /_SS\/_S\" \SSSS9/_SS/_SS\	R                  /_S\" 1 Sk5      /_S\" \SSSS9S/_S\" \S	SSS9S/_SS/_SS/_SS/_SS/_SS/_S\" \S	SSS9/_S\/\" \S	SSS9/\" \S	SSS9S/S.Er\\S'    S3SSSSS S!SS"SSS#S#SS#S#SSS$S%S&.U 4S' jjjrS( rS) rS* rS+ rS, r\" S!S-9S3S. j5       r\" S!S-9S3S/ j5       r\S0 5       rU 4S1 jrS2rU =r$ )4r   i  ac  Mini-batch dictionary learning.

Finds a dictionary (a set of atoms) that performs well at sparsely
encoding the fitted data.

Solves the optimization problem::

   (U^*,V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
                (U,V)
                with || V_k ||_2 <= 1 for all  0 <= k < n_components

||.||_Fro stands for the Frobenius norm and ||.||_1,1 stands for
the entry-wise matrix norm which is the sum of the absolute values
of all the entries in the matrix.

Read more in the :ref:`User Guide <DictionaryLearning>`.

Parameters
----------
n_components : int, default=None
    Number of dictionary elements to extract.

alpha : float, default=1
    Sparsity controlling parameter.

max_iter : int, default=1_000
    Maximum number of iterations over the complete dataset before
    stopping independently of any early stopping criterion heuristics.

    .. versionadded:: 1.1

fit_algorithm : {'lars', 'cd'}, default='lars'
    The algorithm used:

    - `'lars'`: uses the least angle regression method to solve the lasso
      problem (`linear_model.lars_path`)
    - `'cd'`: uses the coordinate descent method to compute the
      Lasso solution (`linear_model.Lasso`). Lars will be faster if
      the estimated components are sparse.

n_jobs : int, default=None
    Number of parallel jobs to run.
    ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
    ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
    for more details.

batch_size : int, default=256
    Number of samples in each mini-batch.

    .. versionchanged:: 1.3
       The default value of `batch_size` changed from 3 to 256 in version 1.3.

shuffle : bool, default=True
    Whether to shuffle the samples before forming batches.

dict_init : ndarray of shape (n_components, n_features), default=None
    Initial value of the dictionary for warm restart scenarios.

transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp',             'threshold'}, default='omp'
    Algorithm used to transform the data:

    - `'lars'`: uses the least angle regression method
      (`linear_model.lars_path`);
    - `'lasso_lars'`: uses Lars to compute the Lasso solution.
    - `'lasso_cd'`: uses the coordinate descent method to compute the
      Lasso solution (`linear_model.Lasso`). `'lasso_lars'` will be faster
      if the estimated components are sparse.
    - `'omp'`: uses orthogonal matching pursuit to estimate the sparse
      solution.
    - `'threshold'`: squashes to zero all coefficients less than alpha from
      the projection ``dictionary * X'``.

transform_n_nonzero_coefs : int, default=None
    Number of nonzero coefficients to target in each column of the
    solution. This is only used by `algorithm='lars'` and
    `algorithm='omp'`. If `None`, then
    `transform_n_nonzero_coefs=int(n_features / 10)`.

transform_alpha : float, default=None
    If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
    penalty applied to the L1 norm.
    If `algorithm='threshold'`, `alpha` is the absolute value of the
    threshold below which coefficients will be squashed to zero.
    If `None`, defaults to `alpha`.

    .. versionchanged:: 1.2
        When None, default value changed from 1.0 to `alpha`.

verbose : bool or int, default=False
    To control the verbosity of the procedure.

split_sign : bool, default=False
    Whether to split the sparse feature vector into the concatenation of
    its negative part and its positive part. This can improve the
    performance of downstream classifiers.

random_state : int, RandomState instance or None, default=None
    Used for initializing the dictionary when ``dict_init`` is not
    specified, randomly shuffling the data when ``shuffle`` is set to
    ``True``, and updating the dictionary. Pass an int for reproducible
    results across multiple function calls.
    See :term:`Glossary <random_state>`.

positive_code : bool, default=False
    Whether to enforce positivity when finding the code.

    .. versionadded:: 0.20

positive_dict : bool, default=False
    Whether to enforce positivity when finding the dictionary.

    .. versionadded:: 0.20

transform_max_iter : int, default=1000
    Maximum number of iterations to perform if `algorithm='lasso_cd'` or
    `'lasso_lars'`.

    .. versionadded:: 0.22

callback : callable, default=None
    A callable that gets invoked at the end of each iteration.

    .. versionadded:: 1.1

tol : float, default=1e-3
    Control early stopping based on the norm of the differences in the
    dictionary between 2 steps.

    To disable early stopping based on changes in the dictionary, set
    `tol` to 0.0.

    .. versionadded:: 1.1

max_no_improvement : int, default=10
    Control early stopping based on the consecutive number of mini batches
    that does not yield an improvement on the smoothed cost function.

    To disable convergence detection based on cost function, set
    `max_no_improvement` to None.

    .. versionadded:: 1.1

Attributes
----------
components_ : ndarray of shape (n_components, n_features)
    Components extracted from the data.

n_features_in_ : int
    Number of features seen during :term:`fit`.

    .. versionadded:: 0.24

feature_names_in_ : ndarray of shape (`n_features_in_`,)
    Names of features seen during :term:`fit`. Defined only when `X`
    has feature names that are all strings.

    .. versionadded:: 1.0

n_iter_ : int
    Number of iterations over the full dataset.

n_steps_ : int
    Number of mini-batches processed.

    .. versionadded:: 1.1

See Also
--------
DictionaryLearning : Find a dictionary that sparsely encodes data.
MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
SparseCoder : Find a sparse representation of data from a fixed,
    precomputed dictionary.
SparsePCA : Sparse Principal Components Analysis.

References
----------

J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (https://www.di.ens.fr/~fbach/mairal_icml09.pdf)

Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_sparse_coded_signal
>>> from sklearn.decomposition import MiniBatchDictionaryLearning
>>> X, dictionary, code = make_sparse_coded_signal(
...     n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
...     random_state=42)
>>> dict_learner = MiniBatchDictionaryLearning(
...     n_components=15, batch_size=3, transform_algorithm='lasso_lars',
...     transform_alpha=0.1, max_iter=20, random_state=42)
>>> X_transformed = dict_learner.fit_transform(X)

We can check the level of sparsity of `X_transformed`:

>>> np.mean(X_transformed == 0) > 0.5
np.True_

We can compare the average squared euclidean norm of the reconstruction
error of the sparse coded signal relative to the squared euclidean norm of
the original signal:

>>> X_hat = X_transformed @ dict_learner.components_
>>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
np.float64(0.052)
r]   rb   Nrc   rd   r7   r   r2   r   r   r    rh   r   r   rf   r   r   >   r   r    r<   rA   r)   r   r   r3   r   r   r   r   r   )r   rF   r   r+  r*   r   Tr   Fr   ru   )r7   r2   r   rh   r   r   r   r   r   r   r3   r   r   r   r   r   r   rF   r   c          	         > [         TU ]  U	U
UUUUU5        Xl        X l        X0l        X@l        Xl        Xl        Xpl        X`l	        Xl
        Xl        UU l        UU l        UU l        UU l        g r   )r  r   r]   r7   r2   r   r   r3   r   r   r   r   r   r   r   rF   )r   r]   r7   r2   r   rh   r   r   r   r   r   r   r3   r   r   r   r   r   r   rF   r   r  s                        r&   r   $MiniBatchDictionaryLearning.__init__  s|    0 	%	
 )
 *"$$(* "4r(   c                     U R                   U l        U R                  c  UR                  S   U l        [        U R                  U R
                  5        SU R                  -   U l        [        U R                  UR                  S   5      U l	        g )Nrb   r   r   )
r]   _n_componentsrJ   r'   r   r   _fit_algorithmr{   r   _batch_sizer   s     r&   _check_params)MiniBatchDictionaryLearning._check_params  st    !..%!"D 	t1143E3EF&););; t
;r(   c           	         U R                   b  U R                   nO1[        XR                  US9u  pEnUSS2[        R                  4   U-  nU R                  [        U5      ::  a  USU R                  2SS24   nOZ[        R                  " U[        R                  " U R                  [        U5      -
  UR                  S   4UR                  S945      n[        USUR                  SS9n[        R                  " USS	9nU$ )
z!Initialization of the dictionary.N)r   rb   rn   r   F)rm   rn   copyW)requirements)r   r   rH  rL   r   r   concatenater   rJ   rn   r   require)r   rY   r   rZ   _r   s         r&   _initialize_dict,MiniBatchDictionaryLearning._initialize_dict  s    >>%J  /%%L A* 1bjj=)J6JZ0#$8d&8&8$8!$;<JHH++c*o=z?O?OPQ?RS(..J !3aggER
ZZ
=
r(   c                 F   XCS-
  :  a	  US-   U-  nOUS-  U-   S-   U-
  nUS-   U-
  US-   -  nU =R                   U-  sl         U =R                   UR                  U-  U-  -  sl         U =R                  U-  sl        U =R                  UR                  U-  U-  -  sl        g)zUpdate the inner stats inplace.rb   r   N)_ArO   _B)r   rY   r   r   stepthetabetas          r&   _update_inner_stats/MiniBatchDictionaryLearning._update_inner_stats  s    q. AX+EMD(1,z9E	J&51954466D=:--4133:
**r(   c                    UR                   S   n[        UUU R                  U R                  U R                  U R
                  U R                  U R                  S9nSXU-  -
  S-  R                  5       -  U R                  [        R                  " [        R                  " U5      5      -  -   U-  nU R                  XXT5        [        UUUU R                  U R                  U R                  UU R                  S9  U$ )z7Perform the update on the dictionary for one minibatch.r   )r.   r7   rh   r%   r2   r3   r   r   r   )rJ   rr   rI  r7   rh   r   r   r3   r   rL   rV   r]  r   rX  rY  r   )r   rY   rZ   r   rZ  r   r   
batch_costs           r&   _minibatch_step+MiniBatchDictionaryLearning._minibatch_step(  s    WWQZ
 ))**;;'',,LL	
 Az))a/4466jj266"&&,//0
 	  *; 	GGGGLL%''		
 r(   c           
         UR                   S   nUS-   nU[        SXX-  5      ::  a&  U R                  (       a  [        SU SU SU 35        gU R                  c  X l        O/XS-   -  n	[        U	S5      n	U R                  SU	-
  -  X)-  -   U l        U R                  (       a!  [        SU SU SU SU R                   35        [
        R                  " X4-
  5      U R                  -  n
U R                  S:  a2  XR                  ::  a#  U R                  (       a  [        S	U SU 35        g
U R                  b  U R                  U R                  :  a  SU l
        U R                  U l	        OU =R                  S-  sl
        U R                  b=  U R                  U R                  :  a#  U R                  (       a  [        SU SU 35        g
g)a  Helper function to encapsulate the early stopping logic.

Early stopping is based on two factors:
- A small change of the dictionary between two minibatch updates. This is
  controlled by the tol parameter.
- No more improvement on a smoothed estimate of the objective function for a
  a certain number of consecutive minibatch updates. This is controlled by
  the max_no_improvement parameter.
r   rb   r   zMinibatch step /z: mean batch cost: Fz, ewa cost: z,Converged (small dictionary change) at step Tz>Converged (lack of improvement in objective function) at step )rJ   r{   r3   r   	_ewa_costr   r   rH  rF   _ewa_cost_min_no_improvementr   )r   rY   r`  new_dictold_dictr[   rZ  n_stepsr   r7   	dict_diffs              r&   _check_convergence.MiniBatchDictionaryLearning._check_convergenceN  s    WWQZ
 ax 3sI233||vQwi7J:,WX
 >>!'Na-0EqME!^^q5y9J<NNDN<<!$q	1D,l4>>*:< KK 34t7I7II	88a<I1||DTF!G9UV %$:L:L)L#$D !%D  A%  ##/$$(?(??||#fAgY0 r(   ri   c           
         [        X[        R                  [        R                  /SSS9nU R	                  U5        [        U R                  5      U l        U R                  XR                  5      nUR                  5       nU R                  (       a,  UR                  5       nU R                  R                  U5        OUnUR                  u  pgU R                  (       a  [        S5        [        R                  " U R                  U R                  4UR                   S9U l        [        R                  " XpR                  4UR                   S9U l        SU l        SU l        SU l        [-        X`R.                  5      n[0        R2                  " U5      n[5        [        R6                  " X`R.                  -  5      5      n	U R8                  U	-  n
Sn[;        [=        U
5      U5       Hk  u  pX\   nU R?                  XU R                  U5      nU RA                  XX4XkU
5      (       a    O.U RB                  b  U RC                  [E        5       5        X4SS& Mm     US	-   U l#        [        R6                  " U RF                  U	-  5      U l$        X0l%        U $ )
r/  rl   F)rn   rm   rO  r   rN  Nr   r   rb   )&r   rL   rp   rq   rK  r   r   _random_staterU  rO  r   rJ   r3   r   r   rH  rn   rX  rY  re  rf  rg  r   rJ  	itertoolscyclerS   ceilr2   r   r   ra  rl  r   r   n_steps_r   r   )r   rY   r  rZ   ri  X_trainr[   r\   batchesn_steps_per_iterrj  ibatchX_batchr`  s                  r&   rN   MiniBatchDictionaryLearning.fit  s   $ BJJ

33U
 	1/0A0AB**1.@.@A
??$<<ffhG&&w/G '	<<#$ ((!3!34GMM
 ((J(:(:;7==Q ! i)9)9://'*rwwy3C3C'CDE--"22E'NG4HAnG--T%7%7J &&Z9   }}(fh'$QK# 5& Awwt}}/??@%r(   c                    [        U S5      n[        X[        R                  [        R                  /SU(       + S9nU(       d  U R                  U5        [        U R                  5      U l        U R                  XR                  5      nSU l
        [        R                  " U R                  U R                  4UR                  S9U l        [        R                  " UR                  S   U R                  4UR                  S9U l        OU R"                  nU R%                  XU R                  U R                  5        X@l        U =R                  S-  sl
        U $ )ar  Update the model using the data in X as a mini-batch.

Parameters
----------
X : array-like of shape (n_samples, n_features)
    Training vector, where `n_samples` is the number of samples
    and `n_features` is the number of features.

y : Ignored
    Not used, present for API consistency by convention.

Returns
-------
self : object
    Return the instance itself.
r   rl   )rn   rm   r   r   rN  rb   )r   r   rL   rp   rq   rK  r   r   ro  rU  rs  r   rH  rn   rX  rJ   rY  r   ra  )r   rY   r  has_componentsrZ   s        r&   partial_fit'MiniBatchDictionaryLearning.partial_fit  s   $ !}5BJJ

33.FX
 q!!3D4E4E!FD..q2D2DEJDMhh 2 2D4F4FGqwwWDGhh
D,>,>?qwwODG))JQD,>,>N%r(   c                 4    U R                   R                  S   $ r7  r8  r  s    r&   r%  +MiniBatchDictionaryLearning._n_features_out	  r:  r(   c                 J   > [         TU ]  5       nSS/UR                  l        U$ r<  r=  r  s     r&   r  ,MiniBatchDictionaryLearning.__sklearn_tags__	  r?  r(   )rX  rY  rJ  re  rf  rI  rH  rg  ro  r7   r   r   r   r   r   r2   r   r]   r   rs  r   r   r   r   rF   r3   r   )r   r   r   r  r  r   r   r   r   rL   r@  rA  r+  rB  rC  r   rK  rU  r]  ra  rl  r   rN   r}  r'  r%  r  r  r(  r)  s   @r&   r   r     sp   N`$(AtFCTJ$(4D89$ 	Xh4?@$ 	*dF^45	$
 	4"$ 	x!T&AB$ 	I;$ 	dBJJ'$ 	MN 
$ 	$hxD&PRV%W$ 	HT1d6BDI$ 	I;$ 	yk$  	(!$" 	)#$$ 	)%$& 	x!T&IJ'$( 8$q$v67'!T&I4P-$D 6 . !"&-. .`<:+$LBH 5O 6Ob 5* 6*X ) ) r(   r   )NNFNF)r   )6r  rp  r   r   numbersr   r   numpyrL   joblibr   scipyr   baser   r	   r
   r   linear_modelr   r   r   r   utilsr   r   r   r   utils._param_validationr   r   r   utils.extmathr   r   r   utils.parallelr   r   utils.validationr   r   r'   ra   rs   rr   r   r   r   r   r   r  r   r   rI   r(   r&   <module>r     s   
  
  "  #   F E Q Q K K @ @ . =
 
	d5N ^#nt$d#MN
 %Xq$vFM4D8$?Kt$h4?@T"!{;K!$ #''4 

	e-,eX 

	Mh 
M5`n(b ^!{tVn-.$Xq$vFG	 #( O% 'O%O%d ^vtn-.#$Xq$vFG	 #( #p9p9fl<79I l<^]"#] ]"@T*M Tn
B	"3] B	r(   