
"""GraphicalLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""

import operator
import sys
import time
import warnings
from numbers import Integral, Real

import numpy as np
from scipy import linalg

from ..base import _fit_context
from ..exceptions import ConvergenceWarning
from ..linear_model import _cd_fast as cd_fast
from ..linear_model import lars_path_gram
from ..model_selection import check_cv, cross_val_score
from ..utils import Bunch
from ..utils._param_validation import Interval, StrOptions, validate_params
from ..utils.metadata_routing import (
    MetadataRouter,
    MethodMapping,
    _raise_for_params,
    _routing_enabled,
    process_routing,
)
from ..utils.parallel import Parallel, delayed
from ..utils.validation import (
    _is_arraylike_not_scalar,
    check_random_state,
    check_scalar,
    validate_data,
)
from . import EmpiricalCovariance, empirical_covariance, log_likelihood


def _objective(mle, precision_, alpha):
    """Evaluation of the graphical-lasso objective function

the objective function is made of a shifted scaled version of the
normalized log-likelihood (i.e. its empirical mean over the samples) and a
penalisation term to promote sparsity
    """
    p = precision_.shape[0]
    cost = -2.0 * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)
    cost += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum())
    return cost
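
# Editor's note (illustrative, not part of the upstream sources): with S the
# empirical covariance `mle` and K the precision estimate, the value computed
# above is, up to an additive constant,
#
#     cost(K) = tr(S @ K) - log det(K) + alpha * (||K||_1 - ||diag(K)||_1)
#
# so only the off-diagonal entries of K are l1-penalised, which is why the
# l1 norm of the diagonal is subtracted from the elementwise l1 norm.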


def _dual_gap(emp_cov, precision_, alpha):
    """Expression of the dual gap convergence criterion

The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
    """
    gap = np.sum(emp_cov * precision_)
    gap -= precision_.shape[0]
    gap += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum())
    return gap


def _graphical_lasso(
    emp_cov,
    alpha,
    *,
    cov_init=None,
    mode="cd",
    tol=1e-4,
    enet_tol=1e-4,
    max_iter=100,
    verbose=False,
    eps=np.finfo(np.float64).eps,
):
    _, n_features = emp_cov.shape
    if alpha == 0:
        # Early return without regularization
        precision_ = linalg.inv(emp_cov)
        cost = -2.0 * log_likelihood(emp_cov, precision_)
        cost += n_features * np.log(2 * np.pi)
        d_gap = np.sum(emp_cov * precision_) - n_features
        return emp_cov, precision_, (cost, d_gap), 0

    if cov_init is None:
        covariance_ = emp_cov.copy()
    else:
        covariance_ = cov_init.copy()
    # As a trivial regularization (Tikhonov like), we scale down the
    # off-diagonal coefficients of our starting point: this is needed, as
    # in the cross-validation the cov_init can easily be ill-conditioned,
    # and the CV loop blows. Besides, this takes a conservative stand-point
    # on the initial conditions, and it tends to make the convergence go
    # faster.
    covariance_ *= 0.95
    diagonal = emp_cov.flat[:: n_features + 1]
    covariance_.flat[:: n_features + 1] = diagonal
    precision_ = linalg.pinvh(covariance_)

    indices = np.arange(n_features)
    i = 0  # initialize the counter to be robust to max_iter=0
    costs = list()
    # The different l1 regression solvers have different numerical errors
    if mode == "cd":
        errors = dict(over="raise", invalid="ignore")
    else:
        errors = dict(invalid="raise")
    try:
        # be robust to the max_iter=0 edge case, see:
        # https://github.com/scikit-learn/scikit-learn/issues/4134
        d_gap = np.inf
        # set a sub_covariance buffer
        sub_covariance = np.copy(covariance_[1:, 1:], order="C")
        for i in range(max_iter):
            for idx in range(n_features):
                # To keep the contiguous matrix `sub_covariance` equal to
                # covariance_[indices != idx].T[indices != idx]
                # we only need to update 1 column and 1 line when idx changes
                if idx > 0:
                    di = idx - 1
                    sub_covariance[di] = covariance_[di][indices != idx]
                    sub_covariance[:, di] = covariance_[:, di][indices != idx]
                else:
                    sub_covariance[:] = covariance_[1:, 1:]
                row = emp_cov[idx, indices != idx]
                with np.errstate(**errors):
                    if mode == "cd":
                        # Use coordinate descent
                        coefs = -(
                            precision_[indices != idx, idx]
                            / (precision_[idx, idx] + 1000 * eps)
                        )
                        coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
                            coefs,
                            alpha,
                            0,
                            sub_covariance,
                            row,
                            row,
                            max_iter,
                            enet_tol,
                            check_random_state(None),
                            False,
                        )
                    else:  # mode == "lars"
                        _, _, coefs = lars_path_gram(
                            Xy=row,
                            Gram=sub_covariance,
                            n_samples=row.size,
                            alpha_min=alpha / (n_features - 1),
                            copy_Gram=True,
                            eps=eps,
                            method="lars",
                            return_path=False,
                        )
                # Update the precision matrix
                precision_[idx, idx] = 1.0 / (
                    covariance_[idx, idx]
                    - np.dot(covariance_[indices != idx, idx], coefs)
                )
                precision_[indices != idx, idx] = -precision_[idx, idx] * coefs
                precision_[idx, indices != idx] = -precision_[idx, idx] * coefs
                coefs = np.dot(sub_covariance, coefs)
                covariance_[idx, indices != idx] = coefs
                covariance_[indices != idx, idx] = coefs
            if not np.isfinite(precision_.sum()):
                raise FloatingPointError(
                    "The system is too ill-conditioned for this solver"
                )
            d_gap = _dual_gap(emp_cov, precision_, alpha)
            cost = _objective(emp_cov, precision_, alpha)
            if verbose:
                print(
                    "[graphical_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e"
                    % (i, cost, d_gap)
                )
            costs.append((cost, d_gap))
            if np.abs(d_gap) < tol:
                break
            if not np.isfinite(cost) and i > 0:
                raise FloatingPointError(
                    "Non SPD result: the system is too ill-conditioned for this solver"
                )
        else:
            warnings.warn(
                "graphical_lasso: did not converge after %i iteration: dual gap: %.3e"
                % (max_iter, d_gap),
                ConvergenceWarning,
            )
    except FloatingPointError as e:
        e.args = (e.args[0] + ". The system is too ill-conditioned for this solver",)
        raise e

    return covariance_, precision_, costs, i + 1
                  " U5      5      $ )a  Find the maximum alpha for which there are some non-zeros off-diagonal.

Parameters
----------
emp_cov : ndarray of shape (n_features, n_features)
    The sample covariance matrix.

Notes
-----
This results from the bound for all the Lasso problems that are solved
in GraphicalLasso: each time, the row of cov corresponds to Xy. As the
bound for alpha is given by `max(abs(Xy))`, the result follows.
    """
    A = np.copy(emp_cov)
    A.flat[:: A.shape[0] + 1] = 0
    return np.max(np.abs(A))
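
# Editor's note (illustrative, not part of the upstream sources): each column
# update in the graphical lasso solves a Lasso problem whose "Xy" vector is a
# row of the covariance matrix with its diagonal entry removed.  A Lasso
# solution is identically zero whenever alpha >= max(abs(Xy)), so the largest
# off-diagonal magnitude of `emp_cov` is the smallest alpha for which every
# off-diagonal entry of the estimated precision matrix is zero.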


@validate_params(
    {
        "emp_cov": ["array-like"],
        "return_costs": ["boolean"],
        "return_n_iter": ["boolean"],
    },
    prefer_skip_nested_validation=False,
)
def graphical_lasso(
    emp_cov,
    alpha,
    *,
    mode="cd",
    tol=1e-4,
    enet_tol=1e-4,
    max_iter=100,
    verbose=False,
    return_costs=False,
    eps=np.finfo(np.float64).eps,
    return_n_iter=False,
):
    """L1-penalized covariance estimator.

Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

.. versionchanged:: v0.20
    graph_lasso has been renamed to graphical_lasso

Parameters
----------
emp_cov : array-like of shape (n_features, n_features)
    Empirical covariance from which to compute the covariance estimate.

alpha : float
    The regularization parameter: the higher alpha, the more
    regularization, the sparser the inverse covariance.
    Range is (0, inf].

mode : {'cd', 'lars'}, default='cd'
    The Lasso solver to use: coordinate descent or LARS. Use LARS for
    very sparse underlying graphs, where p > n. Elsewhere prefer cd
    which is more numerically stable.

tol : float, default=1e-4
    The tolerance to declare convergence: if the dual gap goes below
    this value, iterations are stopped. Range is (0, inf].

enet_tol : float, default=1e-4
    The tolerance for the elastic net solver used to calculate the descent
    direction. This parameter controls the accuracy of the search direction
    for a given column update, not of the overall parameter estimate. Only
    used for mode='cd'. Range is (0, inf].

max_iter : int, default=100
    The maximum number of iterations.

verbose : bool, default=False
    If verbose is True, the objective function and dual gap are
    printed at each iteration.

return_costs : bool, default=False
    If return_costs is True, the objective function and dual gap
    at each iteration are returned.

eps : float, default=eps
    The machine-precision regularization in the computation of the
    Cholesky diagonal factors. Increase this for very ill-conditioned
    systems. Default is `np.finfo(np.float64).eps`.

return_n_iter : bool, default=False
    Whether or not to return the number of iterations.

Returns
-------
covariance : ndarray of shape (n_features, n_features)
    The estimated covariance matrix.

precision : ndarray of shape (n_features, n_features)
    The estimated (sparse) precision matrix.

costs : list of (objective, dual_gap) pairs
    The list of values of the objective function and the dual gap at
    each iteration. Returned only if return_costs is True.

n_iter : int
    Number of iterations. Returned only if `return_n_iter` is set to True.

See Also
--------
GraphicalLasso : Sparse inverse covariance estimation
    with an l1-penalized estimator.
GraphicalLassoCV : Sparse inverse covariance with
    cross-validated choice of the l1 penalty.

Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.

One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.

Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_sparse_spd_matrix
>>> from sklearn.covariance import empirical_covariance, graphical_lasso
>>> true_cov = make_sparse_spd_matrix(n_dim=3, random_state=42)
>>> rng = np.random.RandomState(42)
>>> X = rng.multivariate_normal(mean=np.zeros(3), cov=true_cov, size=3)
>>> emp_cov = empirical_covariance(X, assume_centered=True)
>>> emp_cov, _ = graphical_lasso(emp_cov, alpha=0.05)
>>> emp_cov
array([[ 1.687,  0.212, -0.209],
       [ 0.212,  0.221, -0.0817],
       [-0.209, -0.0817, 0.232]])
    """
    model = GraphicalLasso(
        alpha=alpha,
        mode=mode,
        covariance="precomputed",
        tol=tol,
        enet_tol=enet_tol,
        max_iter=max_iter,
        verbose=verbose,
        eps=eps,
        assume_centered=True,
    ).fit(emp_cov)

    output = [model.covariance_, model.precision_]
    if return_costs:
        output.append(model.costs_)
    if return_n_iter:
        output.append(model.n_iter_)
    return tuple(output)


class BaseGraphicalLasso(EmpiricalCovariance):
    _parameter_constraints: dict = {
        **EmpiricalCovariance._parameter_constraints,
        "tol": [Interval(Real, 0, None, closed="right")],
        "enet_tol": [Interval(Real, 0, None, closed="right")],
        "max_iter": [Interval(Integral, 0, None, closed="left")],
        "mode": [StrOptions({"cd", "lars"})],
        "verbose": ["verbose"],
        "eps": [Interval(Real, 0, None, closed="both")],
    }
    _parameter_constraints.pop("store_precision")

    def __init__(
        self,
        tol=1e-4,
        enet_tol=1e-4,
        max_iter=100,
        mode="cd",
        verbose=False,
        eps=np.finfo(np.float64).eps,
        assume_centered=False,
    ):
        super().__init__(assume_centered=assume_centered)
        self.tol = tol
        self.enet_tol = enet_tol
        self.max_iter = max_iter
        self.mode = mode
        self.verbose = verbose
        self.eps = eps


class GraphicalLasso(BaseGraphicalLasso):
    """Sparse inverse covariance estimation with an l1-penalized estimator.

For a usage example see
:ref:`sphx_glr_auto_examples_applications_plot_stock_market.py`.

Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

.. versionchanged:: v0.20
    GraphLasso has been renamed to GraphicalLasso

Parameters
----------
alpha : float, default=0.01
    The regularization parameter: the higher alpha, the more
    regularization, the sparser the inverse covariance.
    Range is (0, inf].

mode : {'cd', 'lars'}, default='cd'
    The Lasso solver to use: coordinate descent or LARS. Use LARS for
    very sparse underlying graphs, where p > n. Elsewhere prefer cd
    which is more numerically stable.

covariance : "precomputed", default=None
    If covariance is "precomputed", the input data in `fit` is assumed
    to be the covariance matrix. If `None`, the empirical covariance
    is estimated from the data `X`.

    .. versionadded:: 1.3

tol : float, default=1e-4
    The tolerance to declare convergence: if the dual gap goes below
    this value, iterations are stopped. Range is (0, inf].

enet_tol : float, default=1e-4
    The tolerance for the elastic net solver used to calculate the descent
    direction. This parameter controls the accuracy of the search direction
    for a given column update, not of the overall parameter estimate. Only
    used for mode='cd'. Range is (0, inf].

max_iter : int, default=100
    The maximum number of iterations.

verbose : bool, default=False
    If verbose is True, the objective function and dual gap are
    printed at each iteration.

eps : float, default=eps
    The machine-precision regularization in the computation of the
    Cholesky diagonal factors. Increase this for very ill-conditioned
    systems. Default is `np.finfo(np.float64).eps`.

    .. versionadded:: 1.3

assume_centered : bool, default=False
    If True, data are not centered before computation.
    Useful when working with data whose mean is almost, but not exactly
    zero.
    If False, data are centered before computation.

Attributes
----------
location_ : ndarray of shape (n_features,)
    Estimated location, i.e. the estimated mean.

covariance_ : ndarray of shape (n_features, n_features)
    Estimated covariance matrix

precision_ : ndarray of shape (n_features, n_features)
    Estimated pseudo inverse matrix.

n_iter_ : int
    Number of iterations run.

costs_ : list of (objective, dual_gap) pairs
    The list of values of the objective function and the dual gap at
    each iteration.

    .. versionadded:: 1.3

n_features_in_ : int
    Number of features seen during :term:`fit`.

    .. versionadded:: 0.24

feature_names_in_ : ndarray of shape (`n_features_in_`,)
    Names of features seen during :term:`fit`. Defined only when `X`
    has feature names that are all strings.

    .. versionadded:: 1.0

See Also
--------
graphical_lasso : L1-penalized covariance estimator.
GraphicalLassoCV : Sparse inverse covariance with
    cross-validated choice of the l1 penalty.

Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import GraphicalLasso
>>> true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
...                      [0.0, 0.4, 0.0, 0.0],
...                      [0.2, 0.0, 0.3, 0.1],
...                      [0.0, 0.0, 0.1, 0.7]])
>>> np.random.seed(0)
>>> X = np.random.multivariate_normal(mean=[0, 0, 0, 0],
...                                   cov=true_cov,
...                                   size=200)
>>> cov = GraphicalLasso().fit(X)
>>> np.around(cov.covariance_, decimals=3)
array([[0.816, 0.049, 0.218, 0.019],
       [0.049, 0.364, 0.017, 0.034],
       [0.218, 0.017, 0.322, 0.093],
       [0.019, 0.034, 0.093, 0.69 ]])
>>> np.around(cov.location_, decimals=3)
array([0.073, 0.04 , 0.038, 0.143])
    """

    _parameter_constraints: dict = {
        **BaseGraphicalLasso._parameter_constraints,
        "alpha": [Interval(Real, 0, None, closed="right")],
        "covariance": [StrOptions({"precomputed"}), None],
    }

    def __init__(
        self,
        alpha=0.01,
        *,
        mode="cd",
        covariance=None,
        tol=1e-4,
        enet_tol=1e-4,
        max_iter=100,
        verbose=False,
        eps=np.finfo(np.float64).eps,
        assume_centered=False,
    ):
        super().__init__(
            tol=tol,
            enet_tol=enet_tol,
            max_iter=max_iter,
            mode=mode,
            verbose=verbose,
            eps=eps,
            assume_centered=assume_centered,
        )
        self.alpha = alpha
        self.covariance = covariance
$r0   Tr|   c                 T   [        XSSS9nU R                  S:X  a9  UR                  5       n[        R                  " UR
                  S   5      U l        Oc[        XR                  S9nU R                  (       a)  [        R                  " UR
                  S   5      U l        OUR                  S5      U l        [        UU R                  SU R                  U R                  U R                  U R                  U R                   U R"                  S9	u  U l        U l        U l        U l        U $ )	a(  Fit the GraphicalLasso model to X.

Parameters
----------
X : array-like of shape (n_samples, n_features)
    Data from which to compute the covariance estimate.

y : Ignored
    Not used, present for API consistency by convention.

Returns
-------
self : object
    Returns the instance itself.
        """
        # Covariance does not make sense for a single feature
        X = validate_data(self, X, ensure_min_features=2, ensure_min_samples=2)

        if self.covariance == "precomputed":
            emp_cov = X.copy()
            self.location_ = np.zeros(X.shape[1])
        else:
            emp_cov = empirical_covariance(X, assume_centered=self.assume_centered)
            if self.assume_centered:
                self.location_ = np.zeros(X.shape[1])
            else:
                self.location_ = X.mean(0)

        self.covariance_, self.precision_, self.costs_, self.n_iter_ = (
            _graphical_lasso(
                emp_cov,
                alpha=self.alpha,
                cov_init=None,
                mode=self.mode,
                tol=self.tol,
                enet_tol=self.enet_tol,
                max_iter=self.max_iter,
                verbose=self.verbose,
                eps=self.eps,
            )
        )
        return self


def graphical_lasso_path(
    X,
    alphas,
    cov_init=None,
    X_test=None,
    mode="cd",
    tol=1e-4,
    enet_tol=1e-4,
    max_iter=100,
    verbose=False,
    eps=np.finfo(np.float64).eps,
):
    """l1-penalized covariance estimator along a path of decreasing alphas

Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

Parameters
----------
X : ndarray of shape (n_samples, n_features)
    Data from which to compute the covariance estimate.

alphas : array-like of shape (n_alphas,)
    The list of regularization parameters, decreasing order.

cov_init : array of shape (n_features, n_features), default=None
    The initial guess for the covariance.

X_test : array of shape (n_test_samples, n_features), default=None
    Optional test matrix to measure generalisation error.

mode : {'cd', 'lars'}, default='cd'
    The Lasso solver to use: coordinate descent or LARS. Use LARS for
    very sparse underlying graphs, where p > n. Elsewhere prefer cd
    which is more numerically stable.

tol : float, default=1e-4
    The tolerance to declare convergence: if the dual gap goes below
    this value, iterations are stopped. The tolerance must be a positive
    number.

enet_tol : float, default=1e-4
    The tolerance for the elastic net solver used to calculate the descent
    direction. This parameter controls the accuracy of the search direction
    for a given column update, not of the overall parameter estimate. Only
    used for mode='cd'. The tolerance must be a positive number.

max_iter : int, default=100
    The maximum number of iterations. This parameter should be a strictly
    positive integer.

verbose : int or bool, default=False
    The higher the verbosity flag, the more information is printed
    during the fitting.

eps : float, default=eps
    The machine-precision regularization in the computation of the
    Cholesky diagonal factors. Increase this for very ill-conditioned
    systems. Default is `np.finfo(np.float64).eps`.

    .. versionadded:: 1.3

Returns
-------
covariances_ : list of shape (n_alphas,) of ndarray of shape \
        (n_features, n_features)
    The estimated covariance matrices.

precisions_ : list of shape (n_alphas,) of ndarray of shape \
        (n_features, n_features)
    The estimated (sparse) precision matrices.

scores_ : list of shape (n_alphas,), dtype=float
    The generalisation error (log-likelihood) on the test data.
    Returned only if test data is passed.
r   r   r   .z/[graphical_lasso_path] alpha: %.2e, score: %.2ez"[graphical_lasso_path] alpha: %.2e)ru   r   rP   rT   rs   r`   r   r^   r#   rV   nanr]   sysstderrwriter_   )r   alphasr8   X_testr9   r:   r;   r<   r=   r>   inner_verboser2   rg   covariances_precisions_scores_test_emp_covr+   r*   rd   
this_scores                        r.   graphical_lasso_pathr   K  s   V 7Q;'M"1%Glln6L&KfG+F3	',<$!!%
-)KQ ,z*!+L*E

 ;;z** ffW
NN:&a<JJS!q[!Ej)*
 :UBCG H '11$$) " 	'&&J'rvv&	's   #AE

AF&%F&c                     ^  \ rS rSr% Sr0 \R                  E\" \SSSS9S/\" \SSSS9/S	/\S/S
.Er\	\
S'   SSSSSSSSS\R                  " \R                  5      R                  SS.U 4S jjr\" SS9SS j5       rS rSrU =r$ )GraphicalLassoCVi  a  Sparse inverse covariance w/ cross-validated choice of the l1 penalty.

See glossary entry for :term:`cross-validation estimator`.

Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

.. versionchanged:: v0.20
    GraphLassoCV has been renamed to GraphicalLassoCV

Parameters
----------
alphas : int or array-like of shape (n_alphas,), dtype=float, default=4
    If an integer is given, it fixes the number of points on the
    grids of alpha to be used. If a list is given, it gives the
    grid to be used. See the notes in the class docstring for
    more details. Range is [1, inf) for an integer.
    Range is (0, inf] for an array-like of floats.

n_refinements : int, default=4
    The number of times the grid is refined. Not used if explicit
    values of alphas are passed. Range is [1, inf).

cv : int, cross-validation generator or iterable, default=None
    Determines the cross-validation splitting strategy.
    Possible inputs for cv are:

    - None, to use the default 5-fold cross-validation,
    - integer, to specify the number of folds.
    - :term:`CV splitter`,
    - An iterable yielding (train, test) splits as arrays of indices.

    For integer/None inputs :class:`~sklearn.model_selection.KFold` is used.

    Refer :ref:`User Guide <cross_validation>` for the various
    cross-validation strategies that can be used here.

    .. versionchanged:: 0.20
        ``cv`` default value if None changed from 3-fold to 5-fold.

tol : float, default=1e-4
    The tolerance to declare convergence: if the dual gap goes below
    this value, iterations are stopped. Range is (0, inf].

enet_tol : float, default=1e-4
    The tolerance for the elastic net solver used to calculate the descent
    direction. This parameter controls the accuracy of the search direction
    for a given column update, not of the overall parameter estimate. Only
    used for mode='cd'. Range is (0, inf].

max_iter : int, default=100
    Maximum number of iterations.

mode : {'cd', 'lars'}, default='cd'
    The Lasso solver to use: coordinate descent or LARS. Use LARS for
    very sparse underlying graphs, where number of features is greater
    than number of samples. Elsewhere prefer cd which is more numerically
    stable.

n_jobs : int, default=None
    Number of jobs to run in parallel.
    ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
    ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
    for more details.

    .. versionchanged:: v0.20
       `n_jobs` default changed from 1 to None

verbose : bool, default=False
    If verbose is True, the objective function and duality gap are
    printed at each iteration.

eps : float, default=eps
    The machine-precision regularization in the computation of the
    Cholesky diagonal factors. Increase this for very ill-conditioned
    systems. Default is `np.finfo(np.float64).eps`.

    .. versionadded:: 1.3

assume_centered : bool, default=False
    If True, data are not centered before computation.
    Useful when working with data whose mean is almost, but not exactly
    zero.
    If False, data are centered before computation.

Attributes
----------
location_ : ndarray of shape (n_features,)
    Estimated location, i.e. the estimated mean.

covariance_ : ndarray of shape (n_features, n_features)
    Estimated covariance matrix.

precision_ : ndarray of shape (n_features, n_features)
    Estimated precision matrix (inverse covariance).

costs_ : list of (objective, dual_gap) pairs
    The list of values of the objective function and the dual gap at
    each iteration.

    .. versionadded:: 1.3

alpha_ : float
    Penalization parameter selected.

cv_results_ : dict of ndarrays
    A dict with keys:

    alphas : ndarray of shape (n_alphas,)
        All penalization parameters explored.

    split(k)_test_score : ndarray of shape (n_alphas,)
        Log-likelihood score on left-out data across (k)th fold.

        .. versionadded:: 1.0

    mean_test_score : ndarray of shape (n_alphas,)
        Mean of scores over the folds.

        .. versionadded:: 1.0

    std_test_score : ndarray of shape (n_alphas,)
        Standard deviation of scores over the folds.

        .. versionadded:: 1.0

n_iter_ : int
    Number of iterations run for the optimal alpha.

n_features_in_ : int
    Number of features seen during :term:`fit`.

    .. versionadded:: 0.24

feature_names_in_ : ndarray of shape (`n_features_in_`,)
    Names of features seen during :term:`fit`. Defined only when `X`
    has feature names that are all strings.

    .. versionadded:: 1.0

See Also
--------
graphical_lasso : L1-penalized covariance estimator.
GraphicalLasso : Sparse inverse covariance estimation
    with an l1-penalized estimator.

Notes
-----
The search for the optimal penalization parameter (`alpha`) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.

One of the challenges faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of `alpha` then come out as missing values, but the optimum may
be close to these missing values.

In `fit`, once the best parameter `alpha` is found through
cross-validation, the model is fit again using the entire training set.

Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import GraphicalLassoCV
>>> true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
...                      [0.0, 0.4, 0.0, 0.0],
...                      [0.2, 0.0, 0.3, 0.1],
...                      [0.0, 0.0, 0.1, 0.7]])
>>> np.random.seed(0)
>>> X = np.random.multivariate_normal(mean=[0, 0, 0, 0],
...                                   cov=true_cov,
...                                   size=200)
>>> cov = GraphicalLassoCV().fit(X)
>>> np.around(cov.covariance_, decimals=3)
array([[0.816, 0.051, 0.22 , 0.017],
       [0.051, 0.364, 0.018, 0.036],
       [0.22 , 0.018, 0.322, 0.094],
       [0.017, 0.036, 0.094, 0.69 ]])
>>> np.around(cov.location_, decimals=3)
array([0.073, 0.04 , 0.038, 0.143])

For an example comparing :class:`sklearn.covariance.GraphicalLassoCV`,
:func:`sklearn.covariance.ledoit_wolf` shrinkage and the empirical covariance
on high-dimensional gaussian data, see
:ref:`sphx_glr_auto_examples_covariance_plot_sparse_cov.py`.
r   Nr   r   rx   r   	cv_object)r   n_refinementscvn_jobsr      r6   r7   r5   F)r   r   r   r:   r;   r<   r9   r   r=   r>   r   c          
      \   > [         TU ]  UUUUU	U
US9  Xl        X l        X0l        Xl        g r   )r   r   r   r   r   r   )r   r   r   r   r:   r;   r<   r9   r   r=   r>   r   r   s               r.   r   GraphicalLassoCV.__init__  sC     	+ 	 	
 *r0   Tr|   c                 8  ^ ^^^ [        UT S5        [        T TSS9mT R                  (       a)  [        R                  " TR
                  S   5      T l        OTR                  S5      T l        [        TT R                  S9n[        T R                  USS9n[        5       nT R                  n[        ST R                  S-
  5      m[        U5      (       aB  T R                   H#  n[!        US	["        S[        R$                  S
S9  M%     T R                  mSn	ObT R&                  n	[)        U5      n
SU
-  n[        R*                  " [        R,                  " U5      [        R,                  " U
5      U5      SSS2   m[/        5       (       a  [1        T S40 UD6nO[3        [3        0 S9S9n[4        R4                  " 5       n[7        U	5       GH  n[8        R:                  " 5          [8        R<                  " S[>        5        [A        T RB                  T R                  S9" UUUU 4S jURD                  " TU40 URF                  RD                  D6 5       5      nSSS5        [I        W6 u  nnn[I        U6 n[I        U6 nURK                  [I        TUU5      5        [M        U[N        RP                  " S5      SS9n[        R$                  * nSn[S        U5       H  u  nu  nnn[        R                  " U5      nUS[        RT                  " [        RV                  5      RX                  -  :  a  [        RZ                  n[        R\                  " U5      (       a  UnUU:  d  M  UnUnM     WS:X  a  US   S   n
US   S   nOhUU:X  a&  U[_        U5      S-
  :X  d  UU   S   n
UUS-      S   nO<U[_        U5      S-
  :X  a  UU   S   n
SUU   S   -  nOUUS-
     S   n
UUS-      S   n[        U5      (       dH  [        R*                  " [        R,                  " U
5      [        R,                  " U5      US-   5      mTSS mT R                  (       d  GM  U	S:  d  GM  [a        SUS-   U	[4        R4                  " 5       U-
  4-  5        GM     [        [I        U6 5      n[        US   5      n[        US   5      mTRc                  S5        URc                  [e        [g        5       TUT RB                  TUS95        [        Rh                  " U5      nS[        Rh                  " T5      0T l5        [7        UR
                  S   5       H  nUSS2U4   T Rj                  SU S3'   M     [        R                  " USS9T Rj                  S'   [        Rl                  " USS9T Rj                  S'   TW   nUT l7        [q        UUT Rr                  T Rt                  T Rv                  T Rx                  TT RX                  S9u  T l=        T l>        T l?        T l@        T $ ! , (       d  f       GN= f) a  Fit the GraphicalLasso covariance model to X.

Parameters
----------
X : array-like of shape (n_samples, n_features)
    Data from which to compute the covariance estimate.

y : Ignored
    Not used, present for API consistency by convention.

**params : dict, default=None
    Parameters to be passed to the CV splitter and the
    cross_val_score function.

    .. versionadded:: 1.5
        Only available if `enable_metadata_routing=True`,
        which can be set by using
        ``sklearn.set_config(enable_metadata_routing=True)``.
        See :ref:`Metadata Routing User Guide <metadata_routing>` for
        more details.

Returns
-------
self : object
    Returns the instance itself.
        """
        _raise_for_params(params, self, "fit")

        # Covariance does not make sense for a single feature
        X = validate_data(self, X, ensure_min_features=2)
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        emp_cov = empirical_covariance(X, assume_centered=self.assume_centered)

        cv = check_cv(self.cv, y, classifier=False)

        # List of (alpha, scores, covs)
        path = list()
        n_alphas = self.alphas
        inner_verbose = max(0, self.verbose - 1)

        if _is_arraylike_not_scalar(n_alphas):
            for alpha in self.alphas:
                check_scalar(
                    alpha,
                    "alpha",
                    Real,
                    min_val=0,
                    max_val=np.inf,
                    include_boundaries="right",
                )
            alphas = self.alphas
            n_refinements = 1
        else:
            n_refinements = self.n_refinements
            alpha_1 = alpha_max(emp_cov)
            alpha_0 = 1e-2 * alpha_1
            alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1), n_alphas)[::-1]

        if _routing_enabled():
            routed_params = process_routing(self, "fit", **params)
        else:
            routed_params = Bunch(splitter=Bunch(split={}))

        t0 = time.time()
        for i in range(n_refinements):
            with warnings.catch_warnings():
                # No need to see the convergence warnings on this grid:
                # they will always be points that will not converge
                # during the cross-validation
                warnings.simplefilter("ignore", ConvergenceWarning)
                # Compute the cross-validated loss on the current grid
                this_path = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
                    delayed(graphical_lasso_path)(
                        X[train],
                        alphas=alphas,
                        X_test=X[test],
                        mode=self.mode,
                        tol=self.tol,
                        enet_tol=self.enet_tol,
                        max_iter=int(0.1 * self.max_iter),
                        verbose=inner_verbose,
                        eps=self.eps,
                    )
                    for train, test in cv.split(X, y, **routed_params.splitter.split)
                )

            # Little dance to transform the list into what we need
            covs, _, scores = zip(*this_path)
            covs = zip(*covs)
            scores = zip(*scores)
            path.extend(zip(alphas, scores, covs))
            path = sorted(path, key=operator.itemgetter(0), reverse=True)

            # Find the maximum (avoid using built in 'max' function to
            # have a fully-reproducible selection of the best alpha, even
            # in case of equality)
            best_score = -np.inf
            last_finite_idx = 0
            for index, (alpha, scores, _) in enumerate(path):
                this_score = np.mean(scores)
                if this_score >= 0.1 / np.finfo(np.float64).eps:
                    this_score = np.nan
                if np.isfinite(this_score):
                    last_finite_idx = index
                if this_score >= best_score:
                    best_score = this_score
                    best_index = index

            # Refine the grid
            if best_index == 0:
                # We do not need to go back: we have chosen the highest
                # value of alpha for which there are non-zero coefficients
                alpha_1 = path[0][0]
                alpha_0 = path[1][0]
            elif best_index == last_finite_idx and not best_index == len(path) - 1:
                # We have non-converged models on the upper bound of the
                # grid, we need to refine the grid there
                alpha_1 = path[best_index][0]
                alpha_0 = path[best_index + 1][0]
            elif best_index == len(path) - 1:
                alpha_1 = path[best_index][0]
                alpha_0 = 0.01 * path[best_index][0]
            else:
                alpha_1 = path[best_index - 1][0]
                alpha_0 = path[best_index + 1][0]

            if not _is_arraylike_not_scalar(n_alphas):
                alphas = np.logspace(
                    np.log10(alpha_1), np.log10(alpha_0), n_alphas + 2
                )
                alphas = alphas[1:-1]

            if self.verbose and n_refinements > 1:
                print(
                    "[GraphicalLassoCV] Done refinement % 2i out of %i: % 3is"
                    % (i + 1, n_refinements, time.time() - t0)
                )

        path = list(zip(*path))
        grid_scores = list(path[1])
        alphas = list(path[0])
        # Finally, compute the score with alpha = 0
        alphas.append(0)
        grid_scores.append(
            cross_val_score(
                EmpiricalCovariance(),
                X,
                cv=cv,
                n_jobs=self.n_jobs,
                verbose=inner_verbose,
                params=params,
            )
        )
        grid_scores = np.array(grid_scores)

        self.cv_results_ = {"alphas": np.array(alphas)}
        for i in range(grid_scores.shape[1]):
            self.cv_results_[f"split{i}_test_score"] = grid_scores[:, i]
        self.cv_results_["mean_test_score"] = np.mean(grid_scores, axis=1)
        self.cv_results_["std_test_score"] = np.std(grid_scores, axis=1)

        best_alpha = alphas[best_index]
        self.alpha_ = best_alpha

        # Finally fit the model with the selected alpha
        self.covariance_, self.precision_, self.costs_, self.n_iter_ = (
            _graphical_lasso(
                emp_cov,
                alpha=best_alpha,
                mode=self.mode,
                tol=self.tol,
                enet_tol=self.enet_tol,
                max_iter=self.max_iter,
                verbose=inner_verbose,
                eps=self.eps,
            )
        )
        return self
    def get_metadata_routing(self):
        """Get metadata routing of this object.

Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.

.. versionadded:: 1.5

Returns
-------
routing : MetadataRouter
    A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
    routing information.
        """
        router = MetadataRouter(owner=self.__class__.__name__).add(
            splitter=check_cv(self.cv),
            method_mapping=MethodMapping().add(callee="split", caller="fit"),
        )
        return router
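

# Editor's note (illustrative, not part of the upstream sources): a minimal
# end-to-end sketch of the two estimators defined in this module, written
# against the public API:
#
#     import numpy as np
#     from sklearn.covariance import GraphicalLasso, GraphicalLassoCV
#
#     rng = np.random.RandomState(0)
#     true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
#                          [0.0, 0.4, 0.0, 0.0],
#                          [0.2, 0.0, 0.3, 0.1],
#                          [0.0, 0.0, 0.1, 0.7]])
#     X = rng.multivariate_normal(mean=np.zeros(4), cov=true_cov, size=200)
#
#     fixed = GraphicalLasso(alpha=0.05).fit(X)  # fixed penalty
#     tuned = GraphicalLassoCV().fit(X)          # alpha chosen by CV
#     print(tuned.alpha_, np.around(tuned.precision_, 2))
#
# The iteratively refined alpha grid described in the GraphicalLassoCV
# docstring is what `tuned.cv_results_["alphas"]` records.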