
"""GraphicalLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""

import operator
import sys
import time
import warnings
from numbers import Integral, Real

import numpy as np
from scipy import linalg

from ..base import _fit_context
from ..exceptions import ConvergenceWarning
from ..linear_model import _cd_fast as cd_fast
from ..linear_model import lars_path_gram
from ..model_selection import check_cv, cross_val_score
from ..utils import Bunch
from ..utils._param_validation import Interval, StrOptions, validate_params
from ..utils.metadata_routing import (
    MetadataRouter,
    MethodMapping,
    _raise_for_params,
    _routing_enabled,
    process_routing,
)
from ..utils.parallel import Parallel, delayed
from ..utils.validation import (
    _is_arraylike_not_scalar,
    check_random_state,
    check_scalar,
)
from . import EmpiricalCovariance, empirical_covariance, log_likelihood


# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
    """Evaluation of the graphical-lasso objective function

    the objective function is made of a shifted scaled version of the
    normalized log-likelihood (i.e. its empirical mean over the samples) and a
    penalisation term to promote sparsity
    """
    p = precision_.shape[0]
    cost = -2.0 * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)
    cost += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum())
    return cost
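

# For reference: with sample covariance S and precision estimate K, the
# objective evaluated above is, up to an additive constant,
#
#     objective(K) = -log det(K) + trace(S K) + alpha * ||K||_1,offdiag
#
# where ||.||_1,offdiag sums the absolute values of the off-diagonal entries
# only -- the diagonal coefficients are deliberately left unpenalised.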
  z  z  }|S )zExpression of the dual gap convergence criterion

    The specific definition is given in Duchi "Projected Subgradient Methods
    for Learning Sparse Gaussians".
    r   )r"   r&   r!   r%   r'   )emp_covr)   r*   gaps       r-   	_dual_gapr3   9   sr     &&:%
&C:AC5BFF:&**,rvvbggj6I/J/N/N/PPQQCJr/   cd-C6?d   F)cov_initmodetolenet_tolmax_iterverboseepsc                   | j                   \  }	}
|dk(  rst        j                  |       }dt        | |      z  }||
t	        j
                  dt        j                  z        z  z  }t	        j                  | |z        |
z
  }| |||fdfS || j                         }n|j                         }|dz  }| j                  d d |
dz      }||j                  d d |
dz   <   t        j                  |      }t	        j                  |
      }d}t               }|dk(  rt        dd	      }nt        d
      }	 t        j                  }t	        j                  |dd dd f   d      }t        |      D ]  }t        |
      D ]X  }|dkD  r*|dz
  }||   ||k7     ||<   |d d |f   ||k7     |d d |f<   n|dd dd f   |d d  | |||k7  f   }t	        j                   di |5  |dk(  rF|||k7  |f   |||f   d|z  z   z   }t#        j$                  ||d|||||t'        d       d
      \  }}	}	}	n't)        |||j*                  ||
dz
  z  d|dd      \  }	}	}d d d        d|||f   t	        j,                  |||k7  |f         z
  z  |||f<   |||f    |z  |||k7  |f<   |||f    |z  ||||k7  f<   t	        j,                  ||      }|||||k7  f<   ||||k7  |f<   [ t	        j.                  |j                               st1        d      t3        | ||      }t5        | ||      }|rt7        d|||fz         |j9                  ||f       t	        j:                  |      |k  r nIt	        j.                  |      r
|dkD  st1        d       t=        j>                  d||fz  t@               ||||dz   fS # 1 sw Y   exY w# t0        $ r}|jB                  d   dz   f|_!        |d }~ww xY w)Nr   r    r   gffffff?r   r4   raiseignore)overinvalid)rB   C)orderi  FTlars)XyGram	n_samples	alpha_min	copy_Gramr=   methodreturn_pathg      ?z1The system is too ill-conditioned for this solverz<[graphical_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3ezANon SPD result: the system is too ill-conditioned for this solverzDgraphical_lasso: did not converge after %i iteration: dual gap: %.3ez3. The system is too ill-conditioned for this solver )"r!   r   invr   r"   r#   r$   r&   copyflatpinvharangelistdictinfrangeerrstatecd_fastenet_coordinate_descent_gramr   r
   sizedotisfiniteFloatingPointErrorr3   r.   printappendr%   warningswarnr   args)r1   r*   r7   r8   r9   r:   r;   r<   r=   _
n_featuresr)   r,   d_gapcovariance_diagonalindicesicostserrorssub_covarianceidxdirowcoefses                             r-   _graphical_lassorr   F   s    MMMAzzZZ(
nWj99
RVVAI...w+,z9
T5M144llnmmo 4K||-zA~-.H*2K&
Q&'k*Jii
#G	AFEt|7H5g&T QRV!4C@xAZ( 7qB)4RC)HN2&,72,>w#~,NN1b5)(3ABF(;N1%c7c>12[[*6*t| 'w#~s':;)#s(3dSj@B! *1)M)M!!*$$.t4!*q!Q '5"!/&)hh&+zA~&>&* ##)(-	'1e) +> (+S)ff[C)<=uEF(
38$ 4>c3h3G2G%2O
7c>3./3=c3h3G2G%2O
33./~u538CC/038GsNC/0e )f ;;z~~/0(G  gz59Egz59DR$&' LL$'vve}s";;t$Q(W G !N MMVU#$" 
E1q500I +*@  &&)SSUs?   0B*N= A3N0D&N= 5N= <+N= 0N:5N= =	O%O  O%c                     t        j                  |       }d|j                  dd|j                  d   dz   <   t        j                  t        j
                  |            S )a  Find the maximum alpha for which there are some non-zeros off-diagonal.

    Parameters
    ----------
    emp_cov : ndarray of shape (n_features, n_features)
        The sample covariance matrix.

    Notes
    -----
    This results from the bound for the all the Lasso that are solved
    in GraphicalLasso: each time, the row of cov corresponds to Xy. As the
    bound for alpha is given by `max(abs(Xy))`, the result follows.
    r   Nr   )r"   rO   rP   r!   maxr%   )r1   As     r-   	alpha_maxrv      sI     	A !AFFaggaj1n66"&&)r/   
array-likeboolean)r1   return_costsreturn_n_iterprefer_skip_nested_validation)r8   r9   r:   r;   r<   ry   r=   rz   c                   t        ||d|||||d	      j                  |       }
|
j                  |
j                  g}|r|j	                  |
j
                         |	r|j	                  |
j                         t        |      S )a<  L1-penalized covariance estimator.

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    .. versionchanged:: v0.20
        graph_lasso has been renamed to graphical_lasso

    Parameters
    ----------
    emp_cov : array-like of shape (n_features, n_features)
        Empirical covariance from which to compute the covariance estimate.

    alpha : float
        The regularization parameter: the higher alpha, the more
        regularization, the sparser the inverse covariance.
        Range is (0, inf].

    mode : {'cd', 'lars'}, default='cd'
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.

    tol : float, default=1e-4
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped. Range is (0, inf].

    enet_tol : float, default=1e-4
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'. Range is (0, inf].

    max_iter : int, default=100
        The maximum number of iterations.

    verbose : bool, default=False
        If verbose is True, the objective function and dual gap are
        printed at each iteration.

    return_costs : bool, default=False
        If return_costs is True, the objective function and dual gap
        at each iteration are returned.

    eps : float, default=eps
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Default is `np.finfo(np.float64).eps`.

    return_n_iter : bool, default=False
        Whether or not to return the number of iterations.

    Returns
    -------
    covariance : ndarray of shape (n_features, n_features)
        The estimated covariance matrix.

    precision : ndarray of shape (n_features, n_features)
        The estimated (sparse) precision matrix.

    costs : list of (objective, dual_gap) pairs
        The list of values of the objective function and the dual gap at
        each iteration. Returned only if return_costs is True.

    n_iter : int
        Number of iterations. Returned only if `return_n_iter` is set to True.

    See Also
    --------
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.
    GraphicalLassoCV : Sparse inverse covariance with
        cross-validated choice of the l1 penalty.

    Notes
    -----
    The algorithm employed to solve this problem is the GLasso algorithm,
    from the Friedman 2008 Biostatistics paper. It is the same algorithm
    as in the R `glasso` package.

    One possible difference with the `glasso` R package is that the
    diagonal coefficients are not penalized.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.datasets import make_sparse_spd_matrix
    >>> from sklearn.covariance import empirical_covariance, graphical_lasso
    >>> true_cov = make_sparse_spd_matrix(n_dim=3, random_state=42)
    >>> rng = np.random.RandomState(42)
    >>> X = rng.multivariate_normal(mean=np.zeros(3), cov=true_cov, size=3)
    >>> emp_cov = empirical_covariance(X, assume_centered=True)
    >>> emp_cov, _ = graphical_lasso(emp_cov, alpha=0.05)
    >>> emp_cov
    array([[ 1.68...,  0.21..., -0.20...],
           [ 0.21...,  0.22..., -0.08...],
           [-0.20..., -0.08...,  0.23...]])
    """
    model = GraphicalLasso(
        alpha=alpha,
        mode=mode,
        covariance="precomputed",
        tol=tol,
        enet_tol=enet_tol,
        max_iter=max_iter,
        verbose=verbose,
        eps=eps,
        assume_centered=True,
    ).fit(emp_cov)
    output = [model.covariance_, model.precision_]
    if return_costs:
        output.append(model.costs_)
    if return_n_iter:
        output.append(model.n_iter_)
    return tuple(output)


class BaseGraphicalLasso(EmpiricalCovariance):
    _parameter_constraints: dict = {
        **EmpiricalCovariance._parameter_constraints,
        "tol": [Interval(Real, 0, None, closed="right")],
        "enet_tol": [Interval(Real, 0, None, closed="right")],
        "max_iter": [Interval(Integral, 0, None, closed="left")],
        "mode": [StrOptions({"cd", "lars"})],
        "verbose": ["verbose"],
        "eps": [Interval(Real, 0, None, closed="both")],
    }
    _parameter_constraints.pop("store_precision")

    def __init__(
        self,
        tol=1e-4,
        enet_tol=1e-4,
        max_iter=100,
        mode="cd",
        verbose=False,
        eps=np.finfo(np.float64).eps,
        assume_centered=False,
    ):
        super().__init__(assume_centered=assume_centered)
        self.tol = tol
        self.enet_tol = enet_tol
        self.max_iter = max_iter
        self.mode = mode
        self.verbose = verbose
        self.eps = eps
            e Zd ZU dZi ej
                   eeddd      g edh      dgdZe	e
d<   	 dd	dd
d
dd ej                  ej                        j                  dd fdZ ed      dd       Z xZS )r   ag  Sparse inverse covariance estimation with an l1-penalized estimator.

    For a usage example see
    :ref:`sphx_glr_auto_examples_applications_plot_stock_market.py`.

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    .. versionchanged:: v0.20
        GraphLasso has been renamed to GraphicalLasso

    Parameters
    ----------
    alpha : float, default=0.01
        The regularization parameter: the higher alpha, the more
        regularization, the sparser the inverse covariance.
        Range is (0, inf].

    mode : {'cd', 'lars'}, default='cd'
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.

    covariance : "precomputed", default=None
        If covariance is "precomputed", the input data in `fit` is assumed
        to be the covariance matrix. If `None`, the empirical covariance
        is estimated from the data `X`.

        .. versionadded:: 1.3

    tol : float, default=1e-4
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped. Range is (0, inf].

    enet_tol : float, default=1e-4
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'. Range is (0, inf].

    max_iter : int, default=100
        The maximum number of iterations.

    verbose : bool, default=False
        If verbose is True, the objective function and dual gap are
        plotted at each iteration.

    eps : float, default=eps
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Default is `np.finfo(np.float64).eps`.

        .. versionadded:: 1.3

    assume_centered : bool, default=False
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False, data are centered before computation.

    Attributes
    ----------
    location_ : ndarray of shape (n_features,)
        Estimated location, i.e. the estimated mean.

    covariance_ : ndarray of shape (n_features, n_features)
        Estimated covariance matrix

    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo inverse matrix.

    n_iter_ : int
        Number of iterations run.

    costs_ : list of (objective, dual_gap) pairs
        The list of values of the objective function and the dual gap at
        each iteration. Returned only if return_costs is True.

        .. versionadded:: 1.3

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    graphical_lasso : L1-penalized covariance estimator.
    GraphicalLassoCV : Sparse inverse covariance with
        cross-validated choice of the l1 penalty.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import GraphicalLasso
    >>> true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
    ...                      [0.0, 0.4, 0.0, 0.0],
    ...                      [0.2, 0.0, 0.3, 0.1],
    ...                      [0.0, 0.0, 0.1, 0.7]])
    >>> np.random.seed(0)
    >>> X = np.random.multivariate_normal(mean=[0, 0, 0, 0],
    ...                                   cov=true_cov,
    ...                                   size=200)
    >>> cov = GraphicalLasso().fit(X)
    >>> np.around(cov.covariance_, decimals=3)
    array([[0.816, 0.049, 0.218, 0.019],
           [0.049, 0.364, 0.017, 0.034],
           [0.218, 0.017, 0.322, 0.093],
           [0.019, 0.034, 0.093, 0.69 ]])
    >>> np.around(cov.location_, decimals=3)
    array([0.073, 0.04 , 0.038, 0.143])
    """

    _parameter_constraints: dict = {
        **BaseGraphicalLasso._parameter_constraints,
        "alpha": [Interval(Real, 0, None, closed="right")],
        "covariance": [StrOptions({"precomputed"}), None],
    }

    def __init__(
        self,
        alpha=0.01,
        *,
        mode="cd",
        covariance=None,
        tol=1e-4,
        enet_tol=1e-4,
        max_iter=100,
        verbose=False,
        eps=np.finfo(np.float64).eps,
        assume_centered=False,
    ):
        super().__init__(
            tol=tol,
            enet_tol=enet_tol,
            max_iter=max_iter,
            mode=mode,
            verbose=verbose,
            eps=eps,
            assume_centered=assume_centered,
        )
        self.alpha = alpha
        self.covariance = covariance

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the GraphicalLasso model to X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Data from which to compute the covariance estimate.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # Covariance does not make sense for a single feature
        X = self._validate_data(X, ensure_min_features=2, ensure_min_samples=2)

        if self.covariance == "precomputed":
            emp_cov = X.copy()
            self.location_ = np.zeros(X.shape[1])
        else:
            emp_cov = empirical_covariance(X, assume_centered=self.assume_centered)
            if self.assume_centered:
                self.location_ = np.zeros(X.shape[1])
            else:
                self.location_ = X.mean(0)

        self.covariance_, self.precision_, self.costs_, self.n_iter_ = _graphical_lasso(
            emp_cov,
            alpha=self.alpha,
            cov_init=None,
            mode=self.mode,
            tol=self.tol,
            enet_tol=self.enet_tol,
            max_iter=self.max_iter,
            verbose=self.verbose,
            eps=self.eps,
        )
        return self


def graphical_lasso_path(
    X,
    alphas,
    cov_init=None,
    X_test=None,
    mode="cd",
    tol=1e-4,
    enet_tol=1e-4,
    max_iter=100,
    verbose=False,
    eps=np.finfo(np.float64).eps,
):
    """l1-penalized covariance estimator along a path of decreasing alphas

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Data from which to compute the covariance estimate.

    alphas : array-like of shape (n_alphas,)
        The list of regularization parameters, decreasing order.

    cov_init : array of shape (n_features, n_features), default=None
        The initial guess for the covariance.

    X_test : array of shape (n_test_samples, n_features), default=None
        Optional test matrix to measure generalisation error.

    mode : {'cd', 'lars'}, default='cd'
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.

    tol : float, default=1e-4
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped. The tolerance must be a positive
        number.

    enet_tol : float, default=1e-4
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'. The tolerance must be a positive number.

    max_iter : int, default=100
        The maximum number of iterations. This parameter should be a strictly
        positive integer.

    verbose : int or bool, default=False
        The higher the verbosity flag, the more information is printed
        during the fitting.

    eps : float, default=eps
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Default is `np.finfo(np.float64).eps`.

        .. versionadded:: 1.3

    Returns
    -------
    covariances_ : list of shape (n_alphas,) of ndarray of shape \
            (n_features, n_features)
        The estimated covariance matrices.

    precisions_ : list of shape (n_alphas,) of ndarray of shape \
            (n_features, n_features)
        The estimated (sparse) precision matrices.

    scores_ : list of shape (n_alphas,), dtype=float
        The generalisation error (log-likelihood) on the test data.
        Returned only if test data is passed.
    """
    inner_verbose = max(0, verbose - 1)
    emp_cov = empirical_covariance(X)
    if cov_init is None:
        covariance_ = emp_cov.copy()
    else:
        covariance_ = cov_init
    covariances_ = list()
    precisions_ = list()
    scores_ = list()
    if X_test is not None:
        test_emp_cov = empirical_covariance(X_test)

    for alpha in alphas:
        try:
            # Capture the errors, and move on
            covariance_, precision_, _, _ = _graphical_lasso(
                emp_cov,
                alpha=alpha,
                cov_init=covariance_,
                mode=mode,
                tol=tol,
                enet_tol=enet_tol,
                max_iter=max_iter,
                verbose=inner_verbose,
                eps=eps,
            )
            covariances_.append(covariance_)
            precisions_.append(precision_)
            if X_test is not None:
                this_score = log_likelihood(test_emp_cov, precision_)
        except FloatingPointError:
            this_score = -np.inf
            covariances_.append(np.nan)
            precisions_.append(np.nan)
        if X_test is not None:
            if not np.isfinite(this_score):
                this_score = -np.inf
            scores_.append(this_score)
        if verbose == 1:
            sys.stderr.write(".")
        elif verbose > 1:
            if X_test is not None:
                print(
                    "[graphical_lasso_path] alpha: %.2e, score: %.2e"
                    % (alpha, this_score)
                )
            else:
                print("[graphical_lasso_path] alpha: %.2e" % alpha)
    if X_test is not None:
        return covariances_, precisions_, scores_
    return covariances_, precisions_


class GraphicalLassoCV(BaseGraphicalLasso):
    """Sparse inverse covariance w/ cross-validated choice of the l1 penalty.

    See glossary entry for :term:`cross-validation estimator`.

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    .. versionchanged:: v0.20
        GraphLassoCV has been renamed to GraphicalLassoCV

    Parameters
    ----------
    alphas : int or array-like of shape (n_alphas,), dtype=float, default=4
        If an integer is given, it fixes the number of points on the
        grids of alpha to be used. If a list is given, it gives the
        grid to be used. See the notes in the class docstring for
        more details. Range is [1, inf) for an integer.
        Range is (0, inf] for an array-like of floats.

    n_refinements : int, default=4
        The number of times the grid is refined. Not used if explicit
        values of alphas are passed. Range is [1, inf).

    cv : int, cross-validation generator or iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 5-fold cross-validation,
        - integer, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        For integer/None inputs :class:`~sklearn.model_selection.KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

        .. versionchanged:: 0.20
            ``cv`` default value if None changed from 3-fold to 5-fold.

    tol : float, default=1e-4
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped. Range is (0, inf].

    enet_tol : float, default=1e-4
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'. Range is (0, inf].

    max_iter : int, default=100
        Maximum number of iterations.

    mode : {'cd', 'lars'}, default='cd'
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where number of features is greater
        than number of samples. Elsewhere prefer cd which is more numerically
        stable.

    n_jobs : int, default=None
        Number of jobs to run in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

        .. versionchanged:: v0.20
           `n_jobs` default changed from 1 to None

    verbose : bool, default=False
        If verbose is True, the objective function and duality gap are
        printed at each iteration.

    eps : float, default=eps
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Default is `np.finfo(np.float64).eps`.

        .. versionadded:: 1.3

    assume_centered : bool, default=False
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False, data are centered before computation.

    Attributes
    ----------
    location_ : ndarray of shape (n_features,)
        Estimated location, i.e. the estimated mean.

    covariance_ : ndarray of shape (n_features, n_features)
        Estimated covariance matrix.

    precision_ : ndarray of shape (n_features, n_features)
        Estimated precision matrix (inverse covariance).

    costs_ : list of (objective, dual_gap) pairs
        The list of values of the objective function and the dual gap at
        each iteration. Returned only if return_costs is True.

        .. versionadded:: 1.3

    alpha_ : float
        Penalization parameter selected.

    cv_results_ : dict of ndarrays
        A dict with keys:

        alphas : ndarray of shape (n_alphas,)
            All penalization parameters explored.

        split(k)_test_score : ndarray of shape (n_alphas,)
            Log-likelihood score on left-out data across (k)th fold.

            .. versionadded:: 1.0

        mean_test_score : ndarray of shape (n_alphas,)
            Mean of scores over the folds.

            .. versionadded:: 1.0

        std_test_score : ndarray of shape (n_alphas,)
            Standard deviation of scores over the folds.

            .. versionadded:: 1.0

    n_iter_ : int
        Number of iterations run for the optimal alpha.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    graphical_lasso : L1-penalized covariance estimator.
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.

    Notes
    -----
    The search for the optimal penalization parameter (`alpha`) is done on an
    iteratively refined grid: first the cross-validated scores on a grid are
    computed, then a new refined grid is centered around the maximum, and so
    on.

    One of the challenges which is faced here is that the solvers can
    fail to converge to a well-conditioned estimate. The corresponding
    values of `alpha` then come out as missing values, but the optimum may
    be close to these missing values.

    In `fit`, once the best parameter `alpha` is found through
    cross-validation, the model is fit again using the entire training set.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import GraphicalLassoCV
    >>> true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
    ...                      [0.0, 0.4, 0.0, 0.0],
    ...                      [0.2, 0.0, 0.3, 0.1],
    ...                      [0.0, 0.0, 0.1, 0.7]])
    >>> np.random.seed(0)
    >>> X = np.random.multivariate_normal(mean=[0, 0, 0, 0],
    ...                                   cov=true_cov,
    ...                                   size=200)
    >>> cov = GraphicalLassoCV().fit(X)
    >>> np.around(cov.covariance_, decimals=3)
    array([[0.816, 0.051, 0.22 , 0.017],
           [0.051, 0.364, 0.018, 0.036],
           [0.22 , 0.018, 0.322, 0.094],
           [0.017, 0.036, 0.094, 0.69 ]])
    >>> np.around(cov.location_, decimals=3)
    array([0.073, 0.04 , 0.038, 0.143])
    r   Nr   r   rw   r   	cv_object)r   n_refinementscvn_jobsr      r5   r6   r4   F)r   r   r   r9   r:   r;   r8   r   r<   r=   r   c          	      j    t         |   |||||	|
|       || _        || _        || _        || _        y r   )r   r   r   r   r   r   )r   r   r   r   r9   r:   r;   r8   r   r<   r=   r   r   s               r-   r   zGraphicalLassoCV.__init__  sK     	+ 	 	
 *r/   Tr{   c           
          t        | d        j                  d       j                  r(t        j                  j
                  d          _        nj                  d       _        t         j                        }t         j                  |d      }t               } j                  }t        d j                  dz
        t        |      rC j                  D ]%  }t!        |d	t"        dt        j$                  d
       '  j                  d}	n_ j&                  }	t)        |      }
d|
z  }t        j*                  t        j,                  |      t        j,                  |
      |      ddd   t/               rt1         dfi |}nt3        t3        i             }t5        j4                         }t7        |	      D ]  }t9        j:                         5  t9        j<                  dt>                tA         jB                   j                         fd |jD                  |fi |jF                  jD                  D              }ddd       tI         \  }}}tI        | }tI        | }|jK                  tI        ||             tM        |tO        jP                  d      d      }t        j$                   }d}tS        |      D ]  \  }\  }}}t        j                  |      }|dt        jT                  t        jV                        jX                  z  k\  rt        jZ                  }t        j\                  |      r|}||k\  s|}|} dk(  r|d   d   }
|d   d   }ne||k(  r%|t_        |      dz
  k(  s||   d   }
||dz      d   }n;|t_        |      dz
  k(  r||   d   }
d||   d   z  }n||dz
     d   }
||dz      d   }t        |      sEt        j*                  t        j,                  |
      t        j,                  |      |dz         dd  j                  si|	dkD  spta        d|dz   |	t5        j4                         |z
  fz          t        tI        |       }t        |d         }t        |d         jc                  d       |jc                  te        tg               | jB                  |             t        jh                  |      }dt        jh                        i _5        t7        |j
                  d         D ]  }|dd|f    jj                  d| d<    t        j                  |d       jj                  d<   t        jl                  |d       jj                  d<      }| _7        tq        || jr                   jt                   jv                   jx                   jX                        \   _=         _>         _?         _@         S # 1 sw Y   xY w) aX  Fit the GraphicalLasso covariance model to X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Data from which to compute the covariance estimate.

        y : Ignored
            Not used, present for API consistency by convention.

        **params : dict, default=None
            Parameters to be passed to the CV splitter and the
            cross_val_score function.

            .. versionadded:: 1.5
                Only available if `enable_metadata_routing=True`,
                which can be set by using
                ``sklearn.set_config(enable_metadata_routing=True)``.
                See :ref:`Metadata Routing User Guide <metadata_routing>` for
                more details.

        Returns
        -------
        self : object
            Returns the instance itself.
        r   r   )r   r   r   r   F)
classifierr*   r   )min_valmax_valinclude_boundariesr   N)split)splitterr@   )r   r<   c              3      K   | ]i  \  }} t        t              |   |   j                  j                  j                  t        d j                  z        j                  	       k yw)皙?)r   r   r8   r9   r:   r;   r<   r=   N)r   r   r8   r9   r:   intr;   r=   ).0traintestr   r   r   r   s      r-   	<genexpr>z'GraphicalLassoCV.fit.<locals>.<genexpr>  sw      O (Vt 2G01%% w!YY HH!%!$S4==%8!9 - HH
 
 (Vs   A/A2T)keyreverser   z8[GraphicalLassoCV] Done refinement % 2i out of %i: % 3is)r   r   r<   paramsr   r   _test_score)axismean_test_scorestd_test_score)r*   r8   r9   r:   r;   r<   r=   )Ar   r   r   r"   r   r!   r   r   r   r   r   rS   r   rt   r<   r   r   r   rU   r   rv   logspacelog10r   r   r   timerV   r`   catch_warningssimplefilterr   r   r   r   r   zipextendsortedoperator
itemgetter	enumerater   r   r=   r   r\   lenr^   r_   r   r   arraycv_results_stdalpha_rr   r8   r9   r:   r;   rf   r)   r   r   )r   r   r   r   r1   r   pathn_alphasr*   r   alpha_1alpha_0routed_paramst0ri   	this_pathcovsrc   scores
best_scorelast_finite_idxindexr   
best_indexgrid_scores
best_alphar   r   s   ``                        @@r-   r   zGraphicalLassoCV.fit  sX   : 	&$.q9XXaggaj1DNVVAYDN&q$:N:NOdggqU3 v;;At||a/0#H-FF'. % [[FM ..M(GWnG[['!2BHHW4ExPQUSUQUVF+D%B6BM!5r?;MYY[}%A((* %%h0BC OHDKKN O (0rxx1'U8N8N8T8T'UO 	 +4 "9oOD!V:D&\FKKFFD12$H$7$7$:DID
 &&JO-6t_))vqWWV_
rxx

';'?'?!??!#J;;z*&+O+!+J!&J .= Q q'!*q'!*.zSYQR]7R z*1-zA~.q1s4y1},z*1-j!1!!44zA~.q1zA~.q1+H5RXXg%68I8VW<X"|| 1N1umTYY[2-=>?Q &Z CJ47md1ga#%{{%		
 hh{+$bhhv&67{((+,A7B1a47HDuQC{34 - /1ggk.J*+-/VVKa-H)*J'
  HX]]]]!	H
D$/4; g +*s   A4W//W9	c                     t        | j                  j                        j                  t	        | j
                        t               j                  dd            }|S )aj  Get metadata routing of this object.

        Please check :ref:`User Guide <metadata_routing>` on how the routing
        mechanism works.

        .. versionadded:: 1.5

        Returns
        -------
        routing : MetadataRouter
            A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
            routing information.
        )ownerr   r   )calleecaller)r   method_mapping)r   r   r   addr   r   r   )r   routers     r-   get_metadata_routingz%GraphicalLassoCV.get_metadata_routinga  sQ      dnn&=&=>BBdgg&(?..ge.L C 
 r/   r   )r   r   r   r   r   r   r   r   rT   r   r"   r   r   r=   r   r   r   r  r   r   s   @r-   r   r     s    tl$

3
3$Haf=|L"8QVDEmT"$D  BHHRZZ $$: 5x 6xtr/   r   ):r   r   r   r   r`   numbersr   r   numpyr"   scipyr   baser   
exceptionsr   linear_modelr	   rX   r
   model_selectionr   r   utilsr   utils._param_validationr   r   r   utils.metadata_routingr   r   r   r   r   utils.parallelr   r   utils.validationr   r   r    r   r   r   r.   r3   r   r   r=   rr   rv   r   r   r   r   r   rM   r/   r-   <module>r     se    
   "    + / ) 7  K K  / 
 H G

	" 	  B1J&  >"#
 #( 
  D, >' L 	  }%@i) ir/   