
from abc import abstractmethod
from math import ceil, floor, log
from numbers import Integral, Real

import numpy as np

from ..base import _fit_context, is_classifier
from ..metrics._scorer import get_scorer_names
from ..utils import resample
from ..utils._param_validation import Interval, StrOptions
from ..utils.multiclass import check_classification_targets
from ..utils.validation import _num_samples, validate_data
from . import ParameterGrid, ParameterSampler
from ._search import BaseSearchCV
from ._split import _yields_constant_splits, check_cv

__all__ = ["HalvingGridSearchCV", "HalvingRandomSearchCV"]


class _SubsampleMetaSplitter:
    """Splitter that subsamples a given fraction of the dataset"""

    def __init__(self, *, base_cv, fraction, subsample_test, random_state):
        self.base_cv = base_cv
        self.fraction = fraction
        self.subsample_test = subsample_test
        self.random_state = random_state

    def split(self, X, y, **kwargs):
        for train_idx, test_idx in self.base_cv.split(X, y, **kwargs):
            train_idx = resample(
                train_idx,
                replace=False,
                random_state=self.random_state,
                n_samples=int(self.fraction * len(train_idx)),
            )
            if self.subsample_test:
                test_idx = resample(
                    test_idx,
                    replace=False,
                    random_state=self.random_state,
                    n_samples=int(self.fraction * len(test_idx)),
                )
            yield train_idx, test_idx


def _top_k(results, k, itr):
    # Return the best candidates of a given iteration.
    iteration, mean_test_score, params = (
        np.asarray(a)
        for a in (results["iter"], results["mean_test_score"], results["params"])
    )
    iter_indices = np.flatnonzero(iteration == itr)
    scores = mean_test_score[iter_indices]
    # argsort() places NaNs at the end of the array, so we move sorted NaN
    # scores to the front of the array: NaN candidates are ranked last.
    sorted_indices = np.roll(np.argsort(scores), np.count_nonzero(np.isnan(scores)))
    return np.array(params[iter_indices][sorted_indices[-k:]])


class BaseSuccessiveHalving(BaseSearchCV):
    """Implements successive halving.

    Ref:
    Almost optimal exploration in multi-armed bandits, ICML 13
    Zohar Karnin, Tomer Koren, Oren Somekh
    """

    _parameter_constraints: dict = {
        **BaseSearchCV._parameter_constraints,
        "scoring": [StrOptions(set(get_scorer_names())), callable, None],
        "random_state": ["random_state"],
        "max_resources": [
            Interval(Integral, 0, None, closed="neither"),
            StrOptions({"auto"}),
        ],
        "min_resources": [
            Interval(Integral, 0, None, closed="neither"),
            StrOptions({"exhaust", "smallest"}),
        ],
        "resource": [str],
        "factor": [Interval(Real, 0, None, closed="neither")],
        "aggressive_elimination": ["boolean"],
    }
    _parameter_constraints.pop("pre_dispatch")  # not used in this class

    def __init__(
        self,
        estimator,
        *,
        scoring=None,
        n_jobs=None,
        refit=True,
        cv=5,
        verbose=0,
        random_state=None,
        error_score=np.nan,
        return_train_score=True,
        max_resources="auto",
        min_resources="exhaust",
        resource="n_samples",
        factor=3,
        aggressive_elimination=False,
    ):
        super().__init__(
            estimator,
            scoring=scoring,
            n_jobs=n_jobs,
            refit=refit,
            cv=cv,
            verbose=verbose,
            error_score=error_score,
            return_train_score=return_train_score,
        )

        self.random_state = random_state
        self.max_resources = max_resources
        self.resource = resource
        self.factor = factor
        self.min_resources = min_resources
        self.aggressive_elimination = aggressive_elimination

    def _check_input_parameters(self, X, y, split_params):
        # Successive calls to cv.split() must yield the same splits.
        if not _yields_constant_splits(self._checked_cv_orig):
            raise ValueError(
                "The cv parameter must yield consistent folds across "
                "calls to split(). Set its random_state to an int, or set "
                "shuffle=False."
            )

        if (
            self.resource != "n_samples"
            and self.resource not in self.estimator.get_params()
        ):
            raise ValueError(
                f"Cannot use resource={self.resource} which is not supported "
                f"by estimator {self.estimator.__class__.__name__}"
            )

        if isinstance(self, HalvingRandomSearchCV):
            if self.min_resources == self.n_candidates == "exhaust":
                raise ValueError(
                    "n_candidates and min_resources cannot be both set to 'exhaust'."
                )

        self.min_resources_ = self.min_resources
        if self.min_resources_ in ("smallest", "exhaust"):
            if self.resource == "n_samples":
                n_splits = self._checked_cv_orig.get_n_splits(X, y, **split_params)
                # Heuristic base value used by the 'smallest' strategy.
                magic_factor = 2
                self.min_resources_ = n_splits * magic_factor
                if is_classifier(self.estimator):
                    y = validate_data(self, X="no_validation", y=y)
                    check_classification_targets(y)
                    n_classes = np.unique(y).shape[0]
                    self.min_resources_ *= n_classes
            else:
                self.min_resources_ = 1
            # If 'exhaust', min_resources_ might be set to a higher value
            # later in _run_search.

        self.max_resources_ = self.max_resources
        if self.max_resources_ == "auto":
            if not self.resource == "n_samples":
                raise ValueError(
                    "resource can only be 'n_samples' when max_resources='auto'"
                )
            self.max_resources_ = _num_samples(X)

        if self.min_resources_ > self.max_resources_:
            raise ValueError(
                f"min_resources_={self.min_resources_} is greater "
                f"than max_resources_={self.max_resources_}."
            )

        if self.min_resources_ == 0:
            raise ValueError(
                f"min_resources_={self.min_resources_}: you might have passed "
                "an empty dataset X."
            )

    @staticmethod
    def _select_best_index(refit, refit_metric, results):
        """Custom refit callable to return the index of the best candidate.

        We want the best candidate out of the last iteration. By default
        BaseSearchCV would return the best candidate out of all iterations.

        Currently, we only support a single metric, thus `refit` and
        `refit_metric` are not required.
        """
        last_iter = np.max(results["iter"])
        last_iter_indices = np.flatnonzero(results["iter"] == last_iter)

        test_scores = results["mean_test_score"][last_iter_indices]
        # If all scores are NaNs there is no way to pick between them,
        # so we (arbitrarily) declare the zero'th entry the best one.
        if np.isnan(test_scores).all():
            best_idx = 0
        else:
            best_idx = np.nanargmax(test_scores)

        return last_iter_indices[best_idx]

    @_fit_context(
        # Halving*SearchCV.estimator is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y=None, **params):
        """Run fit with all sets of parameters.

        Parameters
        ----------

        X : array-like, shape (n_samples, n_features)
            Training vector, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        y : array-like, shape (n_samples,) or (n_samples, n_output), optional
            Target relative to X for classification or regression;
            None for unsupervised learning.

        **params : dict of string -> object
            Parameters passed to the ``fit`` method of the estimator.

        Returns
        -------
        self : object
            Instance of fitted estimator.
        """
        self._checked_cv_orig = check_cv(
            self.cv, y, classifier=is_classifier(self.estimator)
        )

        routed_params = self._get_routed_params_for_fit(params)
        self._check_input_parameters(
            X=X, y=y, split_params=routed_params.splitter.split
        )

        self._n_samples_orig = _num_samples(X)

        super().fit(X, y=y, **params)

        # Set best_score_: BaseSearchCV does not set it, as refit is a callable.
        self.best_score_ = self.cv_results_["mean_test_score"][self.best_index_]

        return self

    def _run_search(self, evaluate_candidates):
        candidate_params = self._generate_candidate_params()

        if self.resource != "n_samples" and any(
            self.resource in candidate for candidate in candidate_params
        ):
            # Can only check this now since we need the candidates list.
            raise ValueError(
                f"Cannot use parameter {self.resource} as the resource since "
                "it is part of the searched parameters."
            )

        # n_required_iterations is the number of iterations needed so that
        # the last iteration evaluates fewer than `factor` candidates.
        n_required_iterations = 1 + floor(log(len(candidate_params), self.factor))

        if self.min_resources == "exhaust":
            # To exhaust the resources, start with the biggest min_resources
            # possible so that the last (required) iteration uses as many
            # resources as possible.
            last_iteration = n_required_iterations - 1
            self.min_resources_ = max(
                self.min_resources_,
                self.max_resources_ // self.factor**last_iteration,
            )

        # n_possible_iterations is the number of iterations that can actually
        # be done starting from min_resources_ without exceeding
        # max_resources_. Depending on the budget and the number of
        # candidates, it may be higher or smaller than n_required_iterations.
        n_possible_iterations = 1 + floor(
            log(self.max_resources_ // self.min_resources_, self.factor)
        )

        if self.aggressive_elimination:
            n_iterations = n_required_iterations
        else:
            n_iterations = min(n_possible_iterations, n_required_iterations)

        if self.verbose:
            print(f"n_iterations: {n_iterations}")
            print(f"n_required_iterations: {n_required_iterations}")
            print(f"n_possible_iterations: {n_possible_iterations}")
            print(f"min_resources_: {self.min_resources_}")
            print(f"max_resources_: {self.max_resources_}")
            print(f"aggressive_elimination: {self.aggressive_elimination}")
            print(f"factor: {self.factor}")

        self.n_resources_ = []
        self.n_candidates_ = []

        for itr in range(n_iterations):
            power = itr  # default
            if self.aggressive_elimination:
                # This replays the first iteration for as long as needed
                # (while candidates are still being eliminated), and then
                # goes on as usual.
                power = max(0, itr - n_required_iterations + n_possible_iterations)

            n_resources = int(self.factor**power * self.min_resources_)
            # Never exceed the total budget.
            n_resources = min(n_resources, self.max_resources_)
            self.n_resources_.append(n_resources)

            n_candidates = len(candidate_params)
            self.n_candidates_.append(n_candidates)

            if self.verbose:
                print("\n----------")
                print(f"iter: {itr}")
                print(f"n_candidates: {n_candidates}")
                print(f"n_resources: {n_resources}")

            if self.resource == "n_samples":
                # Subsampling will be done in cv.split().
                cv = _SubsampleMetaSplitter(
                    base_cv=self._checked_cv_orig,
                    fraction=n_resources / self._n_samples_orig,
                    subsample_test=True,
                    random_state=self.random_state,
                )
            else:
                # Need a copy so that setting the resource for this iteration
                # does not overwrite the candidates of the next one.
                candidate_params = [c.copy() for c in candidate_params]
                for candidate in candidate_params:
                    candidate[self.resource] = n_resources
                cv = self._checked_cv_orig

            more_results = {
                "iter": [itr] * n_candidates,
                "n_resources": [n_resources] * n_candidates,
            }

            results = evaluate_candidates(
                candidate_params, cv, more_results=more_results
            )

            n_candidates_to_keep = ceil(n_candidates / self.factor)
            candidate_params = _top_k(results, n_candidates_to_keep, itr)

        self.n_remaining_candidates_ = len(candidate_params)
        self.n_required_iterations_ = n_required_iterations
        self.n_possible_iterations_ = n_possible_iterations
        self.n_iterations_ = n_iterations

    @abstractmethod
    def _generate_candidate_params(self):
        pass


class HalvingGridSearchCV(BaseSuccessiveHalving):
    """Search over specified parameter values with successive halving.

The search strategy starts evaluating all the candidates with a small
amount of resources and iteratively selects the best candidates, using
more and more resources.

Read more in the :ref:`User guide <successive_halving_user_guide>`.

.. note::

  This estimator is still **experimental** for now: the predictions
  and the API might change without any deprecation cycle. To use it,
  you need to explicitly import ``enable_halving_search_cv``::

    >>> # explicitly require this experimental feature
    >>> from sklearn.experimental import enable_halving_search_cv # noqa
    >>> # now you can import normally from model_selection
    >>> from sklearn.model_selection import HalvingGridSearchCV

Parameters
----------
estimator : estimator object
    This is assumed to implement the scikit-learn estimator interface.
    Either estimator needs to provide a ``score`` function,
    or ``scoring`` must be passed.

param_grid : dict or list of dictionaries
    Dictionary with parameters names (string) as keys and lists of
    parameter settings to try as values, or a list of such
    dictionaries, in which case the grids spanned by each dictionary
    in the list are explored. This enables searching over any sequence
    of parameter settings.

factor : int or float, default=3
    The 'halving' parameter, which determines the proportion of candidates
    that are selected for each subsequent iteration. For example,
    ``factor=3`` means that only one third of the candidates are selected.

resource : ``'n_samples'`` or str, default='n_samples'
    Defines the resource that increases with each iteration. By default,
    the resource is the number of samples. It can also be set to any
    parameter of the base estimator that accepts positive integer
    values, e.g. 'n_iterations' or 'n_estimators' for a gradient
    boosting estimator. In this case ``max_resources`` cannot be 'auto'
    and must be set explicitly.

max_resources : int, default='auto'
    The maximum amount of resource that any candidate is allowed to use
    for a given iteration. By default, this is set to ``n_samples`` when
    ``resource='n_samples'`` (default), else an error is raised.

min_resources : {'exhaust', 'smallest'} or int, default='exhaust'
    The minimum amount of resource that any candidate is allowed to use
    for a given iteration. Equivalently, this defines the amount of
    resources `r0` that are allocated for each candidate at the first
    iteration.

    - 'smallest' is a heuristic that sets `r0` to a small value:

      - ``n_splits * 2`` when ``resource='n_samples'`` for a regression problem
      - ``n_classes * n_splits * 2`` when ``resource='n_samples'`` for a
        classification problem
      - ``1`` when ``resource != 'n_samples'``

    - 'exhaust' will set `r0` such that the **last** iteration uses as
      many resources as possible. Namely, the last iteration will use the
      highest value smaller than ``max_resources`` that is a multiple of
      both ``min_resources`` and ``factor``. In general, using 'exhaust'
      leads to a more accurate estimator, but is slightly more time
      consuming.

    Note that the amount of resources used at each iteration is always a
    multiple of ``min_resources``.

aggressive_elimination : bool, default=False
    This is only relevant in cases where there aren't enough resources to
    reduce the remaining candidates to at most `factor` after the last
    iteration. If ``True``, then the search process will 'replay' the
    first iteration for as long as needed until the number of candidates
    is small enough. This is ``False`` by default, which means that the
    last iteration may evaluate more than ``factor`` candidates. See
    :ref:`aggressive_elimination` for more details.

cv : int, cross-validation generator or iterable, default=5
    Determines the cross-validation splitting strategy.
    Possible inputs for cv are:

    - integer, to specify the number of folds in a `(Stratified)KFold`,
    - :term:`CV splitter`,
    - An iterable yielding (train, test) splits as arrays of indices.

    For integer/None inputs, if the estimator is a classifier and ``y`` is
    either binary or multiclass, :class:`StratifiedKFold` is used. In all
    other cases, :class:`KFold` is used. These splitters are instantiated
    with `shuffle=False` so the splits will be the same across calls.

    Refer :ref:`User Guide <cross_validation>` for the various
    cross-validation strategies that can be used here.

    .. note::
        Due to implementation details, the folds produced by `cv` must be
        the same across multiple calls to `cv.split()`. For
        built-in `scikit-learn` iterators, this can be achieved by
        deactivating shuffling (`shuffle=False`), or by setting the
        `cv`'s `random_state` parameter to an integer.

scoring : str or callable, default=None
    Scoring method to use to evaluate the predictions on the test set.

    - str: see :ref:`scoring_string_names` for options.
    - callable: a scorer callable object (e.g., function) with signature
      ``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details.
    - `None`: the `estimator`'s
      :ref:`default evaluation criterion <scoring_api_overview>` is used.

refit : bool or callable, default=True
    Refit an estimator using the best found parameters on the whole
    dataset.

    Where there are considerations other than maximum score in
    choosing a best estimator, ``refit`` can be set to a function which
    returns the selected ``best_index_`` given ``cv_results_``. In that
    case, the ``best_estimator_`` and ``best_params_`` will be set
    according to the returned ``best_index_`` while the ``best_score_``
    attribute will not be available.

    The refitted estimator is made available at the ``best_estimator_``
    attribute and permits using ``predict`` directly on this
    ``HalvingGridSearchCV`` instance.

    See :ref:`this example
    <sphx_glr_auto_examples_model_selection_plot_grid_search_refit_callable.py>`
    for an example of how to use ``refit=callable`` to balance model
    complexity and cross-validated score.

error_score : 'raise' or numeric
    Value to assign to the score if an error occurs in estimator fitting.
    If set to 'raise', the error is raised. If a numeric value is given,
    FitFailedWarning is raised. This parameter does not affect the refit
    step, which will always raise the error. Default is ``np.nan``.

return_train_score : bool, default=False
    If ``False``, the ``cv_results_`` attribute will not include training
    scores.
    Computing training scores is used to get insights on how different
    parameter settings impact the overfitting/underfitting trade-off.
    However computing the scores on the training set can be computationally
    expensive and is not strictly required to select the parameters that
    yield the best generalization performance.

random_state : int, RandomState instance or None, default=None
    Pseudo random number generator state used for subsampling the dataset
    when `resource != 'n_samples'`. Ignored otherwise.
    Pass an int for reproducible output across multiple function calls.
    See :term:`Glossary <random_state>`.

n_jobs : int or None, default=None
    Number of jobs to run in parallel.
    ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
    ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
    for more details.

verbose : int
    Controls the verbosity: the higher, the more messages.

Attributes
----------
n_resources_ : list of int
    The amount of resources used at each iteration.

n_candidates_ : list of int
    The number of candidate parameters that were evaluated at each
    iteration.

n_remaining_candidates_ : int
    The number of candidate parameters that are left after the last
    iteration. It corresponds to `ceil(n_candidates[-1] / factor)`

max_resources_ : int
    The maximum number of resources that any candidate is allowed to use
    for a given iteration. Note that since the number of resources used
    at each iteration must be a multiple of ``min_resources_``, the
    actual number of resources used at the last iteration may be smaller
    than ``max_resources_``.

min_resources_ : int
    The amount of resources that are allocated for each candidate at the
    first iteration.

n_iterations_ : int
    The actual number of iterations that were run. This is equal to
    ``n_required_iterations_`` if ``aggressive_elimination`` is ``True``.
    Else, this is equal to ``min(n_possible_iterations_,
    n_required_iterations_)``.

n_possible_iterations_ : int
    The number of iterations that are possible starting with
    ``min_resources_`` resources and without exceeding
    ``max_resources_``.

n_required_iterations_ : int
    The number of iterations that are required to end up with less than
    ``factor`` candidates at the last iteration, starting with
    ``min_resources_`` resources. This will be smaller than
    ``n_possible_iterations_`` when there aren't enough resources.

cv_results_ : dict of numpy (masked) ndarrays
    A dict with keys as column headers and values as columns, that can be
    imported into a pandas ``DataFrame``. It contains lots of information
    for analysing the results of a search.
    Please refer to the :ref:`User guide <successive_halving_cv_results>`
    for details.

best_estimator_ : estimator or dict
    Estimator that was chosen by the search, i.e. estimator
    which gave highest score (or smallest loss if specified)
    on the left out data. Not available if ``refit=False``.

best_score_ : float
    Mean cross-validated score of the best_estimator.

best_params_ : dict
    Parameter setting that gave the best results on the hold out data.

best_index_ : int
    The index (of the ``cv_results_`` arrays) which corresponds to the best
    candidate parameter setting.

    The dict at ``search.cv_results_['params'][search.best_index_]`` gives
    the parameter setting for the best model, that gives the highest
    mean score (``search.best_score_``).

scorer_ : function or a dict
    Scorer function used on the held out data to choose the best
    parameters for the model.

n_splits_ : int
    The number of cross-validation splits (folds/iterations).

refit_time_ : float
    Seconds used for refitting the best model on the whole dataset.

    This is present only if ``refit`` is not False.

multimetric_ : bool
    Whether or not the scorers compute several metrics.

classes_ : ndarray of shape (n_classes,)
    The classes labels. This is present only if ``refit`` is specified and
    the underlying estimator is a classifier.

n_features_in_ : int
    Number of features seen during :term:`fit`. Only defined if
    `best_estimator_` is defined (see the documentation for the `refit`
    parameter for more details) and that `best_estimator_` exposes
    `n_features_in_` when fit.

    .. versionadded:: 0.24

feature_names_in_ : ndarray of shape (`n_features_in_`,)
    Names of features seen during :term:`fit`. Only defined if
    `best_estimator_` is defined (see the documentation for the `refit`
    parameter for more details) and that `best_estimator_` exposes
    `feature_names_in_` when fit.

    .. versionadded:: 1.0

See Also
--------
:class:`HalvingRandomSearchCV`:
    Random search over a set of parameters using successive halving.

Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.

All parameter combinations scored with a NaN will share the lowest rank.

Examples
--------

>>> from sklearn.datasets import load_iris
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sklearn.experimental import enable_halving_search_cv  # noqa
>>> from sklearn.model_selection import HalvingGridSearchCV
...
>>> X, y = load_iris(return_X_y=True)
>>> clf = RandomForestClassifier(random_state=0)
...
>>> param_grid = {"max_depth": [3, None],
...               "min_samples_split": [5, 10]}
>>> search = HalvingGridSearchCV(clf, param_grid, resource='n_estimators',
...                              max_resources=10,
...                              random_state=0).fit(X, y)
>>> search.best_params_  # doctest: +SKIP
{'max_depth': None, 'min_samples_split': 10, 'n_estimators': 9}
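
For illustration, the fitted search also records how the budget was spent.
With the settings above (4 candidates, ``factor=3``, ``max_resources=10``),
the halving schedule works out to two iterations:

>>> search.n_resources_  # doctest: +SKIP
[3, 9]
>>> search.n_candidates_  # doctest: +SKIP
[4, 2]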

    """

    _parameter_constraints: dict = {
        **BaseSuccessiveHalving._parameter_constraints,
        "param_grid": [dict, list],
    }

    def __init__(
        self,
        estimator,
        param_grid,
        *,
        factor=3,
        resource="n_samples",
        max_resources="auto",
        min_resources="exhaust",
        aggressive_elimination=False,
        cv=5,
        scoring=None,
        refit=True,
        error_score=np.nan,
        return_train_score=True,
        random_state=None,
        n_jobs=None,
        verbose=0,
    ):
        super().__init__(
            estimator,
            scoring=scoring,
            n_jobs=n_jobs,
            refit=refit,
            verbose=verbose,
            cv=cv,
            random_state=random_state,
            error_score=error_score,
            return_train_score=return_train_score,
            max_resources=max_resources,
            resource=resource,
            factor=factor,
            min_resources=min_resources,
            aggressive_elimination=aggressive_elimination,
        )
        self.param_grid = param_grid

    def _generate_candidate_params(self):
        return ParameterGrid(self.param_grid)


class HalvingRandomSearchCV(BaseSuccessiveHalving):
    """Randomized search on hyper parameters.

The search strategy starts evaluating all the candidates with a small
amount of resources and iteratively selects the best candidates, using more
and more resources.

The candidates are sampled at random from the parameter space and the
number of sampled candidates is determined by ``n_candidates``.

Read more in the :ref:`User guide <successive_halving_user_guide>`.

.. note::

  This estimator is still **experimental** for now: the predictions
  and the API might change without any deprecation cycle. To use it,
  you need to explicitly import ``enable_halving_search_cv``::

    >>> # explicitly require this experimental feature
    >>> from sklearn.experimental import enable_halving_search_cv # noqa
    >>> # now you can import normally from model_selection
    >>> from sklearn.model_selection import HalvingRandomSearchCV

Parameters
----------
estimator : estimator object
    This is assumed to implement the scikit-learn estimator interface.
    Either estimator needs to provide a ``score`` function,
    or ``scoring`` must be passed.

param_distributions : dict or list of dicts
    Dictionary with parameters names (`str`) as keys and distributions
    or lists of parameters to try. Distributions must provide a ``rvs``
    method for sampling (such as those from scipy.stats.distributions).
    If a list is given, it is sampled uniformly.
    If a list of dicts is given, first a dict is sampled uniformly, and
    then a parameter is sampled using that dict as above.

n_candidates : "exhaust" or int, default="exhaust"
    The number of candidate parameters to sample at the first
    iteration. Using 'exhaust' will sample enough candidates so that the
    last iteration uses as many resources as possible, based on
    `min_resources`, `max_resources` and `factor`. In this case,
    `min_resources` cannot be 'exhaust'.

factor : int or float, default=3
    The 'halving' parameter, which determines the proportion of candidates
    that are selected for each subsequent iteration. For example,
    ``factor=3`` means that only one third of the candidates are selected.

resource : ``'n_samples'`` or str, default='n_samples'
    Defines the resource that increases with each iteration. By default,
    the resource is the number of samples. It can also be set to any
    parameter of the base estimator that accepts positive integer
    values, e.g. 'n_iterations' or 'n_estimators' for a gradient
    boosting estimator. In this case ``max_resources`` cannot be 'auto'
    and must be set explicitly.

max_resources : int, default='auto'
    The maximum number of resources that any candidate is allowed to use
    for a given iteration. By default, this is set to ``n_samples`` when
    ``resource='n_samples'`` (default), else an error is raised.

min_resources : {'exhaust', 'smallest'} or int, default='smallest'
    The minimum amount of resource that any candidate is allowed to use
    for a given iteration. Equivalently, this defines the amount of
    resources `r0` that are allocated for each candidate at the first
    iteration.

    - 'smallest' is a heuristic that sets `r0` to a small value:

      - ``n_splits * 2`` when ``resource='n_samples'`` for a regression problem
      - ``n_classes * n_splits * 2`` when ``resource='n_samples'`` for a
        classification problem
      - ``1`` when ``resource != 'n_samples'``

    - 'exhaust' will set `r0` such that the **last** iteration uses as
      many resources as possible. Namely, the last iteration will use the
      highest value smaller than ``max_resources`` that is a multiple of
      both ``min_resources`` and ``factor``. In general, using 'exhaust'
      leads to a more accurate estimator, but is slightly more time
      consuming. 'exhaust' isn't available when `n_candidates='exhaust'`.

    Note that the amount of resources used at each iteration is always a
    multiple of ``min_resources``.

aggressive_elimination : bool, default=False
    This is only relevant in cases where there aren't enough resources to
    reduce the remaining candidates to at most `factor` after the last
    iteration. If ``True``, then the search process will 'replay' the
    first iteration for as long as needed until the number of candidates
    is small enough. This is ``False`` by default, which means that the
    last iteration may evaluate more than ``factor`` candidates. See
    :ref:`aggressive_elimination` for more details.

cv : int, cross-validation generator or an iterable, default=5
    Determines the cross-validation splitting strategy.
    Possible inputs for cv are:

    - integer, to specify the number of folds in a `(Stratified)KFold`,
    - :term:`CV splitter`,
    - An iterable yielding (train, test) splits as arrays of indices.

    For integer/None inputs, if the estimator is a classifier and ``y`` is
    either binary or multiclass, :class:`StratifiedKFold` is used. In all
    other cases, :class:`KFold` is used. These splitters are instantiated
    with `shuffle=False` so the splits will be the same across calls.

    Refer :ref:`User Guide <cross_validation>` for the various
    cross-validation strategies that can be used here.

    .. note::
        Due to implementation details, the folds produced by `cv` must be
        the same across multiple calls to `cv.split()`. For
        built-in `scikit-learn` iterators, this can be achieved by
        deactivating shuffling (`shuffle=False`), or by setting the
        `cv`'s `random_state` parameter to an integer.

scoring : str or callable, default=None
    Scoring method to use to evaluate the predictions on the test set.

    - str: see :ref:`scoring_string_names` for options.
    - callable: a scorer callable object (e.g., function) with signature
      ``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details.
    - `None`: the `estimator`'s
      :ref:`default evaluation criterion <scoring_api_overview>` is used.

refit : bool or callable, default=True
    Refit an estimator using the best found parameters on the whole
    dataset.

    Where there are considerations other than maximum score in
    choosing a best estimator, ``refit`` can be set to a function which
    returns the selected ``best_index_`` given ``cv_results_``. In that
    case, the ``best_estimator_`` and ``best_params_`` will be set
    according to the returned ``best_index_`` while the ``best_score_``
    attribute will not be available.

    The refitted estimator is made available at the ``best_estimator_``
    attribute and permits using ``predict`` directly on this
    ``HalvingRandomSearchCV`` instance.

    See :ref:`this example
    <sphx_glr_auto_examples_model_selection_plot_grid_search_refit_callable.py>`
    for an example of how to use ``refit=callable`` to balance model
    complexity and cross-validated score.

error_score : 'raise' or numeric
    Value to assign to the score if an error occurs in estimator fitting.
    If set to 'raise', the error is raised. If a numeric value is given,
    FitFailedWarning is raised. This parameter does not affect the refit
    step, which will always raise the error. Default is ``np.nan``.

return_train_score : bool, default=False
    If ``False``, the ``cv_results_`` attribute will not include training
    scores.
    Computing training scores is used to get insights on how different
    parameter settings impact the overfitting/underfitting trade-off.
    However computing the scores on the training set can be computationally
    expensive and is not strictly required to select the parameters that
    yield the best generalization performance.

random_state : int, RandomState instance or None, default=None
    Pseudo random number generator state used for subsampling the dataset
    when `resource != 'n_samples'`. Also used for random uniform
    sampling from lists of possible values instead of scipy.stats
    distributions.
    Pass an int for reproducible output across multiple function calls.
    See :term:`Glossary <random_state>`.

n_jobs : int or None, default=None
    Number of jobs to run in parallel.
    ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
    ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
    for more details.

verbose : int
    Controls the verbosity: the higher, the more messages.

Attributes
----------
n_resources_ : list of int
    The amount of resources used at each iteration.

n_candidates_ : list of int
    The number of candidate parameters that were evaluated at each
    iteration.

n_remaining_candidates_ : int
    The number of candidate parameters that are left after the last
    iteration. It corresponds to `ceil(n_candidates[-1] / factor)`

max_resources_ : int
    The maximum number of resources that any candidate is allowed to use
    for a given iteration. Note that since the number of resources used at
    each iteration must be a multiple of ``min_resources_``, the actual
    number of resources used at the last iteration may be smaller than
    ``max_resources_``.

min_resources_ : int
    The amount of resources that are allocated for each candidate at the
    first iteration.

n_iterations_ : int
    The actual number of iterations that were run. This is equal to
    ``n_required_iterations_`` if ``aggressive_elimination`` is ``True``.
    Else, this is equal to ``min(n_possible_iterations_,
    n_required_iterations_)``.

n_possible_iterations_ : int
    The number of iterations that are possible starting with
    ``min_resources_`` resources and without exceeding
    ``max_resources_``.

n_required_iterations_ : int
    The number of iterations that are required to end up with less than
    ``factor`` candidates at the last iteration, starting with
    ``min_resources_`` resources. This will be smaller than
    ``n_possible_iterations_`` when there aren't enough resources.

cv_results_ : dict of numpy (masked) ndarrays
    A dict with keys as column headers and values as columns, that can be
    imported into a pandas ``DataFrame``. It contains lots of information
    for analysing the results of a search.
    Please refer to the :ref:`User guide <successive_halving_cv_results>`
    for details.

best_estimator_ : estimator or dict
    Estimator that was chosen by the search, i.e. estimator
    which gave highest score (or smallest loss if specified)
    on the left out data. Not available if ``refit=False``.

best_score_ : float
    Mean cross-validated score of the best_estimator.

best_params_ : dict
    Parameter setting that gave the best results on the hold out data.

best_index_ : int
    The index (of the ``cv_results_`` arrays) which corresponds to the best
    candidate parameter setting.

    The dict at ``search.cv_results_['params'][search.best_index_]`` gives
    the parameter setting for the best model, that gives the highest
    mean score (``search.best_score_``).

scorer_ : function or a dict
    Scorer function used on the held out data to choose the best
    parameters for the model.

n_splits_ : int
    The number of cross-validation splits (folds/iterations).

refit_time_ : float
    Seconds used for refitting the best model on the whole dataset.

    This is present only if ``refit`` is not False.

multimetric_ : bool
    Whether or not the scorers compute several metrics.

classes_ : ndarray of shape (n_classes,)
    The classes labels. This is present only if ``refit`` is specified and
    the underlying estimator is a classifier.

n_features_in_ : int
    Number of features seen during :term:`fit`. Only defined if
    `best_estimator_` is defined (see the documentation for the `refit`
    parameter for more details) and that `best_estimator_` exposes
    `n_features_in_` when fit.

    .. versionadded:: 0.24

feature_names_in_ : ndarray of shape (`n_features_in_`,)
    Names of features seen during :term:`fit`. Only defined if
    `best_estimator_` is defined (see the documentation for the `refit`
    parameter for more details) and that `best_estimator_` exposes
    `feature_names_in_` when fit.

    .. versionadded:: 1.0

See Also
--------
:class:`HalvingGridSearchCV`:
    Search over a grid of parameters using successive halving.

Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.

All parameter combinations scored with a NaN will share the lowest rank.

Examples
--------

>>> from sklearn.datasets import load_iris
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sklearn.experimental import enable_halving_search_cv  # noqa
>>> from sklearn.model_selection import HalvingRandomSearchCV
>>> from scipy.stats import randint
>>> import numpy as np
...
>>> X, y = load_iris(return_X_y=True)
>>> clf = RandomForestClassifier(random_state=0)
>>> np.random.seed(0)
...
>>> param_distributions = {"max_depth": [3, None],
...                        "min_samples_split": randint(2, 11)}
>>> search = HalvingRandomSearchCV(clf, param_distributions,
...                                resource='n_estimators',
...                                max_resources=10,
...                                random_state=0).fit(X, y)
>>> search.best_params_  # doctest: +SKIP
{'max_depth': None, 'min_samples_split': 10, 'n_estimators': 9}
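
For illustration, with ``n_candidates='exhaust'`` and ``max_resources=10``
the search samples 10 candidates and whittles them down over three
iterations, using 1, 3 and 9 estimators:

>>> search.n_resources_  # doctest: +SKIP
[1, 3, 9]
>>> search.n_candidates_  # doctest: +SKIP
[10, 4, 2]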
    """

    _parameter_constraints: dict = {
        **BaseSuccessiveHalving._parameter_constraints,
        "param_distributions": [dict, list],
        "n_candidates": [
            Interval(Integral, 0, None, closed="neither"),
            StrOptions({"exhaust"}),
        ],
    }

    def __init__(
        self,
        estimator,
        param_distributions,
        *,
        n_candidates="exhaust",
        factor=3,
        resource="n_samples",
        max_resources="auto",
        min_resources="smallest",
        aggressive_elimination=False,
        cv=5,
        scoring=None,
        refit=True,
        error_score=np.nan,
        return_train_score=True,
        random_state=None,
        n_jobs=None,
        verbose=0,
    ):
        super().__init__(
            estimator,
            scoring=scoring,
            n_jobs=n_jobs,
            refit=refit,
            verbose=verbose,
            cv=cv,
            random_state=random_state,
            error_score=error_score,
            return_train_score=return_train_score,
            max_resources=max_resources,
            resource=resource,
            factor=factor,
            min_resources=min_resources,
            aggressive_elimination=aggressive_elimination,
        )
        self.param_distributions = param_distributions
        self.n_candidates = n_candidates

    def _generate_candidate_params(self):
        n_candidates_first_iter = self.n_candidates
        if n_candidates_first_iter == "exhaust":
            # Generate enough candidates so that the last iteration uses as
            # many resources as possible.
            n_candidates_first_iter = self.max_resources_ // self.min_resources_
        return ParameterSampler(
            self.param_distributions,
            n_candidates_first_iter,
            random_state=self.random_state,
        )