
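"""Consistency checks for docstrings of related scikit-learn objects.

These tests use :func:`sklearn.utils._testing.assert_docstring_consistency`
to verify that groups of related estimators and metric functions document
shared parameters and attributes with identical (or pattern-matched) text.
They require numpydoc and are skipped when it is not installed.
"""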
import pytest

from sklearn import metrics
from sklearn.ensemble import (
    BaggingClassifier,
    BaggingRegressor,
    IsolationForest,
    StackingClassifier,
    StackingRegressor,
)
from sklearn.utils._testing import assert_docstring_consistency, skip_if_no_numpydoc

# Each case dict below maps directly onto the keyword arguments of
# `assert_docstring_consistency` (see the parametrized tests at the bottom).
CLASS_DOCSTRING_CONSISTENCY_CASES = [
    {
        # The bagging-style ensembles share `max_samples`; each description
        # is checked against the shared regex pattern.
        "objects": [BaggingClassifier, BaggingRegressor, IsolationForest],
        "include_params": ["max_samples"],
        "exclude_params": None,
        "include_attrs": False,
        "exclude_attrs": None,
        "include_returns": False,
        "exclude_returns": None,
        "descr_regex_pattern": r"The number of samples to draw from X to train each.*",
    },
    {
        # The stacking estimators must document these constructor parameters,
        # and all fitted attributes except `final_estimator_`, consistently.
        "objects": [StackingClassifier, StackingRegressor],
        "include_params": ["cv", "n_jobs", "passthrough", "verbose"],
        "exclude_params": None,
        "include_attrs": True,
        "exclude_attrs": ["final_estimator_"],
        "include_returns": False,
        "exclude_returns": None,
        "descr_regex_pattern": None,
    },
]

FUNCTION_DOCSTRING_CONSISTENCY_CASES = [
    {
        # The precision/recall family of metrics must describe `average` and
        # `zero_division` consistently.
        "objects": [
            metrics.precision_recall_fscore_support,
            metrics.f1_score,
            metrics.fbeta_score,
            metrics.precision_score,
            metrics.recall_score,
        ],
        "include_params": ["average", "zero_division"],
        "exclude_params": None,
        "include_attrs": False,
        "exclude_attrs": None,
        "include_returns": False,
        "exclude_returns": None,
        "descr_regex_pattern": None,
    },
    {
        "objects": [
            metrics.precision_recall_fscore_support,
            metrics.f1_score,
            metrics.fbeta_score,
            metrics.precision_score,
            metrics.recall_score,
        ],
        "include_params": ["average"],
        "exclude_params": None,
        "include_attrs": False,
        "exclude_attrs": None,
        "include_returns": False,
        "exclude_returns": None,
        # The triple-quoted pattern is collapsed to single-spaced text via
        # `" ".join(... .split())`, so it can be written as a wrapped block.
        "descr_regex_pattern": " ".join(
            r"""This parameter is required for multiclass/multilabel targets\.
            If ``None``, the metrics for each class are returned\. Otherwise, this
            determines the type of averaging performed on the data:
            ``'binary'``:
                Only report results for the class specified by ``pos_label``\.
                This is applicable only if targets \(``y_\{true,pred\}``\) are binary\.
            ``'micro'``:
                Calculate metrics globally by counting the total true positives,
                false negatives and false positives\.
            ``'macro'``:
                Calculate metrics for each label, and find their unweighted
                mean\.  This does not take label imbalance into account\.
            ``'weighted'``:
                Calculate metrics for each label, and find their average weighted
                by support \(the number of true instances for each label\)\. This
                alters 'macro' to account for label imbalance; it can result in an
                F-score that is not between precision and recall\.[\s\w]*\.*
            ``'samples'``:
                Calculate metrics for each instance, and find their average \(only
                meaningful for multilabel classification where this differs from
                :func:`accuracy_score`\)\.""".split()
        ),
    },
]


@pytest.mark.parametrize("case", CLASS_DOCSTRING_CONSISTENCY_CASES)
@skip_if_no_numpydoc
def test_class_docstring_consistency(case):
    """Check docstring parameter consistency between related classes."""
    assert_docstring_consistency(**case)


@pytest.mark.parametrize("case", FUNCTION_DOCSTRING_CONSISTENCY_CASES)
@skip_if_no_numpydoc
def test_function_docstring_consistency(case):
    """Check docstring parameter consistency between related functions."""
    assert_docstring_consistency(**case)