
    {Kg}                     H    d Z ddlmZ ddlZddlmZmZ ddlm	Z	 d	dZ
d
dZy)z
Common code for all metrics.

    )combinationsN   )check_arraycheck_consistent_length)type_of_targetc           	         d}||vrt        dj                  |            t        |      }|dvrt        dj                  |            |dk(  r | |||      S t        |||       t	        |      }t	        |      }d}|}d}	|d	k(  rF|#t        j                  ||j                  d         }|j                         }|j                         }n|d
k(  r~|@t        j                  t        j                  |t        j                  |d            d      }	nt        j                  |d      }	t        j                  |	j                         d      ry|dk(  r|}	d}d}|j                  dk(  r|j                  d      }|j                  dk(  r|j                  d      }|j                  |   }
t        j                  |
f      }t        |
      D ]T  }|j!                  |g|      j                         }|j!                  |g|      j                         } | |||      ||<   V |6|	t        j"                  |	      }	d||	dk(  <   t        j$                  ||	      S |S )aM  Average a binary metric for multilabel classification.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.

    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    average : {None, 'micro', 'macro', 'samples', 'weighted'}, default='macro'
        If ``None``, the scores for each class are returned. Otherwise,
        this determines the type of averaging performed on the data:

        ``'micro'``:
            Calculate metrics globally by considering each element of the label
            indicator matrix as a label.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label).
        ``'samples'``:
            Calculate metrics for each instance, and find their average.

        Will be ignored when ``y_true`` is binary.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    binary_metric : callable, returns shape [n_classes]
        The binary metric function to use.

    Returns
    -------
    score : float or array of shape [n_classes]
        If not ``None``, average the score, else return the score for each
        classes.

    )Nmicromacroweightedsampleszaverage has to be one of {0})binaryzmultilabel-indicatorz{0} format is not supportedr   )sample_weight   Nr	   r   )r   r   )axisg        r   weights)
ValueErrorformatr   r   r   nprepeatshaperavelsummultiplyreshapeisclosendimzerosrangetakeasarrayaverage)binary_metricy_truey_scorer#   r   average_optionsy_typenot_average_axisscore_weightaverage_weight	n_classesscorecy_true_c	y_score_cs                  Y/home/alanp/www/video.onchill/myenv/lib/python3.12/site-packages/sklearn/metrics/_base.py_average_binary_scorer2      sE   V FOo%7>>OPPF#F776==fEFFVWMJJFG]; F'"G LN'#99\6<<?CL--/	J	#VVFBJJ|W$EFQN  VVF3N::n((*C0	I	%{{a(||q//'*./IHHi\"E9;;s)9;:@@BLL!+;L<BBD	 9LQa  %  ZZ7N)*E.A%&zz%88    c                 "   t        ||       t        j                  |      }|j                  d   }||dz
  z  dz  }t        j                  |      }|dk(  }|rt        j                  |      nd}	t        t        |d            D ]s  \  }
\  }}||k(  }||k(  }t        j                  ||      }|rt        j                  |      |	|
<   ||   }||   } | ||||f         } | ||||f         }||z   dz  ||
<   u t        j                  ||	      S )aL  Average one-versus-one scores for multiclass classification.

    Uses the binary metric for one-vs-one multiclass classification,
    where the score is computed according to the Hand & Till (2001) algorithm.

    Parameters
    ----------
    binary_metric : callable
        The binary metric function to use that accepts the following as input:
            y_true_target : array, shape = [n_samples_target]
                Some sub-array of y_true for a pair of classes designated
                positive and negative in the one-vs-one scheme.
            y_score_target : array, shape = [n_samples_target]
                Scores corresponding to the probability estimates
                of a sample belonging to the designated positive class label

    y_true : array-like of shape (n_samples,)
        True multiclass labels.

    y_score : array-like of shape (n_samples, n_classes)
        Target scores corresponding to probability estimates of a sample
        belonging to a particular class.

    average : {'macro', 'weighted'}, default='macro'
        Determines the type of averaging performed on the pairwise binary
        metric scores:
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean. This does not take label imbalance into account. Classes
            are assumed to be uniformly distributed.
        ``'weighted'``:
            Calculate metrics for each label, taking into account the
            prevalence of the classes.

    Returns
    -------
    score : float
        Average of the pairwise binary metric scores.
    r   r   r   r   Nr   )	r   r   uniquer   empty	enumerater   
logical_orr#   )r$   r%   r&   r#   y_true_uniquer,   n_pairspair_scoresis_weighted
prevalenceixaba_maskb_maskab_maska_trueb_truea_true_scoreb_true_scores                       r1   _average_multiclass_ovo_scorerH      s$   P FG,IIf%M##A&I9q=)Q.G((7#KZ'K&1'"tJ  ]A >?
FQ11--/ZZ0JrN$VWWaZ-@A$VWWaZ-@A',6!;B @ ::k:66r3   )N)r
   )__doc__	itertoolsr   numpyr   utilsr   r   utils.multiclassr   r2   rH    r3   r1   <module>rO      s%    #  8 -jZC7r3   