
"""Metrics to assess performance on regression task.

Functions named as ``*_score`` return a scalar value to maximize: the higher
the better.

Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better.
    N)Real)xlogy   )UndefinedMetricWarning)_average_find_matching_floating_dtypeget_namespaceget_namespace_and_devicesize)HiddenInterval
StrOptionsvalidate_params)_weighted_percentile)_check_sample_weight_num_samplescheck_arraycheck_consistent_lengthcolumn_or_1d)	max_errormean_absolute_errormean_squared_errormean_squared_log_errormedian_absolute_errormean_absolute_percentage_errormean_pinball_lossr2_scoreroot_mean_squared_log_errorroot_mean_squared_errorexplained_variance_scoremean_tweedie_deviancemean_poisson_deviancemean_gamma_devianced2_tweedie_scored2_pinball_scored2_absolute_error_scorec                    t        | |||      \  }}t        | |       t        | d|      } t        |d|      }| j                  dk(  r|j	                  | d      } |j                  dk(  r|j	                  |d      }| j
                  d   |j
                  d   k7  r5t        dj                  | j
                  d   |j
                  d               | j
                  d   }d}t        |t              r||vrat        dj                  ||            |Dt        |d	      }|dk(  rt        d
      |t        |      k7  rt        dt        |      |fz        |dk(  rdnd}|| ||fS )aF  Check that y_true and y_pred belong to the same regression task.

    Parameters
    ----------
    y_true : array-like

    y_pred : array-like

    multioutput : array-like or string in ['raw_values', 'uniform_average',
        'variance_weighted'] or None
        None is accepted due to backward compatibility of r2_score().

    dtype : str or list, default="numeric"
        the dtype argument passed to check_array.

    Returns
    -------
    type_true : one of {'continuous', 'continuous-multioutput'}
        The type of the true target data, as output by
        'utils.multiclass.type_of_target'.

    y_true : array-like of shape (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples, n_outputs)
        Estimated target values.

    multioutput : array-like of shape (n_outputs) or string in ['raw_values',
        'uniform_average', 'variance_weighted'] or None
        Custom output weights if ``multioutput`` is array-like or
        just the corresponding argument if ``multioutput`` is a
        correct keyword.
    """
    xp, _ = get_namespace(y_true, y_pred, multioutput, xp=xp)

    check_consistent_length(y_true, y_pred)
    y_true = check_array(y_true, ensure_2d=False, dtype=dtype)
    y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype)

    if y_true.ndim == 1:
        y_true = xp.reshape(y_true, (-1, 1))
    if y_pred.ndim == 1:
        y_pred = xp.reshape(y_pred, (-1, 1))

    if y_true.shape[1] != y_pred.shape[1]:
        raise ValueError(
            "y_true and y_pred have different number of output ({0}!={1})".format(
                y_true.shape[1], y_pred.shape[1]
            )
        )

    n_outputs = y_true.shape[1]
    allowed_multioutput_str = ("raw_values", "uniform_average", "variance_weighted")
    if isinstance(multioutput, str):
        if multioutput not in allowed_multioutput_str:
            raise ValueError(
                "Allowed 'multioutput' string values are {}. "
                "You provided multioutput={!r}".format(
                    allowed_multioutput_str, multioutput
                )
            )
    elif multioutput is not None:
        multioutput = check_array(multioutput, ensure_2d=False)
        if n_outputs == 1:
            raise ValueError("Custom weights are useful only in multi-output cases.")
        elif n_outputs != len(multioutput):
            raise ValueError(
                "There must be equally many custom weights (%d) as outputs (%d)."
                % (len(multioutput), n_outputs)
            )
    y_type = "continuous" if n_outputs == 1 else "continuous-multioutput"

    return y_type, y_true, y_pred, multioutput
@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"],
    },
    prefer_skip_nested_validation=True,
)
def mean_absolute_error(
    y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"
):
    """Mean absolute error regression loss.

    Read more in the :ref:`User Guide <mean_absolute_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        If multioutput is 'raw_values', then mean absolute error is returned
        for each output separately.
        If multioutput is 'uniform_average' or an ndarray of weights, then the
        weighted average of all output errors is returned.

        MAE output is non-negative floating point. The best value is 0.0.

    Examples
    --------
    >>> from sklearn.metrics import mean_absolute_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_absolute_error(y_true, y_pred)
    np.float64(0.5)
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> mean_absolute_error(y_true, y_pred)
    np.float64(0.75)
    >>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    array([0.5, 1. ])
    >>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
    np.float64(0.85...)
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput
    )
    check_consistent_length(y_true, y_pred, sample_weight)
    output_errors = np.average(np.abs(y_pred - y_true), weights=sample_weight, axis=0)
    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            return output_errors
        elif multioutput == "uniform_average":
            # pass None as weights to np.average: uniform mean
            multioutput = None
    return np.average(output_errors, weights=multioutput)


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "alpha": [Interval(Real, 0, 1, closed="both")],
        "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"],
    },
    prefer_skip_nested_validation=True,
)
def mean_pinball_loss(
    y_true, y_pred, *, sample_weight=None, alpha=0.5, multioutput="uniform_average"
):
    """Pinball loss for quantile regression.

    Read more in the :ref:`User Guide <pinball_loss>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    alpha : float, default=0.5
        Slope of the pinball loss. This loss is equivalent to half of
        :ref:`mean_absolute_error` when `alpha=0.5`, and `alpha=0.95` is
        minimized by estimators of the 95th percentile.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        If multioutput is 'raw_values', then the pinball loss is returned
        for each output separately.
        If multioutput is 'uniform_average' or an ndarray of weights, then the
        weighted average of all output errors is returned.

        The pinball loss output is a non-negative floating point. The best
        value is 0.0.

    Examples
    --------
    >>> from sklearn.metrics import mean_pinball_loss
    >>> y_true = [1, 2, 3]
    >>> mean_pinball_loss(y_true, [0, 2, 3], alpha=0.1)
    np.float64(0.03...)
    >>> mean_pinball_loss(y_true, [1, 2, 4], alpha=0.1)
    np.float64(0.3...)
    >>> mean_pinball_loss(y_true, [0, 2, 3], alpha=0.9)
    np.float64(0.3...)
    >>> mean_pinball_loss(y_true, [1, 2, 4], alpha=0.9)
    np.float64(0.03...)
    >>> mean_pinball_loss(y_true, y_true, alpha=0.1)
    np.float64(0.0)
    >>> mean_pinball_loss(y_true, y_true, alpha=0.9)
    np.float64(0.0)
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput
    )
    check_consistent_length(y_true, y_pred, sample_weight)
    diff = y_true - y_pred
    sign = (diff >= 0).astype(diff.dtype)
    loss = alpha * sign * diff - (1 - alpha) * (1 - sign) * diff
    output_errors = np.average(loss, weights=sample_weight, axis=0)
    if isinstance(multioutput, str) and multioutput == "raw_values":
        return output_errors
    if isinstance(multioutput, str) and multioutput == "uniform_average":
        # pass None as weights to np.average: uniform mean
        multioutput = None
    return np.average(output_errors, weights=multioutput)
@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"],
    },
    prefer_skip_nested_validation=True,
)
def mean_absolute_percentage_error(
    y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"
):
    """Mean absolute percentage error (MAPE) regression loss.

    Note here that the output is not a percentage in the range [0, 100]
    and a value of 100 does not mean 100% but 1e2. Furthermore, the output
    can be arbitrarily high when `y_true` is small (which is specific to the
    metric) or when `abs(y_true - y_pred)` is large (which is common for most
    regression metrics). Read more in the
    :ref:`User Guide <mean_absolute_percentage_error>`.

    .. versionadded:: 0.24

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.
        If input is list then the shape must be (n_outputs,).

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        If multioutput is 'raw_values', then mean absolute percentage error
        is returned for each output separately.
        If multioutput is 'uniform_average' or an ndarray of weights, then the
        weighted average of all output errors is returned.

        MAPE output is non-negative floating point. The best value is 0.0.
        But note that bad predictions can lead to arbitrarily large
        MAPE values, especially if some `y_true` values are very close to zero.
        Note that we return a large value instead of `inf` when `y_true` is zero.

    Examples
    --------
    >>> from sklearn.metrics import mean_absolute_percentage_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_absolute_percentage_error(y_true, y_pred)
    np.float64(0.3273...)
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> mean_absolute_percentage_error(y_true, y_pred)
    np.float64(0.5515...)
    >>> mean_absolute_percentage_error(y_true, y_pred, multioutput=[0.3, 0.7])
    np.float64(0.6198...)
    >>> # the value when some element of the y_true is zero is arbitrarily high because
    >>> # of the division by epsilon
    >>> y_true = [1., 0., 2.4, 7.]
    >>> y_pred = [1.2, 0.1, 2.4, 8.]
    >>> mean_absolute_percentage_error(y_true, y_pred)
    np.float64(112589990684262.48)
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput
    )
    check_consistent_length(y_true, y_pred, sample_weight)
    epsilon = np.finfo(np.float64).eps
    mape = np.abs(y_pred - y_true) / np.maximum(np.abs(y_true), epsilon)
    output_errors = np.average(mape, weights=sample_weight, axis=0)
    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            return output_errors
        elif multioutput == "uniform_average":
            # pass None as weights to np.average: uniform mean
            multioutput = None
    return np.average(output_errors, weights=multioutput)
deprecatedboolean)r;   r<   rF   r=   squared)rF   r=   re   c                F   |dk7  r+t        j                  dt               |st        | |||      S t	        | ||      \  }} }}t        | ||       t        j                  | |z
  dz  d|      }t        |t              r|dk(  r|S |dk(  rd	}t        j                  ||
      S )a  Mean squared error regression loss.

    Read more in the :ref:`User Guide <mean_squared_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    squared : bool, default=True
        If True returns MSE value, if False returns RMSE value.

        .. deprecated:: 1.4
           `squared` is deprecated in 1.4 and will be removed in 1.6.
           Use :func:`~sklearn.metrics.root_mean_squared_error`
           instead to calculate the root mean squared error.

    Returns
    -------
    loss : float or ndarray of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.

    Examples
    --------
    >>> from sklearn.metrics import mean_squared_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_squared_error(y_true, y_pred)
    np.float64(0.375)
    >>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
    >>> y_pred = [[0, 2],[-1, 2],[8, -5]]
    >>> mean_squared_error(y_true, y_pred)
    np.float64(0.708...)
    >>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
    array([0.41666667, 1.        ])
    >>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
    np.float64(0.825...)
    """
    if squared != "deprecated":
        warnings.warn(
            "'squared' is deprecated in version 1.4 and will be removed in 1.6. "
            "To calculate the root mean squared error, use the function "
            "'root_mean_squared_error'.",
            FutureWarning,
        )
        if not squared:
            return root_mean_squared_error(
                y_true, y_pred, sample_weight=sample_weight, multioutput=multioutput
            )

    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput
    )
    check_consistent_length(y_true, y_pred, sample_weight)
    output_errors = np.average((y_true - y_pred) ** 2, axis=0, weights=sample_weight)

    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            return output_errors
        elif multioutput == "uniform_average":
            # pass None as weights to np.average: uniform mean
            multioutput = None
    return np.average(output_errors, weights=multioutput)
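
# Editorial migration note (hedged): while the deprecated keyword exists,
# `mean_squared_error(y_true, y_pred, squared=False)` and
# `root_mean_squared_error(y_true, y_pred)` return the same value; only the
# latter form survives the removal of `squared` in 1.6.
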
                  ||      S )a  Root mean squared error regression loss.

    Read more in the :ref:`User Guide <mean_squared_error>`.

    .. versionadded:: 1.4

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape             (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.

    Examples
    --------
    >>> from sklearn.metrics import root_mean_squared_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> root_mean_squared_error(y_true, y_pred)
    np.float64(0.612...)
    >>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
    >>> y_pred = [[0, 2],[-1, 2],[8, -5]]
    >>> root_mean_squared_error(y_true, y_pred)
    np.float64(0.822...)
    r.   rH   r/   NrM   )rN   sqrtr   r8   r9   rO   )r;   r<   rF   r=   rQ   s        rB   r   r   
  s]    t GGF-\	
M +s#,&  --K::m[99rD   c                l   |dk7  r+t        j                  dt               |st        | |||      S t	        | ||      \  }} }}t        | ||       | dk  j                         s|dk  j                         rt        d      t        t        j                  |       t        j                  |      ||      S )aS  Mean squared logarithmic error regression loss.

    Read more in the :ref:`User Guide <mean_squared_log_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape             (n_outputs,), default='uniform_average'

        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors when the input is of multioutput
            format.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    squared : bool, default=True
        If True returns MSLE (mean squared log error) value.
        If False returns RMSLE (root mean squared log error) value.

        .. deprecated:: 1.4
           `squared` is deprecated in 1.4 and will be removed in 1.6.
           Use :func:`~sklearn.metrics.root_mean_squared_log_error`
           instead to calculate the root mean squared logarithmic error.

    Returns
    -------
    loss : float or ndarray of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.

    Examples
    --------
    >>> from sklearn.metrics import mean_squared_log_error
    >>> y_true = [3, 5, 2.5, 7]
    >>> y_pred = [2.5, 5, 4, 8]
    >>> mean_squared_log_error(y_true, y_pred)
    np.float64(0.039...)
    >>> y_true = [[0.5, 1], [1, 2], [7, 6]]
    >>> y_pred = [[0.5, 2], [1, 2.5], [8, 8]]
    >>> mean_squared_log_error(y_true, y_pred)
    np.float64(0.044...)
    >>> mean_squared_log_error(y_true, y_pred, multioutput='raw_values')
    array([0.00462428, 0.08377444])
    >>> mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7])
    np.float64(0.060...)
    rc   z'squared' is deprecated in version 1.4 and will be removed in 1.6. To calculate the root mean squared logarithmic error, use the function'root_mean_squared_log_error'.rH   r   zSMean Squared Logarithmic Error cannot be used when targets contain negative values.)rg   rh   ri   r   rC   r   anyr6   r   rN   log1p)r;   r<   rF   r=   re   rA   s         rB   r   r   T  s    \ ,1
 	
 .m  +=+'FFFK FFM:
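
# Editorial note (hedged): as implemented above, MSLE is exactly the MSE of
# log1p-transformed targets, i.e.
# `mean_squared_log_error(y, p) == mean_squared_error(np.log1p(y), np.log1p(p))`,
# which is why negative targets must be rejected before the transform.
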
fqj--//
 	

 

#	 rD   c                   t        | ||      \  }} }}t        | ||       | dk  j                         s|dk  j                         rt        d      t	        t        j                  |       t        j                  |      ||      S )a{  Root mean squared logarithmic error regression loss.

    Read more in the :ref:`User Guide <mean_squared_log_error>`.

    .. versionadded:: 1.4

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'

        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors when the input is of multioutput
            format.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.

    Examples
    --------
    >>> from sklearn.metrics import root_mean_squared_log_error
    >>> y_true = [3, 5, 2.5, 7]
    >>> y_pred = [2.5, 5, 4, 8]
    >>> root_mean_squared_log_error(y_true, y_pred)
    np.float64(0.199...)
    """
    _, y_true, y_pred, multioutput = _check_reg_targets(y_true, y_pred, multioutput)
    check_consistent_length(y_true, y_pred, sample_weight)

    if (y_true < 0).any() or (y_pred < 0).any():
        raise ValueError(
            "Root Mean Squared Logarithmic Error cannot be used when "
            "targets contain negative values."
        )

    return root_mean_squared_error(
        np.log1p(y_true),
        np.log1p(y_pred),
        sample_weight=sample_weight,
        multioutput=multioutput,
    )
fqj--//
 	

 #

#	 rD   )r;   r<   r=   rF   )r=   rF   c                N   t        | ||      \  }} }}|.t        j                  t        j                  || z
        d      }n/t	        ||      }t        t        j                  || z
        |      }t        |t              r|dk(  r|S |dk(  rd}t        j                  ||      S )aa  Median absolute error regression loss.

    Median absolute error output is non-negative floating point. The best value
    is 0.0. Read more in the :ref:`User Guide <median_absolute_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values. Array-like value defines
        weights used to average errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

        .. versionadded:: 0.24

    Returns
    -------
    loss : float or ndarray of floats
        If multioutput is 'raw_values', then the median absolute error is
        returned for each output separately.
        If multioutput is 'uniform_average' or an ndarray of weights, then the
        weighted average of all output errors is returned.

    Examples
    --------
    >>> from sklearn.metrics import median_absolute_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> median_absolute_error(y_true, y_pred)
    np.float64(0.5)
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> median_absolute_error(y_true, y_pred)
    np.float64(0.75)
    >>> median_absolute_error(y_true, y_pred, multioutput='raw_values')
    array([0.5, 1. ])
    >>> median_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
    np.float64(0.85)
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput
    )
    if sample_weight is None:
        output_errors = np.median(np.abs(y_pred - y_true), axis=0)
    else:
        sample_weight = _check_sample_weight(sample_weight, y_pred)
        output_errors = _weighted_percentile(
            np.abs(y_pred - y_true), sample_weight=sample_weight
        )
    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            return output_errors
        elif multioutput == "uniform_average":
            # pass None as weights to np.average: uniform mean
            multioutput = None
    return np.average(output_errors, weights=multioutput)


def _assemble_r2_explained_variance(
    numerator, denominator, n_outputs, multioutput, force_finite, xp, device
):
    """Common part used by explained variance score and :math:`R^2` score."""
    dtype = numerator.dtype

    nonzero_denominator = denominator != 0

    if not force_finite:
        # Standard formula, that may lead to NaN or -Inf
        output_scores = 1 - numerator / denominator
    else:
        nonzero_numerator = numerator != 0
        # Default = Zero Numerator = perfect predictions. Set to 1.0
        # (note: even if denominator is zero, thus avoiding NaN scores)
        output_scores = xp.ones([n_outputs], device=device, dtype=dtype)
        # Non-zero Numerator and Non-zero Denominator: use the formula
        valid_score = nonzero_denominator & nonzero_numerator
        output_scores[valid_score] = 1 - (
            numerator[valid_score] / denominator[valid_score]
        )
        # Non-zero Numerator and Zero Denominator:
        # arbitrarily set to 0.0 to avoid -inf scores
        output_scores[nonzero_numerator & ~nonzero_denominator] = 0.0

    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            # return scores individually
            return output_scores
        elif multioutput == "uniform_average":
            # Passing None as weights results in uniform mean
            avg_weights = None
        elif multioutput == "variance_weighted":
            avg_weights = denominator
            if not xp.any(nonzero_denominator):
                # All weights are zero, np.average would raise a ZeroDiv error.
                # This only happens when all y are constant (or 1-element long).
                # Since weights are all equal, fall back to uniform weights.
                avg_weights = None
    else:
        avg_weights = multioutput

    result = _average(output_scores, weights=avg_weights)
    if size(result) == 1:
        return float(result)
    return result


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "multioutput": [
            StrOptions({"raw_values", "uniform_average", "variance_weighted"}),
            "array-like",
        ],
        "force_finite": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def explained_variance_score(
    y_true,
    y_pred,
    *,
    sample_weight=None,
    multioutput="uniform_average",
    force_finite=True,
):
    """Explained variance regression score function.

    Best possible score is 1.0, lower values are worse.

    In the particular case when ``y_true`` is constant, the explained variance
    score is not finite: it is either ``NaN`` (perfect predictions) or
    ``-Inf`` (imperfect predictions). To prevent such non-finite numbers from
    polluting higher-level experiments such as a grid search cross-validation,
    by default these cases are replaced with 1.0 (perfect predictions) or 0.0
    (imperfect predictions) respectively. If ``force_finite``
    is set to ``False``, this score falls back on the original :math:`R^2`
    definition.

    .. note::
       The Explained Variance score is similar to the
       :func:`R^2 score <r2_score>`, with the notable difference that it
       does not account for systematic offsets in the prediction. Most often
       the :func:`R^2 score <r2_score>` should be preferred.

    Read more in the :ref:`User Guide <explained_variance_score>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average', 'variance_weighted'} or \
            array-like of shape (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output scores.
        Array-like value defines weights used to average scores.

        'raw_values' :
            Returns a full set of scores in case of multioutput input.

        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.

        'variance_weighted' :
            Scores of all outputs are averaged, weighted by the variances
            of each individual output.

    force_finite : bool, default=True
        Flag indicating if ``NaN`` and ``-Inf`` scores resulting from constant
        data should be replaced with real numbers (``1.0`` if prediction is
        perfect, ``0.0`` otherwise). Default is ``True``, a convenient setting
        for hyperparameters' search procedures (e.g. grid search
        cross-validation).

        .. versionadded:: 1.1

    Returns
    -------
    score : float or ndarray of floats
        The explained variance or ndarray if 'multioutput' is 'raw_values'.

    See Also
    --------
    r2_score :
        Similar metric, but accounting for systematic offsets in
        prediction.

    Notes
    -----
    This is not a symmetric function.

    Examples
    --------
    >>> from sklearn.metrics import explained_variance_score
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> explained_variance_score(y_true, y_pred)
    0.957...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
    0.983...
    >>> y_true = [-2, -2, -2]
    >>> y_pred = [-2, -2, -2]
    >>> explained_variance_score(y_true, y_pred)
    1.0
    >>> explained_variance_score(y_true, y_pred, force_finite=False)
    nan
    >>> y_true = [-2, -2, -2]
    >>> y_pred = [-2, -2, -2 + 1e-8]
    >>> explained_variance_score(y_true, y_pred)
    0.0
    >>> explained_variance_score(y_true, y_pred, force_finite=False)
    -inf
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput
    )
    check_consistent_length(y_true, y_pred, sample_weight)

    y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
    numerator = np.average(
        (y_true - y_pred - y_diff_avg) ** 2, weights=sample_weight, axis=0
    )

    y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
    denominator = np.average((y_true - y_true_avg) ** 2, weights=sample_weight, axis=0)

    return _assemble_r2_explained_variance(
        numerator=numerator,
        denominator=denominator,
        n_outputs=y_true.shape[1],
        multioutput=multioutput,
        force_finite=force_finite,
        xp=get_namespace(y_true)[0],
        # TODO: update once Array API support is added to explained_variance_score.
        device=None,
    )


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "multioutput": [
            StrOptions({"raw_values", "uniform_average", "variance_weighted"}),
            "array-like",
            None,
        ],
        "force_finite": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def r2_score(
    y_true,
    y_pred,
    *,
    sample_weight=None,
    multioutput="uniform_average",
    force_finite=True,
):
    """:math:`R^2` (coefficient of determination) regression score function.

    Best possible score is 1.0 and it can be negative (because the
    model can be arbitrarily worse). In the general case when the true y is
    non-constant, a constant model that always predicts the average y
    disregarding the input features would get a :math:`R^2` score of 0.0.

    In the particular case when ``y_true`` is constant, the :math:`R^2` score
    is not finite: it is either ``NaN`` (perfect predictions) or ``-Inf``
    (imperfect predictions). To prevent such non-finite numbers from polluting
    higher-level experiments such as a grid search cross-validation, by default
    these cases are replaced with 1.0 (perfect predictions) or 0.0 (imperfect
    predictions) respectively. You can set ``force_finite`` to ``False`` to
    prevent this fix from happening.

    Note: when the prediction residuals have zero mean, the :math:`R^2` score
    is identical to the
    :func:`Explained Variance score <explained_variance_score>`.

    Read more in the :ref:`User Guide <r2_score>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average', 'variance_weighted'}, \
            array-like of shape (n_outputs,) or None, default='uniform_average'

        Defines aggregating of multiple output scores.
        Array-like value defines weights used to average scores.
        Default is "uniform_average".

        'raw_values' :
            Returns a full set of scores in case of multioutput input.

        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.

        'variance_weighted' :
            Scores of all outputs are averaged, weighted by the variances
            of each individual output.

        .. versionchanged:: 0.19
            Default value of multioutput is 'uniform_average'.

    force_finite : bool, default=True
        Flag indicating if ``NaN`` and ``-Inf`` scores resulting from constant
        data should be replaced with real numbers (``1.0`` if prediction is
        perfect, ``0.0`` otherwise). Default is ``True``, a convenient setting
        for hyperparameters' search procedures (e.g. grid search
        cross-validation).

        .. versionadded:: 1.1

    Returns
    -------
    z : float or ndarray of floats
        The :math:`R^2` score or ndarray of scores if 'multioutput' is
        'raw_values'.

    Notes
    -----
    This is not a symmetric function.

    Unlike most other scores, :math:`R^2` score may be negative (it need not
    actually be the square of a quantity R).

    This metric is not well-defined for single samples and will return a NaN
    value if n_samples is less than two.

    References
    ----------
    .. [1] `Wikipedia entry on the Coefficient of determination
            <https://en.wikipedia.org/wiki/Coefficient_of_determination>`_

    Examples
    --------
    >>> from sklearn.metrics import r2_score
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> r2_score(y_true, y_pred)
    0.948...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> r2_score(y_true, y_pred,
    ...          multioutput='variance_weighted')
    0.938...
    >>> y_true = [1, 2, 3]
    >>> y_pred = [1, 2, 3]
    >>> r2_score(y_true, y_pred)
    1.0
    >>> y_true = [1, 2, 3]
    >>> y_pred = [2, 2, 2]
    >>> r2_score(y_true, y_pred)
    0.0
    >>> y_true = [1, 2, 3]
    >>> y_pred = [3, 2, 1]
    >>> r2_score(y_true, y_pred)
    -3.0
    >>> y_true = [-2, -2, -2]
    >>> y_pred = [-2, -2, -2]
    >>> r2_score(y_true, y_pred)
    1.0
    >>> r2_score(y_true, y_pred, force_finite=False)
    nan
    >>> y_true = [-2, -2, -2]
    >>> y_pred = [-2, -2, -2 + 1e-8]
    >>> r2_score(y_true, y_pred)
    0.0
    >>> r2_score(y_true, y_pred, force_finite=False)
    -inf
    """
    xp, _, device_ = get_namespace_and_device(
        y_true, y_pred, sample_weight, multioutput
    )

    dtype = _find_matching_floating_dtype(y_true, y_pred, sample_weight, xp=xp)

    _, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput, dtype=dtype, xp=xp
    )
    check_consistent_length(y_true, y_pred, sample_weight)

    if _num_samples(y_pred) < 2:
        msg = "R^2 score is not well-defined with less than two samples."
        warnings.warn(msg, UndefinedMetricWarning)
        return float("nan")

    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
        weight = sample_weight[:, None]
    else:
        weight = 1.0

    numerator = xp.sum(weight * (y_true - y_pred) ** 2, axis=0)
    denominator = xp.sum(
        weight * (y_true - _average(y_true, axis=0, weights=sample_weight, xp=xp)) ** 2,
        axis=0,
    )

    return _assemble_r2_explained_variance(
        numerator=numerator,
        denominator=denominator,
        n_outputs=y_true.shape[1],
        multioutput=multioutput,
        force_finite=force_finite,
        xp=xp,
        device=device_,
    )


@validate_params(
    {"y_true": ["array-like"], "y_pred": ["array-like"]},
    prefer_skip_nested_validation=True,
)
def max_error(y_true, y_pred):
    """
              S )al  
    The max_error metric calculates the maximum residual error.

    Read more in the :ref:`User Guide <max_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,)
        Estimated target values.

    Returns
    -------
    max_error : float
        A positive floating point value (the best value is 0.0).

    Examples
    --------
    >>> from sklearn.metrics import max_error
    >>> y_true = [3, 2, 7, 1]
    >>> y_pred = [4, 2, 7, 1]
    >>> max_error(y_true, y_pred)
    np.int64(1)
    Nr2   z&Multioutput not supported in max_error)rC   r6   rN   maxrP   )r;   r<   rA   r>   s       rB   r   r     sM    D !3664 HFFFA))ABB66"&&&)**rD   c                    |}|dk  rdt        j                  t        j                  | d      d|z
        d|z
  d|z
  z  z  | t        j                  |d|z
        z  d|z
  z  z
  t        j                  |d|z
        d|z
  z  z   z  }n|dk(  r	| |z
  dz  }n|dk(  rdt        | | |z        | z
  |z   z  }n|dk(  r%dt        j                  || z        | |z  z   dz
  z  }nkdt        j                  | d|z
        d|z
  d|z
  z  z  | t        j                  |d|z
        z  d|z
  z  z
  t        j                  |d|z
        d|z
  z  z   z  }t        j
                  ||      S )z&Mean Tweedie deviance regression loss.r   r   r,   rM   )rN   powerr`   r   logrO   )r;   r<   rF   r   pdevs         rB   _mean_tweedie_deviancer     s{   A1uHHRZZ*AE2q1uQ6GHrxxA..!a%89hhvq1u%Q/0

 
a1$	
a5&1F:VCD	
a266&6/*Vf_<q@AHHVQU#A!a%'89rxxA..!a%89hhvq1u%Q/0
 ::c=11rD   rightleft)r;   r<   rF   r   rF   r   c                l   t        | |dt        j                  t        j                  g      \  }} }}|dk(  rt	        d      t        | ||       |"t        |      }|ddt        j                  f   }d| d}|dk  r!|dk  j                         rt	        |dz         |dk(  rnd	|cxk  rd
k  r7n n4| dk  j                         s|dk  j                         rMt	        |dz         |d
k\  r4| dk  j                         s|dk  j                         rt	        |dz         t        t        | |||      S )ag  Mean Tweedie deviance regression loss.

    Read more in the :ref:`User Guide <mean_tweedie_deviance>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    power : float, default=0
        Tweedie power parameter. Either power <= 0 or power >= 1.

        The higher `p` the less weight is given to extreme
        deviations between true and predicted targets.

        - power < 0: Extreme stable distribution. Requires: y_pred > 0.
        - power = 0 : Normal distribution, output corresponds to
          mean_squared_error. y_true and y_pred can be any real numbers.
        - power = 1 : Poisson distribution. Requires: y_true >= 0 and
          y_pred > 0.
        - 1 < p < 2 : Compound Poisson distribution. Requires: y_true >= 0
          and y_pred > 0.
        - power = 2 : Gamma distribution. Requires: y_true > 0 and y_pred > 0.
        - power = 3 : Inverse Gaussian distribution. Requires: y_true > 0
          and y_pred > 0.
        - otherwise : Positive stable distribution. Requires: y_true > 0
          and y_pred > 0.

    Returns
    -------
    loss : float
        A non-negative floating point value (the best value is 0.0).

    Examples
    --------
    >>> from sklearn.metrics import mean_tweedie_deviance
    >>> y_true = [2, 0, 1, 4]
    >>> y_pred = [0.5, 0.5, 2., 2.]
    >>> mean_tweedie_deviance(y_true, y_pred, power=1)
    np.float64(1.4260...)
    Nr   r2   z2Multioutput not supported in mean_tweedie_deviancez'Mean Tweedie deviance error with power=z can only be used on r   zstrictly positive y_pred.r,   r   z,non-negative y and strictly positive y_pred.zstrictly positive y and y_pred.r   )
rC   rN   r^   float32r6   r   r   newaxisrm   r   )r;   r<   rF   r   rA   r>   messages          rB   r!   r!     sD   x !3RZZ$<!FFFA ))MNNFFM: $]3%am47w>STGqyaKW'BBCC	!	
eaQJ&A+!2!2!4W'UUVV	!aK6Q;"3"3"5W'HHII !m5 rD   r;   r<   rF   rr   c                     t        | ||d      S )ap  Mean Poisson deviance regression loss.

    Poisson deviance is equivalent to the Tweedie deviance with
    the power parameter `power=1`.

    Read more in the :ref:`User Guide <mean_tweedie_deviance>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values. Requires y_true >= 0.

    y_pred : array-like of shape (n_samples,)
        Estimated target values. Requires y_pred > 0.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    loss : float
        A non-negative floating point value (the best value is 0.0).

    Examples
    --------
    >>> from sklearn.metrics import mean_poisson_deviance
    >>> y_true = [2, 0, 1, 4]
    >>> y_pred = [0.5, 0.5, 2., 2.]
    >>> mean_poisson_deviance(y_true, y_pred)
    np.float64(1.4260...)
    """
    return mean_tweedie_deviance(y_true, y_pred, sample_weight=sample_weight, power=1)


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
    },
    prefer_skip_nested_validation=True,
)
def mean_gamma_deviance(y_true, y_pred, *, sample_weight=None):
    """Mean Gamma deviance regression loss.

    Gamma deviance is equivalent to the Tweedie deviance with
    the power parameter `power=2`. It is invariant to scaling of
    the target variable, and measures relative errors.

    Read more in the :ref:`User Guide <mean_tweedie_deviance>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values. Requires y_true > 0.

    y_pred : array-like of shape (n_samples,)
        Estimated target values. Requires y_pred > 0.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    loss : float
        A non-negative floating point value (the best value is 0.0).

    Examples
    --------
    >>> from sklearn.metrics import mean_gamma_deviance
    >>> y_true = [2, 0.5, 1, 4]
    >>> y_pred = [0.5, 0.5, 2., 2.]
    >>> mean_gamma_deviance(y_true, y_pred)
    np.float64(1.0568...)
    """
    return mean_tweedie_deviance(y_true, y_pred, sample_weight=sample_weight, power=2)


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "power": [
            Interval(Real, None, 0, closed="right"),
            Interval(Real, 1, None, closed="left"),
        ],
    },
    prefer_skip_nested_validation=True,
)
def d2_tweedie_score(y_true, y_pred, *, sample_weight=None, power=0):
    """
||	z  z
  S )aW
  
    :math:`D^2` regression score function, fraction of Tweedie deviance explained.

    Best possible score is 1.0 and it can be negative (because the model can be
    arbitrarily worse). A model that always uses the empirical mean of `y_true` as
    constant prediction, disregarding the input features, gets a D^2 score of 0.0.

    Read more in the :ref:`User Guide <d2_score>`.

    .. versionadded:: 1.0

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    power : float, default=0
        Tweedie power parameter. Either power <= 0 or power >= 1.

        The higher `p` the less weight is given to extreme
        deviations between true and predicted targets.

        - power < 0: Extreme stable distribution. Requires: y_pred > 0.
        - power = 0 : Normal distribution, output corresponds to r2_score.
          y_true and y_pred can be any real numbers.
        - power = 1 : Poisson distribution. Requires: y_true >= 0 and
          y_pred > 0.
        - 1 < p < 2 : Compound Poisson distribution. Requires: y_true >= 0
          and y_pred > 0.
        - power = 2 : Gamma distribution. Requires: y_true > 0 and y_pred > 0.
        - power = 3 : Inverse Gaussian distribution. Requires: y_true > 0
          and y_pred > 0.
        - otherwise : Positive stable distribution. Requires: y_true > 0
          and y_pred > 0.

    Returns
    -------
    z : float or ndarray of floats
        The D^2 score.

    Notes
    -----
    This is not a symmetric function.

    Like R^2, D^2 score may be negative (it need not actually be the square of
    a quantity D).

    This metric is not well-defined for single samples and will return a NaN
    value if n_samples is less than two.

    References
    ----------
    .. [1] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J.
           Wainwright. "Statistical Learning with Sparsity: The Lasso and
           Generalizations." (2015). https://hastie.su.domains/StatLearnSparsity/

    Examples
    --------
    >>> from sklearn.metrics import d2_tweedie_score
    >>> y_true = [0.5, 1, 2.5, 7]
    >>> y_pred = [1, 1, 5, 3.5]
    >>> d2_tweedie_score(y_true, y_pred)
    np.float64(0.285...)
    >>> d2_tweedie_score(y_true, y_pred, power=1)
    np.float64(0.487...)
    >>> d2_tweedie_score(y_true, y_pred, power=2)
    np.float64(0.630...)
    >>> d2_tweedie_score(y_true, y_true, power=2)
    np.float64(1.0)
    Nr   r2   z-Multioutput not supported in d2_tweedie_scorer   9D^2 score is not well-defined with less than two samples.r   r   rM   r,   )rC   rN   r^   r   r6   r   rg   rh   r   rx   squeezer!   rO   r   )
r;   r<   rF   r   rA   r>   r   ry   y_avgrz   s
             rB   r$   r$     s    r !3RZZ$<!FFFA ))HIIFaIc12U|ZZ'F);FF%m5I JJv}5E(]%K y;&&&rD   c                   t        | ||      \  }} }}t        | ||       t        |      dk  r'd}t        j                  |t
               t        d      S t        | |||d      }|;t        j                  t        j                  | |dz  d	      t        |       d
f      }n<t        ||       }t        j                  t        | ||dz        t        |       d
f      }t        | |||d      }	|dk7  }
|	dk7  }|
|z  }t        j                  | j                  d
         }d
||   |	|   z  z
  ||<   d||
| z  <   t!        |t"              r
|dk(  r|S d}n|}t        j$                  ||      S )u
  
    :math:`D^2` regression score function, fraction of pinball loss explained.

    Best possible score is 1.0 and it can be negative (because the model can be
    arbitrarily worse). A model that always uses the empirical alpha-quantile of
    `y_true` as constant prediction, disregarding the input features,
    gets a :math:`D^2` score of 0.0.

    Read more in the :ref:`User Guide <d2_score>`.

    .. versionadded:: 1.1

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    alpha : float, default=0.5
        Slope of the pinball deviance. It determines the quantile level alpha
        for which the pinball deviance and also D2 are optimal.
        The default `alpha=0.5` is equivalent to `d2_absolute_error_score`.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape             (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average scores.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.

    Returns
    -------
    score : float or ndarray of floats
        The :math:`D^2` score with a pinball deviance
        or ndarray of scores if `multioutput='raw_values'`.

    Notes
    -----
    Like :math:`R^2`, :math:`D^2` score may be negative
    (it need not actually be the square of a quantity D).

    This metric is not well-defined for a single point and will return a NaN
    value if n_samples is less than two.

     References
    ----------
    .. [1] Eq. (7) of `Koenker, Roger; Machado, José A. F. (1999).
           "Goodness of Fit and Related Inference Processes for Quantile Regression"
           <https://doi.org/10.1080/01621459.1999.10473882>`_
    .. [2] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J.
           Wainwright. "Statistical Learning with Sparsity: The Lasso and
           Generalizations." (2015). https://hastie.su.domains/StatLearnSparsity/

    Examples
    --------
    >>> from sklearn.metrics import d2_pinball_score
    >>> y_true = [1, 2, 3]
    >>> y_pred = [1, 3, 3]
    >>> d2_pinball_score(y_true, y_pred)
    np.float64(0.5)
    >>> d2_pinball_score(y_true, y_pred, alpha=0.9)
    np.float64(0.772...)
    >>> d2_pinball_score(y_true, y_pred, alpha=0.1)
    np.float64(-1.045...)
    >>> d2_pinball_score(y_true, y_true, alpha=0.1)
    np.float64(1.0)
    r   r   r   r.   rV   Nd   r   )qrL   r,   )rF   
percentilerv   rM   )rC   r   r   rg   rh   r   rx   r   rN   tiler   r:   r   r   rw   r5   r8   r9   rO   )r;   r<   rF   rT   r=   rA   r   ry   
y_quantilerz   r~   r|   r   r}   r   s                  rB   r%   r%   B  s   x +=+'FFFK FFM:FaIc12U|!# I WWMM&ECKa83v;:J

 -]FCWW m [!	

 $# K "Q%*#&99KGGFLLO,M!"i&<{;?W&W!XM+>AM#':&::;+s#,&   K!::m[99rD   c                "    t        | ||d|      S )a  
    :math:`D^2` regression score function, fraction of absolute error explained.

    Best possible score is 1.0 and it can be negative (because the model can be
    arbitrarily worse). A model that always uses the empirical median of `y_true`
    as constant prediction, disregarding the input features,
    gets a :math:`D^2` score of 0.0.

    Read more in the :ref:`User Guide <d2_score>`.

    .. versionadded:: 1.1

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average scores.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.

    Returns
    -------
    score : float or ndarray of floats
        The :math:`D^2` score with an absolute error deviance
        or ndarray of scores if 'multioutput' is 'raw_values'.

    Notes
    -----
    Like :math:`R^2`, :math:`D^2` score may be negative
    (it need not actually be the square of a quantity D).

    This metric is not well-defined for single samples and will return a NaN
    value if n_samples is less than two.

    References
    ----------
    .. [1] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J.
           Wainwright. "Statistical Learning with Sparsity: The Lasso and
           Generalizations." (2015). https://hastie.su.domains/StatLearnSparsity/

    Examples
    --------
    >>> from sklearn.metrics import d2_absolute_error_score
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> d2_absolute_error_score(y_true, y_pred)
    np.float64(0.764...)
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> d2_absolute_error_score(y_true, y_pred, multioutput='uniform_average')
    np.float64(0.691...)
    >>> d2_absolute_error_score(y_true, y_pred, multioutput='raw_values')
    array([0.8125    , 0.57142857])
    >>> y_true = [1, 2, 3]
    >>> y_pred = [1, 2, 3]
    >>> d2_absolute_error_score(y_true, y_pred)
    np.float64(1.0)
    >>> y_true = [1, 2, 3]
    >>> y_pred = [2, 2, 2]
    >>> d2_absolute_error_score(y_true, y_pred)
    np.float64(0.0)
    >>> y_true = [1, 2, 3]
    >>> y_pred = [3, 2, 1]
    >>> d2_absolute_error_score(y_true, y_pred)
    np.float64(-1.0)
    """
    return d2_pinball_score(
        y_true, y_pred, sample_weight=sample_weight, alpha=0.5, multioutput=multioutput
    )