
"""Utilities for the neural network modules"""

import numpy as np
from scipy.special import expit as logistic_sigmoid
from scipy.special import xlogy


def inplace_identity(X):
    """Simply leave the input array unchanged.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Data, where `n_samples` is the number of samples
        and `n_features` is the number of features.
    N Xs    `/home/alanp/www/video.onchill/myenv/lib/python3.12/site-packages/sklearn/neural_network/_base.pyinplace_identityr


def inplace_logistic(X):
    """Compute the logistic function inplace.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        The input data.
    """
    logistic_sigmoid(X, out=X)


def inplace_tanh(X):
    """Compute the hyperbolic tan function inplace.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        The input data.
    """
    np.tanh(X, out=X)


def inplace_relu(X):
    """Compute the rectified linear unit function inplace.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        The input data.
    """
    np.maximum(X, 0, out=X)
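
# Illustrative only: the rectification happens in place, so negative entries
# of the caller's array are overwritten with 0.
#
#     >>> X = np.array([[-1.5, 0.0, 2.0]])
#     >>> inplace_relu(X)
#     >>> X
#     array([[0., 0., 2.]])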


def inplace_softmax(X):
    """Compute the K-way softmax function inplace.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        The input data.
       axisNr   )maxr   newaxisexpsum)r   tmps     r	   inplace_softmaxr"   8   sT     aeeemArzzM*
*CFF3AAq"**}	%%Ar   )identityr   logisticrelusoftmaxc                      y)a  Apply the derivative of the identity function: do nothing.

    Parameters
    ----------
    Z : {array-like, sparse matrix}, shape (n_samples, n_features)
        The data which was output from the identity activation function during
        the forward pass.

    delta : {array-like}, shape (n_samples, n_features)
         The backpropagated error signal to be modified inplace.
    """
    # Nothing to do: the identity derivative is 1 everywhere, so delta is
    # left untouched.
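
# The derivative helpers below share a second contract: delta holds the error
# signal flowing back from the layer above and is rescaled elementwise by
# f'(Z) in place.  For the identity, f'(Z) == 1, so delta is left as-is:
#
#     >>> Z = np.array([[3.0]])
#     >>> delta = np.array([[7.0]])
#     >>> inplace_identity_derivative(Z, delta)
#     >>> delta
#     array([[7.]])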


def inplace_logistic_derivative(Z, delta):
    """Apply the derivative of the logistic sigmoid function.

    It exploits the fact that the derivative is a simple function of the output
    value from logistic function.

    Parameters
    ----------
    Z : {array-like, sparse matrix}, shape (n_samples, n_features)
        The data which was output from the logistic activation function during
        the forward pass.

    delta : {array-like}, shape (n_samples, n_features)
         The backpropagated error signal to be modified inplace.
    """
    delta *= Z
    delta *= 1 - Z
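
# Illustrative only: sigmoid'(x) = s * (1 - s) where s is the forward output,
# which is why Z alone (and not the pre-activation) suffices here.
#
#     >>> Z = np.array([[0.5]])        # sigmoid output at x = 0
#     >>> delta = np.array([[1.0]])
#     >>> inplace_logistic_derivative(Z, delta)
#     >>> delta                        # scaled by sigmoid'(0) = 0.25
#     array([[0.25]])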


def inplace_tanh_derivative(Z, delta):
    """Apply the derivative of the hyperbolic tanh function.

    It exploits the fact that the derivative is a simple function of the output
    value from hyperbolic tangent.

    Parameters
    ----------
    Z : {array-like, sparse matrix}, shape (n_samples, n_features)
        The data which was output from the hyperbolic tangent activation
        function during the forward pass.

    delta : {array-like}, shape (n_samples, n_features)
         The backpropagated error signal to be modified inplace.
    r      Nr   r(   s     r	   inplace_tanh_derivativer0   p   s     
QAXEr   c                     d|| dk(  <   y)a  Apply the derivative of the relu function.

    It exploits the fact that the derivative is a simple function of the output
    value from rectified linear units activation function.

    Parameters
    ----------
    Z : {array-like, sparse matrix}, shape (n_samples, n_features)
        The data which was output from the rectified linear units activation
        function during the forward pass.

    delta : {array-like}, shape (n_samples, n_features)
         The backpropagated error signal to be modified inplace.
    """
    delta[Z == 0] = 0


DERIVATIVES = {
    "identity": inplace_identity_derivative,
    "tanh": inplace_tanh_derivative,
    "logistic": inplace_logistic_derivative,
    "relu": inplace_relu_derivative,
}
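
# Illustrative only: the relu derivative masks the error signal wherever the
# forward output was zero, i.e. wherever the unit was inactive.
#
#     >>> Z = np.array([[0.0, 2.0]])
#     >>> delta = np.array([[5.0, 5.0]])
#     >>> DERIVATIVES["relu"](Z, delta)
#     >>> delta
#     array([[0., 5.]])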
  dz  j                         dz  S )a  Compute the squared loss for regression.

    Parameters
    ----------
    y_true : array-like or label indicator matrix
        Ground truth (correct) values.

    y_pred : array-like or label indicator matrix
        Predicted values, as returned by a regression estimator.

    Returns
    -------
    loss : float
        The degree to which the samples are correctly predicted.
    """
    return ((y_true - y_pred) ** 2).mean() / 2


def log_loss(y_true, y_prob):
    """Compute Logistic loss for classification.

    Parameters
    ----------
    y_true : array-like or label indicator matrix
        Ground truth (correct) labels.

    y_prob : array-like of float, shape = (n_samples, n_classes)
        Predicted probabilities, as returned by a classifier's
        predict_proba method.

    Returns
    -------
    loss : float
        The degree to which the samples are correctly predicted.
    """
    eps = np.finfo(y_prob.dtype).eps
    y_prob = np.clip(y_prob, eps, 1 - eps)
    if y_prob.shape[1] == 1:
        y_prob = np.append(1 - y_prob, y_prob, axis=1)

    if y_true.shape[1] == 1:
        y_true = np.append(1 - y_true, y_true, axis=1)

    return -xlogy(y_true, y_prob).sum() / y_prob.shape[0]


def binary_log_loss(y_true, y_prob):
    """Compute binary logistic loss for classification.

    This is identical to log_loss in binary classification case,
    but is kept for its use in multilabel case.

    Parameters
    ----------
    y_true : array-like or label indicator matrix
        Ground truth (correct) labels.

    y_prob : array-like of float, shape = (n_samples, 1)
        Predicted probabilities, as returned by a classifier's
        predict_proba method.

    Returns
    -------
    loss : float
        The degree to which the samples are correctly predicted.
    """
    eps = np.finfo(y_prob.dtype).eps
    y_prob = np.clip(y_prob, eps, 1 - eps)
    return (
        -(xlogy(y_true, y_prob).sum() + xlogy(1 - y_true, 1 - y_prob).sum())
        / y_prob.shape[0]
    )


LOSS_FUNCTIONS = {
    "squared_error": squared_loss,
    "log_loss": log_loss,
    "binary_log_loss": binary_log_loss,
}