import warnings
from typing import Union, Iterable, List, Dict, Tuple, Optional, cast

import torch
from torch import Tensor, inf
from torch.utils._foreach_utils import (
    _group_tensors_by_device_and_dtype,
    _has_foreach_support,
)

_tensor_or_tensors = Union[torch.Tensor, Iterable[torch.Tensor]]

__all__ = ["clip_grad_norm_", "clip_grad_norm", "clip_grad_value_"]


def clip_grad_norm_(
    parameters: _tensor_or_tensors,
    max_norm: float,
    norm_type: float = 2.0,
    error_if_nonfinite: bool = False,
    foreach: Optional[bool] = None,
) -> torch.Tensor:
    r"""Clip the gradient norm of an iterable of parameters.

    The norm is computed over all gradients together, as if they were
    concatenated into a single vector. Gradients are modified in-place.

    Args:
        parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
            single Tensor that will have gradients normalized
        max_norm (float): max norm of the gradients
        norm_type (float): type of the used p-norm. Can be ``'inf'`` for
            infinity norm.
        error_if_nonfinite (bool): if True, an error is thrown if the total
            norm of the gradients from :attr:`parameters` is ``nan``,
            ``inf``, or ``-inf``. Default: False (will switch to True in the future)
        foreach (bool): use the faster foreach-based implementation.
            If ``None``, use the foreach implementation for CUDA and CPU native tensors and silently
            fall back to the slow implementation for other device types.
            Default: ``None``

    Returns:
        Total norm of the parameter gradients (viewed as a single vector).
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    grads = [p.grad for p in parameters if p.grad is not None]
    max_norm = float(max_norm)
    norm_type = float(norm_type)
    if len(grads) == 0:
        return torch.tensor(0.0)
    first_device = grads[0].device
    grouped_grads: Dict[Tuple[torch.device, torch.dtype], Tuple[List[List[Tensor]], List[int]]] \
        = _group_tensors_by_device_and_dtype([[g.detach() for g in grads]])

    if norm_type == inf:
        norms = [torch.linalg.vector_norm(g.detach(), inf).to(first_device) for g in grads]
        total_norm = norms[0] if len(norms) == 1 else torch.max(torch.stack(norms))
    else:
        norms = []
        for ((device, _), ([device_grads], _)) in grouped_grads.items():
            if (foreach is None or foreach) and _has_foreach_support(device_grads, device=device):
                norms.extend(torch._foreach_norm(device_grads, norm_type))
            elif foreach:
                raise RuntimeError(
                    f"foreach=True was passed, but can't use the foreach API on {device.type} tensors")
            else:
                norms.extend([torch.linalg.vector_norm(g, norm_type) for g in device_grads])

        total_norm = torch.linalg.vector_norm(
            torch.stack([norm.to(first_device) for norm in norms]), norm_type)

    if error_if_nonfinite and torch.logical_or(total_norm.isnan(), total_norm.isinf()):
        raise RuntimeError(
            f"The total norm of order {norm_type} for gradients from "
            "`parameters` is non-finite, so it cannot be clipped. To disable "
            "this error and scale the gradients by the non-finite norm anyway, "
            "set `error_if_nonfinite=False`")
    clip_coef = max_norm / (total_norm + 1e-6)
    # Multiplying by a coefficient clamped to at most 1.0 is a no-op for gradients
    # that are already within bounds; doing it unconditionally avoids a
    # data-dependent `if clip_coef < 1:` branch, which would force a CPU <-> device
    # synchronization when the gradients live on an accelerator.
    clip_coef_clamped = torch.clamp(clip_coef, max=1.0)
    for ((device, _), ([device_grads], _)) in grouped_grads.items():
        if (foreach is None or foreach) and _has_foreach_support(device_grads, device=device):
            torch._foreach_mul_(device_grads, clip_coef_clamped.to(device))
        elif foreach:
            raise RuntimeError(
                f"foreach=True was passed, but can't use the foreach API on {device.type} tensors")
        else:
            clip_coef_clamped_device = clip_coef_clamped.to(device)
            for g in device_grads:
                g.detach().mul_(clip_coef_clamped_device)

    return total_norm
%M*c                 N    t        j                  dd       t        | ||||      S )zClip the gradient norm of an iterable of parameters.

    .. warning::
        This method is now deprecated in favor of
        :func:`torch.nn.utils.clip_grad_norm_`.
    """
    warnings.warn(
        "torch.nn.utils.clip_grad_norm is now deprecated "
        "in favor of torch.nn.utils.clip_grad_norm_.",
        stacklevel=2,
    )
    return clip_grad_norm_(parameters, max_norm, norm_type, error_if_nonfinite, foreach)
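
# Migration sketch (illustrative): the alias above forwards all arguments to
# clip_grad_norm_ unchanged and only adds a deprecation warning, so call sites
# can switch names without any behavior change.
#
#     clip_grad_norm(model.parameters(), 1.0)    # works, but warns
#     clip_grad_norm_(model.parameters(), 1.0)   # preferred spelling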


def clip_grad_value_(
    parameters: _tensor_or_tensors,
    clip_value: float,
    foreach: Optional[bool] = None,
) -> None:
    r"""Clip the gradients of an iterable of parameters at specified value.

    Gradients are modified in-place.

    Args:
        parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
            single Tensor that will have gradients clipped
        clip_value (float): maximum allowed value of the gradients.
            The gradients are clipped in the range
            :math:`\left[-\text{clip\_value}, \text{clip\_value}\right]`
        foreach (bool): use the faster foreach-based implementation
            If ``None``, use the foreach implementation for CUDA and CPU native tensors and
            silently fall back to the slow implementation for other device types.
            Default: ``None``
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    clip_value = float(clip_value)

    grads = [p.grad for p in parameters if p.grad is not None]
    grouped_grads = _group_tensors_by_device_and_dtype([grads])

    for ((device, _), ([device_grads], _)) in grouped_grads.items():
        if (foreach is None or foreach) and _has_foreach_support(cast(List[Tensor], device_grads), device=device):
            torch._foreach_clamp_min_(cast(List[Tensor], device_grads), -clip_value)
            torch._foreach_clamp_max_(cast(List[Tensor], device_grads), clip_value)
        elif foreach:
            raise RuntimeError(
                f"foreach=True was passed, but can't use the foreach API on {device.type} tensors")
        else:
            with torch.no_grad():
                for grad in device_grads:
                    cast(Tensor, grad).clamp_(min=-clip_value, max=clip_value)