
import math
from typing import List, Optional

import torch
from torch import Tensor

from .optimizer import (
    Optimizer,
    _default_to_fused_or_foreach,
    _differentiable_doc,
    _dispatch_sqrt,
    _foreach_doc,
    _get_value,
    _stack_if_compiling,
    _use_grad_for_differentiable,
    _view_as_real,
)

__all__ = ["RAdam", "radam"]


class RAdam(Optimizer):
    def __init__(
        self,
        params,
        lr=1e-3,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=0,
        decoupled_weight_decay: bool = False,
        *,
        foreach: Optional[bool] = None,
        differentiable: bool = False,
    ):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            foreach=foreach,
            differentiable=differentiable,
            decoupled_weight_decay=decoupled_weight_decay,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("foreach", None)
            group.setdefault("differentiable", False)
            group.setdefault("decoupled_weight_decay", False)
        state_values = list(self.state.values())
        step_is_tensor = (len(state_values) != 0) and torch.is_tensor(
            state_values[0]["step"]
        )
        if not step_is_tensor:
            for s in state_values:
                s["step"] = torch.tensor(float(s["step"]), dtype=torch.float32)

    def _init_group(
        self, group, params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps
    ):
        has_complex = False
        for p in group["params"]:
            if p.grad is not None:
                has_complex |= torch.is_complex(p)
                params_with_grad.append(p)
                if p.grad.is_sparse:
                    raise RuntimeError("RAdam does not support sparse gradients")
                grads.append(p.grad)

                state = self.state[p]
                # Lazy state initialization
                if len(state) == 0:
                    state["step"] = torch.tensor(0.0, dtype=torch.float32)
                    # Exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )

                exp_avgs.append(state["exp_avg"])
                exp_avg_sqs.append(state["exp_avg_sq"])
                state_steps.append(state["step"])
        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            state_steps = []
            beta1, beta2 = group["betas"]

            has_complex = self._init_group(
                group, params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps
            )

            radam(
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                state_steps,
                beta1=beta1,
                beta2=beta2,
                lr=group["lr"],
                weight_decay=group["weight_decay"],
                eps=group["eps"],
                foreach=group["foreach"],
                differentiable=group["differentiable"],
                decoupled_weight_decay=group["decoupled_weight_decay"],
                has_complex=has_complex,
            )

        return loss


RAdam.__doc__ = r"""Implements RAdam algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \: \beta_1, \beta_2
                \text{ (betas)}, \: \theta_0 \text{ (params)}, \:f(\theta) \text{ (objective)}, \:
                \lambda \text{ (weight decay)},                                                  \\
            &\hspace{13mm} \epsilon \text{ (epsilon)}, \textit{decoupled\_weight\_decay}         \\
            &\textbf{initialize} :  m_0 \leftarrow 0 \text{ (first moment)},
                v_0 \leftarrow 0 \text{ (second moment)},                                       \\
            &\hspace{18mm} \rho_{\infty} \leftarrow 2/(1-\beta_2) -1                      \\[-1.ex]
            &\rule{110mm}{0.4pt}  \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{6mm} g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1})                      \\
            &\hspace{6mm} \theta_t \leftarrow \theta_{t-1}                                       \\
            &\hspace{6mm} \textbf{if} \: \lambda \neq 0                                          \\
            &\hspace{12mm}\textbf{if} \: \textit{decoupled\_weight\_decay}                       \\
            &\hspace{18mm} \theta_t \leftarrow \theta_{t} - \gamma \lambda \theta_{t}            \\
            &\hspace{12mm}\textbf{else}                                                          \\
            &\hspace{18mm} g_t \leftarrow g_t + \lambda \theta_{t}                               \\
            &\hspace{6mm}m_t           \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t          \\
            &\hspace{6mm}v_t           \leftarrow   \beta_2 v_{t-1} + (1-\beta_2) g^2_t          \\
            &\hspace{6mm}\widehat{m_t} \leftarrow   m_t/\big(1-\beta_1^t \big)                   \\
            &\hspace{6mm}\rho_t \leftarrow \rho_{\infty} -
                2 t \beta^t_2 /\big(1-\beta_2^t \big)                                    \\[0.1ex]
            &\hspace{6mm}\textbf{if} \: \rho_t > 5                                               \\
            &\hspace{12mm} l_t \leftarrow \frac{\sqrt{ (1-\beta^t_2) }}{ \sqrt{v_t} +\epsilon  } \\
            &\hspace{12mm} r_t \leftarrow
      \sqrt{\frac{(\rho_t-4)(\rho_t-2)\rho_{\infty}}{(\rho_{\infty}-4)(\rho_{\infty}-2) \rho_t}} \\
            &\hspace{12mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t} r_t l_t        \\
            &\hspace{6mm}\textbf{else}                                                           \\
            &\hspace{12mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}                \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `On the variance of the adaptive learning rate and beyond`_.
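
    As a quick numerical illustration of the warm-up this schedule produces (a
    hand-worked sketch, not part of the optimizer API): with the default
    :math:`\beta_2 = 0.999`, :math:`\rho_t` only exceeds the rectification
    threshold of 5 after the first few steps, so the earliest updates take the
    unrectified branch::

        >>> beta2 = 0.999
        >>> rho_inf = 2 / (1 - beta2) - 1                           # 1999.0
        >>> rho_t = lambda t: rho_inf - 2 * t * beta2**t / (1 - beta2**t)
        >>> rho_t(1) < 5    # variance of the adaptive lr still intractable
        True
        >>> rho_t(100) > 5  # rectified, Adam-like steps from here on
        True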

    This implementation provides an option to use either the original weight_decay implementation as in Adam
    (where the weight_decay is applied to the gradient) or the one from AdamW (where weight_decay is applied
    to the weight) through the decoupled_weight_decay option. When decoupled_weight_decay is set to False
    (default), it uses the original Adam style weight decay; otherwise, it uses the AdamW style, which
    corresponds more closely to the `author's implementation`_ in the RAdam paper. Further information
    about decoupled weight decay can be found in `Decoupled Weight Decay Regularization`_.
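
    For example, both decay styles are selected purely through the constructor
    flag (an illustrative sketch assuming some existing ``model``)::

        >>> # xdoctest: +SKIP
        >>> opt_radam = torch.optim.RAdam(model.parameters(), weight_decay=1e-2)
        >>> opt_radamw = torch.optim.RAdam(model.parameters(), weight_decay=1e-2,
        ...                                decoupled_weight_decay=True)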

    """ + rf"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        decoupled_weight_decay (bool, optional): whether to use decoupled weight
            decay as in AdamW to obtain RAdamW (default: False)
        {_foreach_doc}
        {_differentiable_doc}

    .. _On the variance of the adaptive learning rate and beyond:
        https://arxiv.org/abs/1908.03265
    .. _author's implementation:
        https://github.com/LiyuanLucasLiu/RAdam
    .. _Decoupled Weight Decay Regularization:
        https://arxiv.org/abs/1711.05101

    """


def radam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
    # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
    decoupled_weight_decay: bool = False,
    foreach: Optional[bool] = None,
    differentiable: bool = False,
    has_complex: bool = False,
    *,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    eps: float,
):
    r"""Functional API that performs RAdam algorithm computation.

    See :class:`~torch.optim.RAdam` for details.
    """
    if not all(isinstance(t, torch.Tensor) for t in state_steps):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )

    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(
            params, differentiable, use_fused=False
        )

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_radam
    else:
        func = _single_tensor_radam

    func(
        params,
        grads,
        exp_avgs,
        exp_avg_sqs,
        state_steps,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        weight_decay=weight_decay,
        eps=eps,
        decoupled_weight_decay=decoupled_weight_decay,
        differentiable=differentiable,
        has_complex=has_complex,
    )


def _single_tensor_radam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    *,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    eps: float,
    decoupled_weight_decay: bool,
    differentiable: bool,
    has_complex: bool,
):
    for i, param in enumerate(params):
        grad = grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step_t = state_steps[i]

        if torch.is_complex(param):
            param = torch.view_as_real(param)
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_avg_sq = torch.view_as_real(exp_avg_sq)

        # update step
        step_t += 1
        step = _get_value(step_t)

        bias_correction1 = 1 - beta1**step
        bias_correction2 = 1 - beta2**step

        if weight_decay != 0:
            if decoupled_weight_decay:
                param.mul_(1 - lr * weight_decay)
            else:
                grad = grad.add(param, alpha=weight_decay)

        # Decay the first and second moment running average coefficient
        exp_avg.lerp_(grad, 1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

        # correcting bias for the first moving moment
        bias_corrected_exp_avg = exp_avg / bias_correction1

        # maximum length of the approximated SMA
        rho_inf = 2 / (1 - beta2) - 1
        # compute the length of the approximated SMA
        rho_t = rho_inf - 2 * step * (beta2**step) / bias_correction2

        if rho_t > 5.0:
            # Compute the variance rectification term and update parameters accordingly
            rect = math.sqrt(
                (rho_t - 4)
                * (rho_t - 2)
                * rho_inf
                / ((rho_inf - 4) * (rho_inf - 2) * rho_t)
            )
            exp_avg_sq_sqrt = exp_avg_sq.sqrt()
            if differentiable:
                exp_avg_sq_sqrt = exp_avg_sq_sqrt.add(eps)
            else:
                exp_avg_sq_sqrt = exp_avg_sq_sqrt.add_(eps)
            adaptive_lr = math.sqrt(bias_correction2) / exp_avg_sq_sqrt
            param.add_(bias_corrected_exp_avg * lr * adaptive_lr * rect, alpha=-1.0)
        else:
            param.add_(bias_corrected_exp_avg * lr, alpha=-1.0)


def _multi_tensor_radam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    *,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    eps: float,
    decoupled_weight_decay: bool,
    differentiable: bool,
    has_complex: bool,
):
    if len(params) == 0:
        return

    assert not differentiable, "_foreach ops don't support autograd"

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, state_steps]
    )
    for (
        grouped_params,
        grouped_grads,
        grouped_exp_avgs,
        grouped_exp_avg_sqs,
        grouped_state_steps,
    ), _ in grouped_tensors.values():
        # Update steps
        # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
        # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
        # wrapped it once now. The alpha is required to assure we go to the right overload.
        if grouped_state_steps[0].is_cpu:
            torch._foreach_add_(
                grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(grouped_state_steps, 1)

        if has_complex:
            _view_as_real(
                grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_avg_sqs
            )

        # maximum length of the approximated SMA
        rho_inf = 2 / (1 - beta2) - 1
        # compute the length of the approximated SMA
        rho_t_list = [
            rho_inf
            - 2
            * _get_value(step)
            * (beta2 ** _get_value(step))
            / (1 - beta2 ** _get_value(step))
            for step in grouped_state_steps
        ]

        if weight_decay != 0:
            if decoupled_weight_decay:
                torch._foreach_mul_(grouped_params, 1 - lr * weight_decay)
            else:
                grouped_grads = torch._foreach_add(
                    grouped_grads, grouped_params, alpha=weight_decay
                )

        # Decay the first and second moment running average coefficient
        torch._foreach_lerp_(grouped_exp_avgs, grouped_grads, 1 - beta1)

        torch._foreach_mul_(grouped_exp_avg_sqs, beta2)
        torch._foreach_addcmul_(
            grouped_exp_avg_sqs, grouped_grads, grouped_grads, 1 - beta2
        )

        # Delete the local intermediate since it won't be used anymore to save on peak memory
        del grouped_grads

        rect = [
            _dispatch_sqrt(
                (rho_t - 4)
                * (rho_t - 2)
                * rho_inf
                / ((rho_inf - 4) * (rho_inf - 2) * rho_t)
            )
            if rho_t > 5
            else 0
            for rho_t in rho_t_list
        ]
        unrectified = [0 if rect > 0 else 1.0 for rect in rect]

        bias_correction1 = [
            1 - beta1 ** _get_value(step) for step in grouped_state_steps
        ]
        unrect_step_size = _stack_if_compiling(
            [(lr * rect / bc) * -1 for rect, bc in zip(unrectified, bias_correction1)]
        )
        bias_correction2_sqrt_times_rect_step_size = [
            _dispatch_sqrt(1 - beta2 ** _get_value(step)) * (lr * rect / bc) * -1
            for step, rect, bc in zip(grouped_state_steps, rect, bias_correction1)
        ]

        buffer = torch._foreach_sqrt(grouped_exp_avg_sqs)
        torch._foreach_add_(buffer, eps)
        torch._foreach_div_(buffer, bias_correction2_sqrt_times_rect_step_size)
        torch._foreach_reciprocal_(buffer)
        torch._foreach_add_(buffer, unrect_step_size)

        # Here, buffer = sqrt(1 - beta2^t) * rect_step_size / (sqrt(v) + eps) + unrect_step_size
        torch._foreach_addcmul_(grouped_params, grouped_exp_avgs, buffer)