
"""
:mod:`torch.distributed.optim` exposes DistributedOptimizer, which takes a list
of remote parameters (:class:`~torch.distributed.rpc.RRef`) and runs the
optimizer locally on the workers where the parameters live.  The distributed
optimizer can use any of the local optimizer :ref:`optimizer-algorithms` to
apply the gradients on each worker.
"""
import torch
from torch import optim

from .apply_optimizer_in_backward import (
    _apply_optimizer_in_backward,
    _get_in_backward_optimizers,
)
from .functional_adadelta import _FunctionalAdadelta
from .functional_adagrad import _FunctionalAdagrad
from .functional_adam import _FunctionalAdam
from .functional_adamax import _FunctionalAdamax
from .functional_adamw import _FunctionalAdamW
from .functional_rmsprop import _FunctionalRMSprop
from .functional_rprop import _FunctionalRprop
from .functional_sgd import _FunctionalSGD
from .named_optimizer import _NamedOptimizer
from .utils import as_functional_optim

# DistributedOptimizer relies on torch.distributed.rpc, so only import it when
# the RPC framework is available in this build of torch.
if hasattr(torch._C, "_rpc_init"):
    from .optimizer import DistributedOptimizer

from .post_localSGD_optimizer import PostLocalSGDOptimizer
from .zero_redundancy_optimizer import ZeroRedundancyOptimizer