
from typing import List, Union, Mapping, Dict, Any

import torch.optim as optim
from torch import Tensor
from torch.distributed._shard.sharded_tensor import ShardedTensor


class ShardedOptimizer(optim.Optimizer):
    def __init__(
        self,
        named_params: Mapping[str, Union[Tensor, ShardedTensor]],
        optimizer_class,
        *optimizer_args,
        **optimizer_kwargs
    ):
        """
        ShardedOptimizer collects all tensors and local shard tensors of
        ShardedTensors, then uses these tensors as ``params`` for the local optimizer.

        Args:
            named_params (Dict[str, Union[Tensor, ShardedTensor]]): a Dict
                of parameters, where key is the parameter key, value is either
                Tensor or ShardedTensor parameter.
            optimizer_class (torch.optim.Optimizer): the Optimizer to use
                locally, i.e. torch.optim.SGD, torch.optim.Adagrad, etc.
            *optimizer_args: the arguments to initialize the optimizer.
            **optimizer_kwargs: the keyword arguments to initialize the optimizer.

        """
        tensors: List[Tensor] = []
        for value in named_params.values():
            if isinstance(value, ShardedTensor):
                for local_shard in value.local_shards():
                    tensors.append(local_shard.tensor)
            else:
                tensors.append(value)

        self.named_params = named_params
        self._optim = optimizer_class(tensors, *optimizer_args, **optimizer_kwargs)
        self.param_groups = self._optim.param_groups
        self.state = self._optim.state

    def zero_grad(self, set_to_none: bool = True):
        r"""Resets the gradients of all optimized :class:`torch.Tensor` s.

        Args:
            set_to_none (bool): instead of setting to zero, set the grads to None.
                This will in general have lower memory footprint, and can modestly improve performance.
                However, it changes certain behaviors. For example:
                1. When the user tries to access a gradient and perform manual ops on it,
                a None attribute or a Tensor full of 0s will behave differently.
                2. If the user requests ``zero_grad(set_to_none=True)`` followed by a backward pass, ``.grad``\ s
                are guaranteed to be None for params that did not receive a gradient.
                3. ``torch.optim`` optimizers have a different behavior if the gradient is 0 or None
                (in one case it does the step with a gradient of 0 and in the other it skips
                the step altogether).
        """
        self._optim.zero_grad(set_to_none)

    def step(self, closure=None):
        r"""Performs a single optimization step (parameter update).

        Args:
            closure (Callable): A closure that reevaluates the model and
                returns the loss. Optional for most optimizers.

        .. note::
            Unless otherwise specified, this function should not modify the
            ``.grad`` field of the parameters.
        """
        self._optim.step(closure)

    def state_dict(self) -> Dict[str, Any]:
        """
        Returned state and param_groups will contain parameter keys
        instead of parameter indices like torch.optim.Optimizer.
        This allows for advanced functionality like optimizer re-sharding to be implemented.
        """
        raise NotImplementedError("ShardedOptimizer state_dict not implemented yet!")

    def load_state_dict(self, state_dict: Mapping[str, Any]):
        r"""Loads the ShardedOptimizer state.

        Args:
            state_dict (dict): ShardedOptimizer state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        raise NotImplementedError("ShardedOptimizer load_state_dict not implemented yet!")

    def add_param_group(self, param_group: Any):
        r"""Add a new param group
        """
        raise NotImplementedError("ShardedOptimizer add_param_group not implemented yet!")
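
# ---------------------------------------------------------------------------
# Illustrative usage (editor's sketch, not part of the original module). It is
# kept in comments so importing this library file stays side-effect free. The
# sketch assumes a distributed process group is already initialized, that this
# torch build provides the companion helpers ``shard_parameter`` and
# ``named_params_with_sharded_tensor`` from ``torch.distributed._shard``, and
# that ``rank`` / ``world_size`` / ``inp`` are hypothetical placeholders for
# your own setup.
#
#     import torch
#     from torch.distributed._shard import shard_parameter
#     from torch.distributed._shard.sharding_spec import ChunkShardingSpec
#     from torch.distributed._shard.sharded_optim import (
#         ShardedOptimizer,
#         named_params_with_sharded_tensor,
#     )
#
#     # Shard the weight of a simple Linear across ranks (one chunk per rank).
#     model = torch.nn.Linear(16, 16).cuda(rank)
#     spec = ChunkShardingSpec(
#         dim=0,
#         placements=[f"rank:{r}/cuda:{r}" for r in range(world_size)],
#     )
#     shard_parameter(model, "weight", spec)
#
#     # Collect both regular Tensors and ShardedTensors as optimizer params;
#     # ShardedOptimizer unwraps the local shards and hands them to SGD.
#     optim = ShardedOptimizer(
#         dict(named_params_with_sharded_tensor(model)),
#         torch.optim.SGD,  # any local torch.optim.Optimizer class
#         lr=0.1,
#     )
#
#     # Typical training-loop usage once gradients have been produced:
#     optim.zero_grad()
#     loss = model(inp).sum()
#     loss.backward()
#     optim.step()
# ---------------------------------------------------------------------------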