
from abc import ABC, abstractmethod
from dataclasses import dataclass
import functools
from typing import Callable, Dict, List, TYPE_CHECKING

import torch

from ._internals import (
    check_tensor,
    get_chunked_dim_size,
    get_split_size,
    validate_non_overlapping_shards_metadata,
)
from torch.distributed._shard.metadata import ShardMetadata

import torch.distributed._shard.sharded_tensor.metadata as sharded_tensor_meta
from torch.distributed._shard.op_registry_utils import _decorator_func

if TYPE_CHECKING:
    # Only import ShardedTensor for type checking, to avoid a circular
    # dependency at run time.
    from torch.distributed._shard.sharded_tensor import ShardedTensor


class PlacementSpec(ABC):
    """
    Base class representing the placement of an entity. Subclasses of this
    class can be used to specify customized placements which might not be
    covered by existing APIs.
    """
    pass


@dataclass
class DevicePlacementSpec(PlacementSpec):
    """
    Associates placement of an entity with a single device.

    Args:
        device(:class:`torch.distributed._remote_device`): The device to place the entity on.
    """

    device: torch.distributed._remote_device

    def __post_init__(self):
        if not isinstance(self.device, torch.distributed._remote_device):
            self.device = torch.distributed._remote_device(self.device)


class ShardingSpec(ABC):
    """
    Base class representing sharding specifications.
    """
    @abstractmethod
    def build_metadata(self,
                       tensor_sizes: torch.Size,
                       tensor_properties: sharded_tensor_meta.TensorProperties,
                       ) -> sharded_tensor_meta.ShardedTensorMetadata:
        """
        Given a global tensor size, define how to shard a tensor like this shape
        across ranks, return ShardedTensorMetadata
        Args:
            tensor_sizes (:class:`torch.Size`):
                The tensor shape to shard on, a `torch.Size` object that represents the
                tensor shape to be sharded according to the ShardingSpec.
            tensor_properties(:class:`torch.distributed._shard.sharded_tensor.TensorProperties`):
                Tensor properties used to create a ShardedTensor.
        Returns:
            A :class:`ShardedTensorMetadata` object that encodes the information about
            the layout of the ShardedTensor and its properties.
        """

    @abstractmethod
    def shard(self, tensor: torch.Tensor, src_rank: int = 0, process_group=None) -> "ShardedTensor":
        """
        Given a global tensor on src_rank, shard this tensor
        across ranks within the process group, return a ShardedTensor.
        Args:
            tensor (:class:`torch.Tensor`): Tensor needs to be sharded.
        Keyword args:
            src_rank (int, optional): The source rank which is used as the ground truth of
                the data for the parameter that would be sharded and scattered
                across the rest of the ranks.
                Default: 0.
            process_group (ProcessGroup, optional): The process group to work on. If None,
                the default process group will be used.
        Returns:
            A :class:`ShardedTensor` sharded from the given tensor.
        """
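
# Illustrative sketch (not part of the original module): ``ShardingSpec`` is
# abstract; concrete specs such as ``ChunkShardingSpec`` implement
# ``build_metadata`` and ``shard``. Under an initialized two-rank process
# group, a tensor could be sharded roughly like this (the placement strings
# and tensor shape below are hypothetical examples):
#
#   from torch.distributed._shard.sharding_spec import ChunkShardingSpec
#   spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:0", "rank:1/cuda:1"])
#   sharded = spec.shard(torch.randn(20, 16), src_rank=0)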


# Ops customized for a particular ShardingSpec.
_CUSTOM_SHARDING_SPEC_OPS: Dict[str, Dict[Callable, Callable]] = {}

def _has_custom_op(sharding_spec, op):
    """
    Returns whether or not the ShardingSpec has a custom op implementation.
    """
    class_name = type(sharding_spec).__qualname__
    return class_name in _CUSTOM_SHARDING_SPEC_OPS and op in _CUSTOM_SHARDING_SPEC_OPS[class_name]

def _dispatch_custom_op(sharding_spec, op: Callable, types, args, kwargs, process_group):
    """
    Calls the custom op for this ShardingSpec if it exists.
    zCustom op: z not registered for )r?   r   rC   RuntimeErrorr=   )r@   rA   typesargskwargsr4   rB   funcs           r   _dispatch_custom_oprJ   c   sW     m$11J-,[,@MNN$Z04DtV]33r   c                     | j                   }|t        vr	i t        |<   t        j                  t        |t        |         S )z
    Decorator to allow custom registration of ops.
    Args:
        sharding_spec_class(type): The ShardingSpec for which we need to add this custom op.
        func(Callable): The op to override (ex: torch.bmm)
    """
    class_name = sharding_spec_class.__qualname__
    if class_name not in _CUSTOM_SHARDING_SPEC_OPS:
        _CUSTOM_SHARDING_SPEC_OPS[class_name] = {}
    return functools.partial(
        _decorator_func,
        op=func,
        op_table=_CUSTOM_SHARDING_SPEC_OPS[class_name]
    )


@dataclass
class EnumerableShardingSpec(ShardingSpec):
    """
    This is a type of PlacementSpec that allows users to specify a generic
    sharding scheme by enumerating exactly how each shard is laid out.

    Args:
        shards(List[ShardMetadata]): List of :class:`ShardMetadata` objects representing
            each shard. Note that none of the shards should overlap.
    """

    shards: List[ShardMetadata]

    def __post_init__(self):
        if len(self.shards) == 0:
            raise ValueError(f'Empty shard list provided: {self.shards}')

        # Validate that each shard has the same number of dimensions (rank).
        rank = -1
        for shard in self.shards:
            if rank != -1 and rank != len(shard.shard_offsets):
                raise ValueError(f'Found inconsistent ranks for shards: {rank} and {len(shard.shard_offsets)}')
            rank = len(shard.shard_offsets)

        validate_non_overlapping_shards_metadata(self.shards)

    def build_metadata(self,
                       tensor_sizes: torch.Size,
                       tensor_properties: sharded_tensor_meta.TensorProperties,
                       ) -> sharded_tensor_meta.ShardedTensorMetadata:
        # Check that the shards form a valid tensor before building the metadata.
        check_tensor(self.shards, tensor_sizes)
        return sharded_tensor_meta.ShardedTensorMetadata(
            self.shards,
            tensor_sizes,
            tensor_properties
        )

    def shard(self, tensor: torch.Tensor, src_rank: int = 0, process_group=None) -> "ShardedTensor":
        raise NotImplementedError("EnumerableShardingSpec.shard not implemented yet!")


def _infer_sharding_spec_from_shards_metadata(shards_metadata):
    """
    Infer the sharding spec from the metadata of each shard of a ShardedTensor.
    If the tensor is sharded only on one dimension, we can then verify whether it's
    a ChunkShardingSpec or not. The way to verify it is to first get the total length
    and perform a chunk sharding with the given placements to see if we can have the
    same chunk size as the given shards_metadata. If not, we assume it's enum sharded.

    Args:
        shards_metadata (List[ShardMetadata]): List of Metadata of local shards.

    Returns:
        A :class:`torch.distributed._shard.sharding_spec.ShardingSpec` object of sharding
            spec for one sharded tensor.
    """
    placements = []
    chunk_sharding_dim = None
    chunk_offset_list = []
    shard_size_list = []
    # Collect the placement, offsets and sizes of every shard and try to find a
    # single dimension along which all shards are offset.
    for shard_metadata in shards_metadata:
        placements.append(shard_metadata.placement)
        local_offsets = shard_metadata.shard_offsets
        chunk_offset_list.append(sum(local_offsets))
        shard_size_list.append(shard_metadata.shard_sizes)
        shard_dims = [idx for idx, e in enumerate(local_offsets) if e != 0]
        # An all-zero offset does not tell us which dimension is sharded.
        if len(shard_dims) == 0:
            continue
        # More than one non-zero offset means sharding on multiple dimensions.
        if len(shard_dims) != 1:
            chunk_sharding_dim = None
            break
        # Sharded along exactly one dimension; all shards must agree on it.
        if not chunk_sharding_dim:
            chunk_sharding_dim = shard_dims[0]
        elif chunk_sharding_dim != shard_dims[0]:
            chunk_sharding_dim = None
            break

    if chunk_sharding_dim is not None:
        # Ensure we infer the correct placement order from the offsets.
        placements = [
            x for _, x in sorted(zip(chunk_offset_list, placements), key=lambda e: e[0])
        ]

        from .chunk_sharding_spec import ChunkShardingSpec
        chunk_spec = ChunkShardingSpec(
            dim=chunk_sharding_dim,
            placements=placements,
        )

        shard_sizes = sorted([x[chunk_sharding_dim] for x in shard_size_list])
        shard_total_length = sum(shard_sizes)
        chunks = len(placements)
        split_size = get_split_size(shard_total_length, chunks)
        chunk_shard_sizes = sorted(
            [
                get_chunked_dim_size(shard_total_length, split_size, idx)
                for idx in range(len(placements))
            ]
        )
        # If the shard sizes match what chunk sharding would produce, this is a
        # ChunkShardingSpec; otherwise fall back to enumerating the shards.
        if shard_sizes == chunk_shard_sizes:
            return chunk_spec
    return EnumerableShardingSpec(shards_metadata)
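

# Illustrative sketch (not part of the original module): for two equal shards
# along dim 0 the helper above recognizes chunk sharding and returns a
# ChunkShardingSpec; any layout that does not match chunk sharding falls back
# to an EnumerableShardingSpec wrapping the given metadata.
#
#   inferred = _infer_sharding_spec_from_shards_metadata([
#       ShardMetadata(shard_offsets=[0, 0], shard_sizes=[5, 5], placement="rank:0/cuda:0"),
#       ShardMetadata(shard_offsets=[5, 0], shard_sizes=[5, 5], placement="rank:1/cuda:1"),
#   ])  # -> ChunkShardingSpec(dim=0, ...)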