import contextlib
import functools
import warnings
from typing import Callable, Optional

import torch
from torch._library.utils import Kernel, RegistrationHandle


class AbstractImplHolder:
    """A holder where one can register an abstract impl to."""

    def __init__(self, qualname: str):
        self.qualname: str = qualname
        self.kernel: Optional[Kernel] = None
        self.lib: Optional[torch.library.Library] = None

    def register(self, func: Callable, source: str) -> RegistrationHandle:
        """Register an abstract impl.

        Returns a RegistrationHandle that one can use to de-register this
        abstract impl.
        """
        if self.kernel is not None:
            raise RuntimeError(
                f"impl_abstract(...): the operator {self.qualname} "
                f"already has an abstract impl registered at "
                f"{self.kernel.source}."
            )
        if torch._C._dispatch_has_kernel_for_dispatch_key(self.qualname, "Meta"):
            raise RuntimeError(
                f"impl_abstract(...): the operator {self.qualname} "
                f"already has a DispatchKey::Meta implementation via a "
                f"pre-existing torch.library or TORCH_LIBRARY registration. "
                f"Please either remove that registration or don't call "
                f"impl_abstract."
            )
        if torch._C._dispatch_has_kernel_for_dispatch_key(
            self.qualname, "CompositeImplicitAutograd"
        ):
            raise RuntimeError(
                f"impl_abstract(...): the operator {self.qualname} "
                f"already has an implementation for this device type via a "
                f"pre-existing registration to "
                f"DispatchKey::CompositeImplicitAutograd. "
                f"CompositeImplicitAutograd operators do not need an abstract "
                f"impl; instead, the operator will decompose into its "
                f"constituents and those can have abstract impls defined on "
                f"them."
            )

        # Store the kernel in this holder and register it to the Meta key.
        self.kernel = Kernel(func, source)
        if self.lib is None:
            ns = self.qualname.split("::")[0]
            self.lib = torch.library.Library(ns, "FRAGMENT")
        meta_kernel = construct_meta_kernel(self.qualname, self)
        self.lib.impl(self.qualname, meta_kernel, "Meta")

        def deregister_abstract_impl():
            if self.lib:
                self.lib._destroy()
                self.lib = None
            self.kernel = None

        return RegistrationHandle(deregister_abstract_impl)
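# Usage sketch: how a registration flows through this holder. Illustrative
# only -- "mylib::foo", foo_abstract, and the source string are hypothetical;
# in practice torch.library.impl_abstract(...) obtains the holder and calls
# register() on the caller's behalf.
#
#   holder = AbstractImplHolder("mylib::foo")
#
#   def foo_abstract(x):
#       return x.new_empty(x.shape)
#
#   handle = holder.register(foo_abstract, source="mymodule.py:42")
#   # ... later, undo the registration (and the Meta kernel) via the handle:
#   handle.destroy()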
def construct_meta_kernel(
    qualname: str, abstract_impl_holder: AbstractImplHolder
) -> Callable:
    assert abstract_impl_holder.kernel is not None

    @functools.wraps(abstract_impl_holder.kernel.func)
    def meta_kernel(*args, **kwargs):
        assert abstract_impl_holder.kernel is not None
        source = abstract_impl_holder.kernel.source

        def error_on_ctx():
            raise RuntimeError(
                f"Attempted to call get_ctx() for the meta implementation "
                f"for {qualname} (implemented at {source}). "
                f"You have presumably called get_ctx() because the operator "
                f"has a data-dependent output shape; if so, there is no such "
                f"meta implementation and this error is the correct behavior."
            )

        with set_ctx_getter(error_on_ctx):
            return abstract_impl_holder.kernel(*args, **kwargs)

    return meta_kernel


def get_none():
    return None


global_ctx_getter: Callable = get_none


@contextlib.contextmanager
def set_ctx_getter(ctx_getter):
    global global_ctx_getter
    prev = global_ctx_getter
    try:
        global_ctx_getter = ctx_getter
        yield
    finally:
        global_ctx_getter = prev
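# Sketch of how the getter is consumed: torch.library.get_ctx() simply
# returns global_ctx_getter(), so whoever invokes an abstract impl installs
# an appropriate getter first (the lambda below is illustrative):
#
#   def get_ctx() -> "AbstractImplCtx":
#       return global_ctx_getter()
#
#   with set_ctx_getter(lambda: ctx):      # ctx: an AbstractImplCtx
#       result = kernel(*args, **kwargs)   # get_ctx() inside returns ctx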
class AbstractImplCtx:
    """
    Context object for writing abstract implementations for custom operators.
    """

    def __init__(self, _shape_env, _op):
        self._shape_env = _shape_env
        self._op = _op

    def create_unbacked_symint(self, *, min=2, max=None) -> torch.SymInt:
        warnings.warn(
            "create_unbacked_symint is deprecated, please use new_dynamic_size instead"
        )
        return self.new_dynamic_size(min=min, max=max)

    def new_dynamic_size(self, *, min=0, max=None) -> torch.SymInt:
        """Constructs a new symint (symbolic int) representing a data-dependent value.

        This is useful for writing the abstract implementation (which is necessary
        for torch.compile) for a CustomOp where an output Tensor has a size
        that depends on the data of the input Tensors.

        Args:
            min (int): A statically known inclusive lower bound for this symint. Default: 0
            max (Optional[int]): A statically known inclusive upper bound for this
                symint. Default: None

        .. warning::

            It is important that the ``min`` and ``max`` (if not None) values are set
            correctly, otherwise, there will be undefined behavior under
            torch.compile. Note that torch.compile specializes on 0/1 sizes;
            the deprecated ``create_unbacked_symint`` defaulted ``min`` to 2
            for this reason.

            You must also verify that your implementation on concrete Tensors
            (e.g. CPU/CUDA) only returns Tensors where the size that corresponds
            to the symint respects these constraints.
            The easiest way to do this is to add an assertion in the CPU/CUDA/etc
            implementation that the size follows these bounds.

        Example::

            >>> import numpy as np
            >>>
            >>> # An operator with data-dependent output shape
            >>> lib = torch.library.Library("mymodule", "FRAGMENT")
            >>> lib.define("mymodule::custom_nonzero(Tensor x) -> Tensor")
            >>>
            >>> @torch.library.impl_abstract("mymodule::custom_nonzero")
            >>> def custom_nonzero_abstract(x):
            >>>     # Number of nonzero-elements is data-dependent.
            >>>     # Since we cannot peek at the data in an abstract impl,
            >>>     # we use the ctx object to construct a new symint that
            >>>     # represents the data-dependent size.
            >>>     ctx = torch.library.get_ctx()
            >>>     nnz = ctx.new_dynamic_size()
            >>>     shape = [nnz, x.dim()]
            >>>     result = x.new_empty(shape, dtype=torch.int64)
            >>>     return result
            >>>
            >>> @torch.library.impl(lib, "custom_nonzero", "CPU")
            >>> def custom_nonzero_cpu(x):
            >>>     x_np = x.numpy()
            >>>     res = np.stack(np.nonzero(x_np), axis=1)
            >>>     return torch.tensor(res, device=x.device)

        """
        if (
            self._shape_env is None
            or not self._shape_env.allow_dynamic_output_shape_ops
        ):
            raise torch._subclasses.fake_tensor.DynamicOutputShapeException(self._op)

        if isinstance(min, torch.SymInt) or isinstance(max, torch.SymInt):
            raise ValueError(
                f"ctx.new_dynamic_size(min={min}, max={max}): expected "
                f"min and max to be statically known ints but got SymInt. "
                f"This is not supported."
            )

        if min < 0:
            raise ValueError(
                f"ctx.new_dynamic_size(min={min}, ...): expected min to be "
                f"greater than or equal to 0: this API can only create "
                f"non-negative sizes."
            )

        result = self._shape_env.create_unbacked_symint()
        torch.fx.experimental.symbolic_shapes._constrain_range_for_size(
            result, min=min, max=max
        )
        return result
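# End-to-end sketch tying the pieces together: a caller that owns a ShapeEnv
# (e.g. fake-tensor tracing) is expected to build this ctx and install it
# before running a registered abstract impl. Names below are illustrative,
# not the exact call sites:
#
#   ctx = AbstractImplCtx(fake_mode.shape_env, op)
#   with set_ctx_getter(lambda: ctx):
#       out = holder.kernel(*args, **kwargs)  # the impl may call get_ctx()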