
from typing import Any, Dict, List, Optional

import torch.fx
import torch.utils._pytree as pytree

__all__ = ["compile", "list_mode_options", "list_options", "cudagraph_mark_step_begin"]


def compile(
    gm: torch.fx.GraphModule,
    example_inputs: List[torch.Tensor],
    options: Optional[Dict[str, Any]] = None,
):
    """
    Compile a given FX graph with TorchInductor.  This allows compiling
    FX graphs captured without using TorchDynamo.

    Args:
        gm: The FX graph to compile.
        example_inputs:  List of tensor inputs.
        options:  Optional dict of config options.  See `torch._inductor.config`.

    Returns:
        Callable with same behavior as gm but faster.
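
    Example (illustrative; assumes a function traced with
    ``torch.fx.symbolic_trace`` and CPU tensors)::

        >>> def f(x):
        ...     return torch.nn.functional.relu(x) + 1
        >>> gm = torch.fx.symbolic_trace(f)
        >>> compiled = torch._inductor.compile(gm, [torch.randn(4, 4)])
        >>> out = compiled(torch.randn(4, 4))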
       
    """
    from .compile_fx import compile_fx

    return compile_fx(gm, example_inputs, config_patches=options)


def aot_compile(
    gm: torch.fx.GraphModule,
    example_inputs: List[torch.Tensor],
    options: Optional[Dict[str, Any]] = None,
) -> str:
    """
    Ahead-of-time compile a given FX graph with TorchInductor into a shared library.

    Args:
        gm: The FX graph to compile.
        example_inputs:  List of tensor inputs.
        options:  Optional dict of config options.  See `torch._inductor.config`.

    Returns:
        Path to the generated shared library
    """
    from .compile_fx import compile_fx_aot

    serialized_in_spec = ""
    serialized_out_spec = ""
    if isinstance(gm.graph._codegen, torch.fx.graph._PyTreeCodeGen):
        # Swap in the plain CodeGen so the compiled graph takes flat inputs,
        # but remember the serialized input/output tree specs for callers.
        codegen = gm.graph._codegen
        gm.graph._codegen = torch.fx.graph.CodeGen()
        gm.recompile()
        if codegen.pytree_info.in_spec is not None:
            serialized_in_spec = pytree.treespec_dumps(codegen.pytree_info.in_spec)
        if codegen.pytree_info.out_spec is not None:
            serialized_out_spec = pytree.treespec_dumps(codegen.pytree_info.out_spec)

    # Pass the serialized specs to the AOT compiler as config patches.
    options = {
        **(options or {}),
        "aot_inductor.serialized_in_spec": serialized_in_spec,
        "aot_inductor.serialized_out_spec": serialized_out_spec,
    }
    return compile_fx_aot(gm, example_inputs, config_patches=options)


def list_mode_options(
    mode: Optional[str] = None, dynamic: Optional[bool] = None
) -> Dict[str, Any]:
    r"""Returns a dictionary describing the optimizations that each of the available
    modes passed to `torch.compile()` performs.

    Args:
        mode (str, optional): The mode to return the optimizations for.
        If None, returns optimizations for all modes
        dynamic (bool, optional): Whether dynamic shape is enabled.

    Example::
        >>> torch._inductor.list_mode_options()
    """
    mode_options: Dict[str, Dict[str, bool]] = {
        "default": {},
        # enable cudagraphs
        "reduce-overhead": {
            "triton.cudagraphs": True,
        },
        # enable max-autotune
        "max-autotune-no-cudagraphs": {
            "max_autotune": True,
        },
        # enable max-autotune and cudagraphs
        "max-autotune": {
            "max_autotune": True,
            "triton.cudagraphs": True,
        },
    }
    return mode_options[mode] if mode else mode_options


def list_options() -> List[str]:
    r"""Returns a dictionary describing the optimizations and debug configurations
    that are available to `torch.compile()`.

    The options are documented in `torch._inductor.config`.

    Example::

        >>> torch._inductor.list_options()
    """
    from torch._inductor import config

    current_config: Dict[str, Any] = config.shallow_copy_dict()
    return list(current_config.keys())


def cudagraph_mark_step_begin():
    """Indicates that a new iteration of inference or training is about to begin."""
    from .cudagraph_trees import mark_step_begin

    mark_step_begin()