
import math
from typing import Tuple, Type

import torch
from torch import Tensor, nn

from ultralytics.nn.modules import MLPBlock


class TwoWayTransformer(nn.Module):
    """
    A Two-Way Transformer module that enables the simultaneous attention to both image and query points. This class
    serves as a specialized transformer decoder that attends to an input image using queries whose positional embedding
    is supplied. This is particularly useful for tasks like object detection, image segmentation, and point cloud
    processing.

    Attributes:
        depth (int): The number of layers in the transformer.
        embedding_dim (int): The channel dimension for the input embeddings.
        num_heads (int): The number of heads for multihead attention.
        mlp_dim (int): The internal channel dimension for the MLP block.
        layers (nn.ModuleList): The list of TwoWayAttentionBlock layers that make up the transformer.
        final_attn_token_to_image (Attention): The final attention layer applied from the queries to the image.
        norm_final_attn (nn.LayerNorm): The layer normalization applied to the final queries.
    """

    def __init__(self, depth: int, embedding_dim: int, num_heads: int, mlp_dim: int,
                 activation: Type[nn.Module] = nn.ReLU, attention_downsample_rate: int = 2) -> None:
        """
        A transformer decoder that attends to an input image using queries whose positional embedding is supplied.

        Args:
          depth (int): number of layers in the transformer
          embedding_dim (int): the channel dimension for the input embeddings
          num_heads (int): the number of heads for multihead attention. Must
            divide embedding_dim
          mlp_dim (int): the channel dimension internal to the MLP block
          activation (nn.Module): the activation to use in the MLP block
        """
        super().__init__()
        self.depth = depth
        self.embedding_dim = embedding_dim
        self.num_heads = num_heads
        self.mlp_dim = mlp_dim
        self.layers = nn.ModuleList()

        for i in range(depth):
            self.layers.append(
                TwoWayAttentionBlock(
                    embedding_dim=embedding_dim,
                    num_heads=num_heads,
                    mlp_dim=mlp_dim,
                    activation=activation,
                    attention_downsample_rate=attention_downsample_rate,
                    skip_first_layer_pe=(i == 0),
                ))

        self.final_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)
        self.norm_final_attn = nn.LayerNorm(embedding_dim)

    def forward(self, image_embedding: Tensor, image_pe: Tensor, point_embedding: Tensor) -> Tuple[Tensor, Tensor]:
        """
        Args:
          image_embedding (torch.Tensor): image to attend to. Should be shape B x embedding_dim x h x w for any h and w.
          image_pe (torch.Tensor): the positional encoding to add to the image. Must have same shape as image_embedding.
          point_embedding (torch.Tensor): the embedding to add to the query points.
            Must have shape B x N_points x embedding_dim for any N_points.

        Returns:
          (torch.Tensor): the processed point_embedding
          (torch.Tensor): the processed image_embedding
        """
        # Flatten the image embedding: B x C x H x W -> B x HW x C == B x N_image_tokens x C
        bs, c, h, w = image_embedding.shape
        image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
        image_pe = image_pe.flatten(2).permute(0, 2, 1)

        # Prepare queries and keys
        queries = point_embedding
        keys = image_embedding

        # Apply the two-way transformer blocks
        for layer in self.layers:
            queries, keys = layer(queries=queries, keys=keys, query_pe=point_embedding, key_pe=image_pe)

        # Apply the final attention layer from the points to the image
        q = queries + point_embedding
        k = keys + image_pe
        attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
        queries = queries + attn_out
        queries = self.norm_final_attn(queries)

        return queries, keys


class TwoWayAttentionBlock(nn.Module):
    """
    An attention block that performs both self-attention and cross-attention in two directions: queries to keys and
    keys to queries. This block consists of four main layers: (1) self-attention on sparse inputs, (2) cross-attention
    of sparse inputs to dense inputs, (3) an MLP block on sparse inputs, and (4) cross-attention of dense inputs to
    sparse inputs.

    Attributes:
        self_attn (Attention): The self-attention layer for the queries.
        norm1 (nn.LayerNorm): Layer normalization following the first attention block.
        cross_attn_token_to_image (Attention): Cross-attention layer from queries to keys.
        norm2 (nn.LayerNorm): Layer normalization following the second attention block.
        mlp (MLPBlock): MLP block that transforms the query embeddings.
        norm3 (nn.LayerNorm): Layer normalization following the MLP block.
        norm4 (nn.LayerNorm): Layer normalization following the third attention block.
        cross_attn_image_to_token (Attention): Cross-attention layer from keys to queries.
        skip_first_layer_pe (bool): Whether to skip the positional encoding in the first layer.
    """

    def __init__(self, embedding_dim: int, num_heads: int, mlp_dim: int = 2048,
                 activation: Type[nn.Module] = nn.ReLU, attention_downsample_rate: int = 2,
                 skip_first_layer_pe: bool = False) -> None:
        """
        A transformer block with four layers: (1) self-attention of sparse inputs, (2) cross attention of sparse
        inputs to dense inputs, (3) mlp block on sparse inputs, and (4) cross attention of dense inputs to sparse
        inputs.

        Args:
          embedding_dim (int): the channel dimension of the embeddings
          num_heads (int): the number of heads in the attention layers
          mlp_dim (int): the hidden dimension of the mlp block
          activation (nn.Module): the activation of the mlp block
          skip_first_layer_pe (bool): skip the PE on the first layer
        """
        super().__init__()
        self.self_attn = Attention(embedding_dim, num_heads)
        self.norm1 = nn.LayerNorm(embedding_dim)

        self.cross_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)
        self.norm2 = nn.LayerNorm(embedding_dim)

        self.mlp = MLPBlock(embedding_dim, mlp_dim, activation)
        self.norm3 = nn.LayerNorm(embedding_dim)

        self.norm4 = nn.LayerNorm(embedding_dim)
        self.cross_attn_image_to_token = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)

        self.skip_first_layer_pe = skip_first_layer_pe

    def forward(self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor) -> Tuple[Tensor, Tensor]:
        """Apply self-attention and cross-attention to queries and keys and return the processed embeddings."""

        # Self attention block
        if self.skip_first_layer_pe:
            queries = self.self_attn(q=queries, k=queries, v=queries)
        else:
            q = queries + query_pe
            attn_out = self.self_attn(q=q, k=q, v=queries)
            queries = queries + attn_out
        queries = self.norm1(queries)

        # Cross attention block, tokens attending to image embedding
        q = queries + query_pe
        k = keys + key_pe
        attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
        queries = queries + attn_out
        queries = self.norm2(queries)

        # MLP block
        mlp_out = self.mlp(queries)
        queries = queries + mlp_out
        queries = self.norm3(queries)

        # Cross attention block, image embedding attending to tokens
        q = queries + query_pe
        k = keys + key_pe
        attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
        keys = keys + attn_out
        keys = self.norm4(keys)

        return queries, keys


class Attention(nn.Module):
    """An attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and
    values.
    """

    def __init__(self, embedding_dim: int, num_heads: int, downsample_rate: int = 1) -> None:
        """
        Initializes the Attention model with the given dimensions and settings.

        Args:
            embedding_dim (int): The dimensionality of the input embeddings.
            num_heads (int): The number of attention heads.
            downsample_rate (int, optional): The factor by which the internal dimensions are downsampled. Defaults to 1.

        Raises:
            AssertionError: If 'num_heads' does not evenly divide the internal dimension (embedding_dim / downsample_rate).
        """
        super().__init__()
        self.embedding_dim = embedding_dim
        self.internal_dim = embedding_dim // downsample_rate
        self.num_heads = num_heads
        assert self.internal_dim % num_heads == 0, 'num_heads must divide embedding_dim.'

        self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
        self.k_proj = nn.Linear(embedding_dim, self.internal_dim)
        self.v_proj = nn.Linear(embedding_dim, self.internal_dim)
        self.out_proj = nn.Linear(self.internal_dim, embedding_dim)

    @staticmethod
    def _separate_heads(x: Tensor, num_heads: int) -> Tensor:
        """Separate the input tensor into the specified number of attention heads."""
        b, n, c = x.shape
        x = x.reshape(b, n, num_heads, c // num_heads)
        return x.transpose(1, 2)  # B x N_heads x N_tokens x C_per_head

    @staticmethod
    def _recombine_heads(x: Tensor) -> Tensor:
        """Recombine the separated attention heads into a single tensor."""
        b, n_heads, n_tokens, c_per_head = x.shape
        x = x.transpose(1, 2)
        return x.reshape(b, n_tokens, n_heads * c_per_head)  # B x N_tokens x C

    def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
        """Compute the attention output given the input query, key, and value tensors."""

        # Input projections
        q = self.q_proj(q)
        k = self.k_proj(k)
        v = self.v_proj(v)

        # Separate into heads
        q = self._separate_heads(q, self.num_heads)
        k = self._separate_heads(k, self.num_heads)
        v = self._separate_heads(v, self.num_heads)

        # Attention
        _, _, _, c_per_head = q.shape
        attn = q @ k.permute(0, 1, 3, 2)  # B x N_heads x N_tokens x N_tokens
        attn = attn / math.sqrt(c_per_head)
        attn = torch.softmax(attn, dim=-1)

        # Get output
        out = attn @ v
        out = self._recombine_heads(out)
        return self.out_proj(out)