
"""Model validation metrics."""

import math
import warnings
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
import torch

from ultralytics.utils import LOGGER, SimpleClass, TryExcept, plt_settings

# COCO keypoint sigmas used for Object Keypoint Similarity (OKS)
OKS_SIGMA = np.array([0.26, 0.25, 0.25, 0.35, 0.35, 0.79, 0.79, 0.72, 0.72, 0.62, 0.62, 1.07, 1.07, 0.87, 0.87, 0.89,
                      0.89]) / 10.0


def bbox_ioa(box1, box2, iou=False, eps=1e-7):
    """
    Calculate the intersection over box2 area given box1 and box2. Boxes are in x1y1x2y2 format.

    Args:
        box1 (np.array): A numpy array of shape (n, 4) representing n bounding boxes.
        box2 (np.array): A numpy array of shape (m, 4) representing m bounding boxes.
        iou (bool): Calculate the standard iou if True else return inter_area/box2_area.
        eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.

    Returns:
        (np.array): A numpy array of shape (n, m) representing the intersection over box2 area.
    """

    # Get the coordinates of bounding boxes
    b1_x1, b1_y1, b1_x2, b1_y2 = box1.T
    b2_x1, b2_y1, b2_x2, b2_y2 = box2.T

    # Intersection area
    inter_area = (np.minimum(b1_x2[:, None], b2_x2) - np.maximum(b1_x1[:, None], b2_x1)).clip(0) * \
                 (np.minimum(b1_y2[:, None], b2_y2) - np.maximum(b1_y1[:, None], b2_y1)).clip(0)

    # Box2 area
    area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1)
    if iou:
        box1_area = (b1_x2 - b1_x1) * (b1_y2 - b1_y1)
        area = area + box1_area[:, None] - inter_area

    # Intersection over box2 area
    return inter_area / (area + eps)
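
# Illustrative sketch (hypothetical boxes, not part of the library API): the 5x5 overlap of the
# boxes below covers 25 of box2's 100 pixels, so bbox_ioa returns 0.25; with iou=True the
# standard IoU 25 / (100 + 100 - 25) ≈ 0.143 is returned instead.
#
#   box1 = np.array([[0.0, 0.0, 10.0, 10.0]])   # (1, 4) in x1y1x2y2
#   box2 = np.array([[5.0, 5.0, 15.0, 15.0]])   # (1, 4) in x1y1x2y2
#   bbox_ioa(box1, box2)            # -> array([[0.25]])
#   bbox_ioa(box1, box2, iou=True)  # -> ≈ array([[0.143]])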


def box_iou(box1, box2, eps=1e-7):
    """
    Calculate intersection-over-union (IoU) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Based on https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py

    Args:
        box1 (torch.Tensor): A tensor of shape (N, 4) representing N bounding boxes.
        box2 (torch.Tensor): A tensor of shape (M, 4) representing M bounding boxes.
        eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.

    Returns:
        (torch.Tensor): An NxM tensor containing the pairwise IoU values for every element in box1 and box2.
    """

    # inter(N, M) = (rb(N, M, 2) - lt(N, M, 2)).clamp(0).prod(2)
    (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2)
    inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp_(0).prod(2)

    # IoU = inter / (area1 + area2 - inter)
    return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps)
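
# Illustrative usage sketch (hypothetical tensors, not part of the library API): pairwise IoU
# between one box and two candidates, one of which is identical to it.
#
#   boxes_a = torch.tensor([[0.0, 0.0, 10.0, 10.0]])                          # (1, 4)
#   boxes_b = torch.tensor([[5.0, 5.0, 15.0, 15.0], [0.0, 0.0, 10.0, 10.0]])  # (2, 4)
#   box_iou(boxes_a, boxes_b)   # shape (1, 2), ≈ [[0.143, 1.000]]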


def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
    """
    Calculate Intersection over Union (IoU) of box1(1, 4) to box2(n, 4).

    Args:
        box1 (torch.Tensor): A tensor representing a single bounding box with shape (1, 4).
        box2 (torch.Tensor): A tensor representing n bounding boxes with shape (n, 4).
        xywh (bool, optional): If True, input boxes are in (x, y, w, h) format. If False, input boxes are in
                               (x1, y1, x2, y2) format. Defaults to True.
        GIoU (bool, optional): If True, calculate Generalized IoU. Defaults to False.
        DIoU (bool, optional): If True, calculate Distance IoU. Defaults to False.
        CIoU (bool, optional): If True, calculate Complete IoU. Defaults to False.
        eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.

    Returns:
        (torch.Tensor): IoU, GIoU, DIoU, or CIoU values depending on the specified flags.
    """

    # Get the coordinates of bounding boxes
    if xywh:  # transform from xywh to xyxy
        (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1)
        w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2
        b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_
        b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_
    else:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1)
        b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1)
        w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
        w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps

    # Intersection area
    inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp_(0) * \
            (b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1)).clamp_(0)

    # Union area
    union = w1 * h1 + w2 * h2 - inter + eps

    # IoU
    iou = inter / union
    if CIoU or DIoU or GIoU:
        cw = b1_x2.maximum(b2_x2) - b1_x1.minimum(b2_x1)  # convex (smallest enclosing box) width
        ch = b1_y2.maximum(b2_y2) - b1_y1.minimum(b2_y1)  # convex height
        if CIoU or DIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
            c2 = cw ** 2 + ch ** 2 + eps  # convex diagonal squared
            rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4  # center dist ** 2
            if CIoU:
                v = (4 / math.pi ** 2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2)
                with torch.no_grad():
                    alpha = v / (v - iou + (1 + eps))
                return iou - (rho2 / c2 + v * alpha)  # CIoU
            return iou - rho2 / c2  # DIoU
        c_area = cw * ch + eps  # convex area
        return iou - (c_area - union) / c_area  # GIoU https://arxiv.org/pdf/1902.09630.pdf
    return iou  # IoU
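
# Illustrative sketch (hypothetical values, not part of the library API): CIoU between one anchor
# box and two candidates in xywh format. A candidate identical to the anchor scores ≈ 1.0; a
# non-overlapping candidate scores negative, because the centre-distance and aspect-ratio terms
# are subtracted from a zero IoU.
#
#   anchor = torch.tensor([[50.0, 50.0, 20.0, 30.0]])   # (1, 4) xywh
#   cands = torch.tensor([[50.0, 50.0, 20.0, 30.0],
#                         [80.0, 80.0, 10.0, 10.0]])    # (2, 4) xywh
#   bbox_iou(anchor, cands, xywh=True, CIoU=True)       # (2, 1): ≈ [[1.0], [negative]]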


def mask_iou(mask1, mask2, eps=1e-7):
    """
    Calculate masks IoU.

    Args:
        mask1 (torch.Tensor): A tensor of shape (N, n) where N is the number of ground truth objects and n is the
                        product of image width and height.
        mask2 (torch.Tensor): A tensor of shape (M, n) where M is the number of predicted objects and n is the
                        product of image width and height.
        eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.

    Returns:
        (torch.Tensor): A tensor of shape (N, M) representing masks IoU.
    """
    intersection = torch.matmul(mask1, mask2.T).clamp_(0)
    union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection  # (area1 + area2) - intersection
    return intersection / (union + eps)


def kpt_iou(kpt1, kpt2, area, sigma, eps=1e-7):
    """
    Calculate Object Keypoint Similarity (OKS).

    Args:
        kpt1 (torch.Tensor): A tensor of shape (N, 17, 3) representing ground truth keypoints.
        kpt2 (torch.Tensor): A tensor of shape (M, 17, 3) representing predicted keypoints.
        area (torch.Tensor): A tensor of shape (N,) representing areas from ground truth.
        sigma (list): A list containing 17 values representing keypoint scales.
        eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.

    Returns:
        (torch.Tensor): A tensor of shape (N, M) representing keypoint similarities.
    """
    d = (kpt1[:, None, :, 0] - kpt2[..., 0]) ** 2 + (kpt1[:, None, :, 1] - kpt2[..., 1]) ** 2  # (N, M, 17)
    sigma = torch.tensor(sigma, device=kpt1.device, dtype=kpt1.dtype)  # (17, )
    kpt_mask = kpt1[..., 2] != 0  # only keypoints labelled as visible contribute, (N, 17)
    e = d / (2 * sigma) ** 2 / (area[:, None, None] + eps) / 2  # from cocoeval
    return (torch.exp(-e) * kpt_mask[:, None]).sum(-1) / (kpt_mask.sum(-1)[:, None] + eps)
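
# Illustrative sketch (hypothetical tensors, not part of the library API): OKS between ground
# truth and predicted COCO-style poses, using the module-level OKS_SIGMA per-keypoint scales.
#
#   gt = torch.rand(3, 17, 3)                       # 3 labelled persons, (x, y, visibility)
#   pred = torch.rand(5, 17, 3)                     # 5 predicted persons
#   areas = torch.tensor([900.0, 1200.0, 650.0])    # ground-truth box areas, shape (3,)
#   kpt_iou(gt, pred, areas, sigma=OKS_SIGMA)       # similarity matrix of shape (3, 5)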


def smooth_BCE(eps=0.1):
    """
    Computes smoothed positive and negative Binary Cross-Entropy targets.

    This function calculates positive and negative label smoothing BCE targets based on a given epsilon value.
    For implementation details, refer to https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441.

    Args:
        eps (float, optional): The epsilon value for label smoothing. Defaults to 0.1.

    Returns:
        (tuple): A tuple containing the positive and negative label smoothing BCE targets.
    """
    return 1.0 - 0.5 * eps, 0.5 * eps
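
# Worked example: with the default eps=0.1 the positive target becomes 1.0 - 0.5 * 0.1 = 0.95 and
# the negative target 0.5 * 0.1 = 0.05, i.e. smooth_BCE(0.1) == (0.95, 0.05).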


class ConfusionMatrix:
    """
    A class for calculating and updating a confusion matrix for object detection and classification tasks.

    Attributes:
        task (str): The type of task, either 'detect' or 'classify'.
        matrix (np.array): The confusion matrix, with dimensions depending on the task.
        nc (int): The number of classes.
        conf (float): The confidence threshold for detections.
        iou_thres (float): The Intersection over Union threshold.
    """

    def __init__(self, nc, conf=0.25, iou_thres=0.45, task='detect'):
        """Initialize attributes for the YOLO model."""
        self.task = task
        self.matrix = np.zeros((nc + 1, nc + 1)) if self.task == 'detect' else np.zeros((nc, nc))
        self.nc = nc  # number of classes
        self.conf = 0.25 if conf in (None, 0.001) else conf  # apply 0.25 if default val conf is passed
        self.iou_thres = iou_thres

    def process_cls_preds(self, preds, targets):
        """
        Update confusion matrix for classification task.

        Args:
            preds (Array[N, min(nc,5)]): Predicted class labels.
            targets (Array[N, 1]): Ground truth class labels.
        """
        preds, targets = torch.cat(preds)[:, 0], torch.cat(targets)
        for p, t in zip(preds.cpu().numpy(), targets.cpu().numpy()):
            self.matrix[p][t] += 1

    def process_batch(self, detections, labels):
        """
        Update confusion matrix for object detection task.

        Args:
            detections (Array[N, 6]): Detected bounding boxes and their associated information.
                                      Each row should contain (x1, y1, x2, y2, conf, class).
            labels (Array[M, 5]): Ground truth bounding boxes and their associated class labels.
                                  Each row should contain (class, x1, y1, x2, y2).
        """
        if detections is None:
            gt_classes = labels.int()
            for gc in gt_classes:
                self.matrix[self.nc, gc] += 1  # background FN
            return

        detections = detections[detections[:, 4] > self.conf]
        gt_classes = labels[:, 0].int()
        detection_classes = detections[:, 5].int()
        iou = box_iou(labels[:, 1:], detections[:, :4])

        x = torch.where(iou > self.iou_thres)
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
            if x[0].shape[0] > 1:
                # Keep only the single highest-IoU match per ground truth and per detection
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
        else:
            matches = np.zeros((0, 3))

        n = matches.shape[0] > 0
        m0, m1, _ = matches.transpose().astype(int)
        for i, gc in enumerate(gt_classes):
            j = m0 == i
            if n and sum(j) == 1:
                self.matrix[detection_classes[m1[j]], gc] += 1  # correct
            else:
                self.matrix[self.nc, gc] += 1  # true background

        if n:
            for i, dc in enumerate(detection_classes):
                if not any(m1 == i):
                    self.matrix[dc, self.nc] += 1  # predicted background

    def matrix(self):
        """Returns the confusion matrix."""
        return self.matrix

    def tp_fp(self):
        """Returns true positives and false positives."""
        tp = self.matrix.diagonal()  # true positives
        fp = self.matrix.sum(1) - tp  # false positives
        return (tp[:-1], fp[:-1]) if self.task == 'detect' else (tp, fp)  # remove background class if task=detect

    @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure')
    @plt_settings()
    def plot(self, normalize=True, save_dir='', names=(), on_plot=None):
        """
        Plot the confusion matrix using seaborn and save it to a file.

        Args:
            normalize (bool): Whether to normalize the confusion matrix.
            save_dir (str): Directory where the plot will be saved.
            names (tuple): Names of classes, used as labels on the plot.
            on_plot (func): An optional callback to pass plots path and data when they are rendered.
        """
        import seaborn as sn

        array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1)  # normalize columns
        array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)

        fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True)
        nc, nn = self.nc, len(names)  # number of classes, names
        sn.set(font_scale=1.0 if nc < 50 else 0.8)  # for label size
        labels = (0 < nn < 99) and (nn == nc)  # apply names to ticklabels
        ticklabels = (list(names) + ['background']) if labels else 'auto'
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')  # suppress empty matrix RuntimeWarning: All-NaN slice encountered
            sn.heatmap(array,
                       ax=ax,
                       annot=nc < 30,
                       annot_kws={'size': 8},
                       cmap='Blues',
                       fmt='.2f' if normalize else '.0f',
                       square=True,
                       vmin=0.0,
                       xticklabels=ticklabels,
                       yticklabels=ticklabels).set_facecolor((1, 1, 1))
        title = 'Confusion Matrix' + ' Normalized' * normalize
        ax.set_xlabel('True')
        ax.set_ylabel('Predicted')
        ax.set_title(title)
        plot_fname = Path(save_dir) / f'{title.lower().replace(" ", "_")}.png'
        fig.savefig(plot_fname, dpi=250)
        plt.close(fig)
        if on_plot:
            on_plot(plot_fname)

    def print(self):
        """Print the confusion matrix to the console."""
        for i in range(self.nc + 1):
            LOGGER.info(' '.join(map(str, self.matrix[i])))


def smooth(y, f=0.05):
    """Box filter of fraction f."""
    nf = round(len(y) * f * 2) // 2 + 1  # number of filter elements (must be odd)
    p = np.ones(nf // 2)  # ones padding
    yp = np.concatenate((p * y[0], y, p * y[-1]), 0)  # y padded
    return np.convolve(yp, np.ones(nf) / nf, mode='valid')  # y-smoothed


@plt_settings()
def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=(), on_plot=None):
    """Plots a precision-recall curve."""
    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
    py = np.stack(py, axis=1)

    if 0 < len(names) < 21:  # display per-class legend if < 21 classes
        for i, y in enumerate(py.T):
            ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}')  # plot(recall, precision)
    else:
        ax.plot(px, py, linewidth=1, color='grey')  # plot(recall, precision)

    ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean())
    ax.set_xlabel('Recall')
    ax.set_ylabel('Precision')
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left')
    ax.set_title('Precision-Recall Curve')
    fig.savefig(save_dir, dpi=250)
    plt.close(fig)
    if on_plot:
        on_plot(save_dir)


@plt_settings()
def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confidence', ylabel='Metric', on_plot=None):
    """Plots a metric-confidence curve."""
    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)

    if 0 < len(names) < 21:  # display per-class legend if < 21 classes
        for i, y in enumerate(py):
            ax.plot(px, y, linewidth=1, label=f'{names[i]}')  # plot(confidence, metric)
    else:
        ax.plot(px, py.T, linewidth=1, color='grey')  # plot(confidence, metric)

    y = smooth(py.mean(0), 0.05)
    ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}')
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left')
    ax.set_title(f'{ylabel}-Confidence Curve')
    fig.savefig(save_dir, dpi=250)
    plt.close(fig)
    if on_plot:
        on_plot(save_dir)


def compute_ap(recall, precision):
    """
    Compute the average precision (AP) given the recall and precision curves.

    Args:
        recall (list): The recall curve.
        precision (list): The precision curve.

    Returns:
        (float): Average precision.
        (np.ndarray): Precision envelope curve.
        (np.ndarray): Modified recall curve with sentinel values added at the beginning and end.
    """

    # Append sentinel values to beginning and end
    mrec = np.concatenate(([0.0], recall, [1.0]))
    mpre = np.concatenate(([1.0], precision, [0.0]))

    # Compute the precision envelope
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

    # Integrate area under curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
    else:  # 'continuous'
        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x-axis (recall) changes
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve

    return ap, mpre, mrec
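
# Illustrative sketch (hypothetical arrays, not part of the library API): compute_ap expects the
# cumulative recall and precision curves of one class at one IoU threshold, ordered by descending
# confidence. The precision envelope makes precision monotonically non-increasing before the
# 101-point trapezoidal integration.
#
#   recall = np.array([0.2, 0.4, 0.4, 0.6, 0.8])
#   precision = np.array([1.0, 1.0, 0.67, 0.75, 0.8])
#   ap, mpre, mrec = compute_ap(recall, precision)   # ap is a float in [0, 1]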


def ap_per_class(tp,
                 conf,
                 pred_cls,
                 target_cls,
                 plot=False,
                 on_plot=None,
                 save_dir=Path(),
                 names=(),
                 eps=1e-16,
                 prefix=''):
    """
    Computes the average precision per class for object detection evaluation.

    Args:
        tp (np.ndarray): Binary array indicating whether the detection is correct (True) or not (False).
        conf (np.ndarray): Array of confidence scores of the detections.
        pred_cls (np.ndarray): Array of predicted classes of the detections.
        target_cls (np.ndarray): Array of true classes of the detections.
        plot (bool, optional): Whether to plot PR curves or not. Defaults to False.
        on_plot (func, optional): A callback to pass plots path and data when they are rendered. Defaults to None.
        save_dir (Path, optional): Directory to save the PR curves. Defaults to an empty path.
        names (tuple, optional): Tuple of class names to plot PR curves. Defaults to an empty tuple.
        eps (float, optional): A small value to avoid division by zero. Defaults to 1e-16.
        prefix (str, optional): A prefix string for saving the plot files. Defaults to an empty string.

    Returns:
        (tuple): A tuple of arrays and one array of unique classes, where:
            tp (np.ndarray): True positive counts at threshold given by max F1 metric for each class. Shape: (nc,).
            fp (np.ndarray): False positive counts at threshold given by max F1 metric for each class. Shape: (nc,).
            p (np.ndarray): Precision values at threshold given by max F1 metric for each class. Shape: (nc,).
            r (np.ndarray): Recall values at threshold given by max F1 metric for each class. Shape: (nc,).
            f1 (np.ndarray): F1-score values at threshold given by max F1 metric for each class. Shape: (nc,).
            ap (np.ndarray): Average precision for each class at different IoU thresholds. Shape: (nc, 10).
            unique_classes (np.ndarray): An array of unique classes that have data. Shape: (nc,).
            p_curve (np.ndarray): Precision curves for each class. Shape: (nc, 1000).
            r_curve (np.ndarray): Recall curves for each class. Shape: (nc, 1000).
            f1_curve (np.ndarray): F1-score curves for each class. Shape: (nc, 1000).
            x (np.ndarray): X-axis values for the curves. Shape: (1000,).
            prec_values: Precision values at mAP@0.5 for each class. Shape: (nc, 1000).
    """

    # Sort by objectness
    i = np.argsort(-conf)
    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]

    # Find unique classes
    unique_classes, nt = np.unique(target_cls, return_counts=True)
    nc = unique_classes.shape[0]  # number of classes, number of detections

    # Create Precision-Recall curve and compute AP for each class
    x, prec_values = np.linspace(0, 1, 1000), []

    # Average precision, precision and recall curves
    ap, p_curve, r_curve = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))
    for ci, c in enumerate(unique_classes):
        i = pred_cls == c
        n_l = nt[ci]  # number of labels
        n_p = i.sum()  # number of predictions
        if n_p == 0 or n_l == 0:
            continue

        # Accumulate FPs and TPs
        fpc = (1 - tp[i]).cumsum(0)
        tpc = tp[i].cumsum(0)

        # Recall
        recall = tpc / (n_l + eps)  # recall curve
        r_curve[ci] = np.interp(-x, -conf[i], recall[:, 0], left=0)  # negative x, xp because xp decreases

        # Precision
        precision = tpc / (tpc + fpc)  # precision curve
        p_curve[ci] = np.interp(-x, -conf[i], precision[:, 0], left=1)  # p at pr_score

        # AP from recall-precision curve
        for j in range(tp.shape[1]):
            ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
            if plot and j == 0:
                prec_values.append(np.interp(x, mrec, mpre))  # precision at mAP@0.5

    prec_values = np.array(prec_values)  # (nc, 1000)

    # Compute F1 (harmonic mean of precision and recall)
    f1_curve = 2 * p_curve * r_curve / (p_curve + r_curve + eps)
    names = [v for k, v in names.items() if k in unique_classes]  # list: only classes that have data
    names = dict(enumerate(names))  # to dict
    if plot:
        plot_pr_curve(x, prec_values, ap, save_dir / f'{prefix}PR_curve.png', names, on_plot=on_plot)
        plot_mc_curve(x, f1_curve, save_dir / f'{prefix}F1_curve.png', names, ylabel='F1', on_plot=on_plot)
        plot_mc_curve(x, p_curve, save_dir / f'{prefix}P_curve.png', names, ylabel='Precision', on_plot=on_plot)
        plot_mc_curve(x, r_curve, save_dir / f'{prefix}R_curve.png', names, ylabel='Recall', on_plot=on_plot)

    i = smooth(f1_curve.mean(0), 0.1).argmax()  # max F1 index
    p, r, f1 = p_curve[:, i], r_curve[:, i], f1_curve[:, i]  # max-F1 precision, recall, F1 values
    tp = (r * nt).round()  # true positives
    fp = (tp / (p + eps) - tp).round()  # false positives
    return tp, fp, p, r, f1, ap, unique_classes.astype(int), p_curve, r_curve, f1_curve, x, prec_values
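
# Illustrative sketch (hypothetical arrays, not part of the library API): per-class AP from a
# small batch of matched detections. `tp` holds one column per IoU threshold (10 thresholds from
# 0.5 to 0.95 in YOLO validation), and `names` is a dict of class index -> name.
#
#   tp = np.random.rand(50, 10) > 0.5          # 50 detections x 10 IoU thresholds
#   conf = np.random.rand(50)                  # detection confidences
#   pred_cls = np.random.randint(0, 3, 50)     # predicted class per detection
#   target_cls = np.random.randint(0, 3, 80)   # class of every ground-truth label
#   out = ap_per_class(tp, conf, pred_cls, target_cls, names={0: 'a', 1: 'b', 2: 'c'})
#   tp_c, fp_c, p, r, f1, ap = out[:6]         # ap has shape (n_classes_with_data, 10)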


class Metric(SimpleClass):
    """
    Class for computing evaluation metrics for YOLOv8 model.

    Attributes:
        p (list): Precision for each class. Shape: (nc,).
        r (list): Recall for each class. Shape: (nc,).
        f1 (list): F1 score for each class. Shape: (nc,).
        all_ap (list): AP scores for all classes and all IoU thresholds. Shape: (nc, 10).
        ap_class_index (list): Index of class for each AP score. Shape: (nc,).
        nc (int): Number of classes.

    Methods:
        ap50(): AP at IoU threshold of 0.5 for all classes. Returns: List of AP scores. Shape: (nc,) or [].
        ap(): AP at IoU thresholds from 0.5 to 0.95 for all classes. Returns: List of AP scores. Shape: (nc,) or [].
        mp(): Mean precision of all classes. Returns: Float.
        mr(): Mean recall of all classes. Returns: Float.
        map50(): Mean AP at IoU threshold of 0.5 for all classes. Returns: Float.
        map75(): Mean AP at IoU threshold of 0.75 for all classes. Returns: Float.
        map(): Mean AP at IoU thresholds from 0.5 to 0.95 for all classes. Returns: Float.
        mean_results(): Mean of results, returns mp, mr, map50, map.
        class_result(i): Class-aware result, returns p[i], r[i], ap50[i], ap[i].
        maps(): mAP of each class. Returns: Array of mAP scores, shape: (nc,).
        fitness(): Model fitness as a weighted combination of metrics. Returns: Float.
        update(results): Update metric attributes with new evaluation results.
    """

    def __init__(self) -> None:
        """Initializes a Metric instance for computing evaluation metrics for the YOLOv8 model."""
        self.p = []  # (nc, )
        self.r = []  # (nc, )
        self.f1 = []  # (nc, )
        self.all_ap = []  # (nc, 10)
        self.ap_class_index = []  # (nc, )
        self.nc = 0

    @property
    def ap50(self):
        """
        Returns the Average Precision (AP) at an IoU threshold of 0.5 for all classes.

        Returns:
            (np.ndarray, list): Array of shape (nc,) with AP50 values per class, or an empty list if not available.
        """
        return self.all_ap[:, 0] if len(self.all_ap) else []

    @property
    def ap(self):
        """
        Returns the Average Precision (AP) at an IoU threshold of 0.5-0.95 for all classes.

        Returns:
            (np.ndarray, list): Array of shape (nc,) with AP50-95 values per class, or an empty list if not available.
        """
        return self.all_ap.mean(1) if len(self.all_ap) else []

    @property
    def mp(self):
        """
        Returns the Mean Precision of all classes.

        Returns:
            (float): The mean precision of all classes.
        """
        return self.p.mean() if len(self.p) else 0.0

    @property
    def mr(self):
        """
        Returns the Mean Recall of all classes.

        Returns:
            (float): The mean recall of all classes.
        """
        return self.r.mean() if len(self.r) else 0.0

    @property
    def map50(self):
        """
        Returns the mean Average Precision (mAP) at an IoU threshold of 0.5.

        Returns:
            (float): The mAP at an IoU threshold of 0.5.
        """
        return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0

    @property
    def map75(self):
        """
        Returns the mean Average Precision (mAP) at an IoU threshold of 0.75.

        Returns:
            (float): The mAP at an IoU threshold of 0.75.
        """
        return self.all_ap[:, 5].mean() if len(self.all_ap) else 0.0

    @property
    def map(self):
        """
        Returns the mean Average Precision (mAP) over IoU thresholds of 0.5 - 0.95 in steps of 0.05.

        Returns:
            (float): The mAP over IoU thresholds of 0.5 - 0.95 in steps of 0.05.
        """
        return self.all_ap.mean() if len(self.all_ap) else 0.0

    def mean_results(self):
        """Mean of results, return mp, mr, map50, map."""
        return [self.mp, self.mr, self.map50, self.map]

    def class_result(self, i):
        """Class-aware result, return p[i], r[i], ap50[i], ap[i]."""
        return self.p[i], self.r[i], self.ap50[i], self.ap[i]

    @property
    def maps(self):
        """MAP of each class."""
        maps = np.zeros(self.nc) + self.map
        for i, c in enumerate(self.ap_class_index):
            maps[c] = self.ap[i]
        return maps

    def fitness(self):
        """Model fitness as a weighted combination of metrics."""
        w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
        return (np.array(self.mean_results()) * w).sum()

    def update(self, results):
        """
        Updates the evaluation metrics of the model with a new set of results.

        Args:
            results (tuple): A tuple containing the following evaluation metrics:
                - p (list): Precision for each class. Shape: (nc,).
                - r (list): Recall for each class. Shape: (nc,).
                - f1 (list): F1 score for each class. Shape: (nc,).
                - all_ap (list): AP scores for all classes and all IoU thresholds. Shape: (nc, 10).
                - ap_class_index (list): Index of class for each AP score. Shape: (nc,).

        Side Effects:
            Updates the class attributes `self.p`, `self.r`, `self.f1`, `self.all_ap`, and `self.ap_class_index` based
            on the values provided in the `results` tuple.
        """
        (self.p, self.r, self.f1, self.all_ap, self.ap_class_index, self.p_curve, self.r_curve, self.f1_curve, self.px,
         self.prec_values) = results

    @property
    def curves(self):
        """Returns a list of curves for accessing specific metrics curves."""
        return []

    @property
    def curves_results(self):
        """Returns a list of curves for accessing specific metrics curves."""
        return [[self.px, self.prec_values, 'Recall', 'Precision'], [self.px, self.f1_curve, 'Confidence', 'F1'],
                [self.px, self.p_curve, 'Confidence', 'Precision'], [self.px, self.r_curve, 'Confidence', 'Recall']]
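
# Worked example of the fitness weighting used above (illustrative numbers): with
# w = [0.0, 0.0, 0.1, 0.9], a model with mAP50 = 0.60 and mAP50-95 = 0.40 scores
# fitness = 0.1 * 0.60 + 0.9 * 0.40 = 0.42; precision and recall do not contribute directly.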


class DetMetrics(SimpleClass):
    """
    This class is a utility class for computing detection metrics such as precision, recall, and mean average precision
    (mAP) of an object detection model.

    Args:
        save_dir (Path): A path to the directory where the output plots will be saved. Defaults to current directory.
        plot (bool): A flag that indicates whether to plot precision-recall curves for each class. Defaults to False.
        on_plot (func): An optional callback to pass plots path and data when they are rendered. Defaults to None.
        names (tuple of str): A tuple of strings that represents the names of the classes. Defaults to an empty tuple.

    Attributes:
        save_dir (Path): A path to the directory where the output plots will be saved.
        plot (bool): A flag that indicates whether to plot the precision-recall curves for each class.
        on_plot (func): An optional callback to pass plots path and data when they are rendered.
        names (tuple of str): A tuple of strings that represents the names of the classes.
        box (Metric): An instance of the Metric class for storing the results of the detection metrics.
        speed (dict): A dictionary for storing the execution time of different parts of the detection process.

    Methods:
        process(tp, conf, pred_cls, target_cls): Updates the metric results with the latest batch of predictions.
        keys: Returns a list of keys for accessing the computed detection metrics.
        mean_results: Returns a list of mean values for the computed detection metrics.
        class_result(i): Returns a list of values for the computed detection metrics for a specific class.
        maps: Returns a dictionary of mean average precision (mAP) values for different IoU thresholds.
        fitness: Computes the fitness score based on the computed detection metrics.
        ap_class_index: Returns a list of class indices sorted by their average precision (AP) values.
        results_dict: Returns a dictionary that maps detection metric keys to their computed values.
        curves: TODO
        curves_results: TODO
    """

    def __init__(self, save_dir=Path('.'), plot=False, on_plot=None, names=()) -> None:
        """Initialize a DetMetrics instance with a save directory, plot flag, callback function, and class names."""
        self.save_dir = save_dir
        self.plot = plot
        self.on_plot = on_plot
        self.names = names
        self.box = Metric()
        self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0}
        self.task = 'detect'

    def process(self, tp, conf, pred_cls, target_cls):
        """Process predicted results for object detection and update metrics."""
        results = ap_per_class(tp,
                               conf,
                               pred_cls,
                               target_cls,
                               plot=self.plot,
                               save_dir=self.save_dir,
                               names=self.names,
                               on_plot=self.on_plot)[2:]
        self.box.nc = len(self.names)
        self.box.update(results)

    @property
    def keys(self):
        """Returns a list of keys for accessing specific metrics."""
        return ['metrics/precision(B)', 'metrics/recall(B)', 'metrics/mAP50(B)', 'metrics/mAP50-95(B)']

    def mean_results(self):
        """Calculate mean of detected objects & return precision, recall, mAP50, and mAP50-95."""
        return self.box.mean_results()

    def class_result(self, i):
        """Return the result of evaluating the performance of an object detection model on a specific class."""
        return self.box.class_result(i)

    @property
    def maps(self):
        """Returns mean Average Precision (mAP) scores per class."""
        return self.box.maps

    @property
    def fitness(self):
        """Returns the fitness of box object."""
        return self.box.fitness()

    @property
    def ap_class_index(self):
        """Returns the average precision index per class."""
        return self.box.ap_class_index

    @property
    def results_dict(self):
        """Returns dictionary of computed performance metrics and statistics."""
        return dict(zip(self.keys + ['fitness'], self.mean_results() + [self.fitness]))

    @property
    def curves(self):
        """Returns a list of curves for accessing specific metrics curves."""
        return ['Precision-Recall(B)', 'F1-Confidence(B)', 'Precision-Confidence(B)', 'Recall-Confidence(B)']

    @property
    def curves_results(self):
        """Returns dictionary of computed performance metrics and statistics."""
        return self.box.curves_results
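
# Illustrative usage sketch (hypothetical arrays and paths, not part of the library API):
#
#   det = DetMetrics(save_dir=Path('runs/val'), names={0: 'person', 1: 'car'})
#   det.process(tp, conf, pred_cls, target_cls)   # arrays shaped as in ap_per_class() above
#   det.mean_results()        # [mp, mr, map50, map50-95]
#   det.results_dict          # same values keyed by name, plus a 'fitness' entry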


class SegmentMetrics(SimpleClass):
    """
    Calculates and aggregates detection and segmentation metrics over a given set of classes.

    Args:
        save_dir (Path): Path to the directory where the output plots should be saved. Default is the current directory.
        plot (bool): Whether to save the detection and segmentation plots. Default is False.
        on_plot (func): An optional callback to pass plots path and data when they are rendered. Defaults to None.
        names (list): List of class names. Default is an empty list.

    Attributes:
        save_dir (Path): Path to the directory where the output plots should be saved.
        plot (bool): Whether to save the detection and segmentation plots.
        on_plot (func): An optional callback to pass plots path and data when they are rendered.
        names (list): List of class names.
        box (Metric): An instance of the Metric class to calculate box detection metrics.
        seg (Metric): An instance of the Metric class to calculate mask segmentation metrics.
        speed (dict): Dictionary to store the time taken in different phases of inference.

    Methods:
        process(tp_m, tp_b, conf, pred_cls, target_cls): Processes metrics over the given set of predictions.
        mean_results(): Returns the mean of the detection and segmentation metrics over all the classes.
        class_result(i): Returns the detection and segmentation metrics of class `i`.
        maps: Returns the mean Average Precision (mAP) scores for IoU thresholds ranging from 0.50 to 0.95.
        fitness: Returns the fitness scores, which are a single weighted combination of metrics.
        ap_class_index: Returns the list of indices of classes used to compute Average Precision (AP).
        results_dict: Returns the dictionary containing all the detection and segmentation metrics and fitness score.
    """

    def __init__(self, save_dir=Path('.'), plot=False, on_plot=None, names=()) -> None:
        """Initialize a SegmentMetrics instance with a save directory, plot flag, callback function, and class names."""
        self.save_dir = save_dir
        self.plot = plot
        self.on_plot = on_plot
        self.names = names
        self.box = Metric()
        self.seg = Metric()
        self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0}
        self.task = 'segment'

    def process(self, tp_b, tp_m, conf, pred_cls, target_cls):
        """
        Processes the detection and segmentation metrics over the given set of predictions.

        Args:
            tp_b (list): List of True Positive boxes.
            tp_m (list): List of True Positive masks.
            conf (list): List of confidence scores.
            pred_cls (list): List of predicted classes.
            target_cls (list): List of target classes.
        """
        results_mask = ap_per_class(tp_m,
                                    conf,
                                    pred_cls,
                                    target_cls,
                                    plot=self.plot,
                                    on_plot=self.on_plot,
                                    save_dir=self.save_dir,
                                    names=self.names,
                                    prefix='Mask')[2:]
        self.seg.nc = len(self.names)
        self.seg.update(results_mask)
        results_box = ap_per_class(tp_b,
                                   conf,
                                   pred_cls,
                                   target_cls,
                                   plot=self.plot,
                                   on_plot=self.on_plot,
                                   save_dir=self.save_dir,
                                   names=self.names,
                                   prefix='Box')[2:]
        self.box.nc = len(self.names)
        self.box.update(results_box)

    @property
    def keys(self):
        """Returns a list of keys for accessing metrics."""
        return [
            'metrics/precision(B)', 'metrics/recall(B)', 'metrics/mAP50(B)', 'metrics/mAP50-95(B)',
            'metrics/precision(M)', 'metrics/recall(M)', 'metrics/mAP50(M)', 'metrics/mAP50-95(M)']

    def mean_results(self):
        """Return the mean metrics for bounding box and segmentation results."""
        return self.box.mean_results() + self.seg.mean_results()

    def class_result(self, i):
        """Returns classification results for a specified class index."""
        return self.box.class_result(i) + self.seg.class_result(i)

    @property
    def maps(self):
        """Returns mAP scores for object detection and semantic segmentation models."""
        return self.box.maps + self.seg.maps

    @property
    def fitness(self):
        """Get the fitness score for both segmentation and bounding box models."""
        return self.seg.fitness() + self.box.fitness()

    @property
    def ap_class_index(self):
        """Boxes and masks have the same ap_class_index."""
        return self.box.ap_class_index

    @property
    def results_dict(self):
        """Returns results of object detection model for evaluation."""
        return dict(zip(self.keys + ['fitness'], self.mean_results() + [self.fitness]))

    @property
    def curves(self):
        """Returns a list of curves for accessing specific metrics curves."""
        return [
            'Precision-Recall(B)', 'F1-Confidence(B)', 'Precision-Confidence(B)', 'Recall-Confidence(B)',
            'Precision-Recall(M)', 'F1-Confidence(M)', 'Precision-Confidence(M)', 'Recall-Confidence(M)']

    @property
    def curves_results(self):
        """Returns dictionary of computed performance metrics and statistics."""
        return self.box.curves_results + self.seg.curves_results


class PoseMetrics(SegmentMetrics):
    """
    Calculates and aggregates detection and pose metrics over a given set of classes.

    Args:
        save_dir (Path): Path to the directory where the output plots should be saved. Default is the current directory.
        plot (bool): Whether to save the detection and segmentation plots. Default is False.
        on_plot (func): An optional callback to pass plots path and data when they are rendered. Defaults to None.
        names (list): List of class names. Default is an empty list.

    Attributes:
        save_dir (Path): Path to the directory where the output plots should be saved.
        plot (bool): Whether to save the detection and segmentation plots.
        on_plot (func): An optional callback to pass plots path and data when they are rendered.
        names (list): List of class names.
        box (Metric): An instance of the Metric class to calculate box detection metrics.
        pose (Metric): An instance of the Metric class to calculate pose metrics.
        speed (dict): Dictionary to store the time taken in different phases of inference.

    Methods:
        process(tp_m, tp_b, conf, pred_cls, target_cls): Processes metrics over the given set of predictions.
        mean_results(): Returns the mean of the detection and segmentation metrics over all the classes.
        class_result(i): Returns the detection and segmentation metrics of class `i`.
        maps: Returns the mean Average Precision (mAP) scores for IoU thresholds ranging from 0.50 to 0.95.
        fitness: Returns the fitness scores, which are a single weighted combination of metrics.
        ap_class_index: Returns the list of indices of classes used to compute Average Precision (AP).
        results_dict: Returns the dictionary containing all the detection and segmentation metrics and fitness score.
    """

    def __init__(self, save_dir=Path('.'), plot=False, on_plot=None, names=()) -> None:
        """Initialize the PoseMetrics class with directory path, class names, and plotting options."""
        super().__init__(save_dir, plot, names)
        self.save_dir = save_dir
        self.plot = plot
        self.on_plot = on_plot
        self.names = names
        self.box = Metric()
        self.pose = Metric()
        self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0}
        self.task = 'pose'

    def process(self, tp_b, tp_p, conf, pred_cls, target_cls):
        """
        Processes the detection and pose metrics over the given set of predictions.

        Args:
            tp_b (list): List of True Positive boxes.
            tp_p (list): List of True Positive keypoints.
            conf (list): List of confidence scores.
            pred_cls (list): List of predicted classes.
            target_cls (list): List of target classes.
        """
        results_pose = ap_per_class(tp_p,
                                    conf,
                                    pred_cls,
                                    target_cls,
                                    plot=self.plot,
                                    on_plot=self.on_plot,
                                    save_dir=self.save_dir,
                                    names=self.names,
                                    prefix='Pose')[2:]
        self.pose.nc = len(self.names)
        self.pose.update(results_pose)
        results_box = ap_per_class(tp_b,
                                   conf,
                                   pred_cls,
                                   target_cls,
                                   plot=self.plot,
                                   on_plot=self.on_plot,
                                   save_dir=self.save_dir,
                                   names=self.names,
                                   prefix='Box')[2:]
        self.box.nc = len(self.names)
        self.box.update(results_box)

    @property
    def keys(self):
        """Returns list of evaluation metric keys."""
        return [
            'metrics/precision(B)', 'metrics/recall(B)', 'metrics/mAP50(B)', 'metrics/mAP50-95(B)',
            'metrics/precision(P)', 'metrics/recall(P)', 'metrics/mAP50(P)', 'metrics/mAP50-95(P)']

    def mean_results(self):
        """Return the mean results of box and pose."""
        return self.box.mean_results() + self.pose.mean_results()

    def class_result(self, i):
        """Return the class-wise detection results for a specific class i."""
        return self.box.class_result(i) + self.pose.class_result(i)

    @property
    def maps(self):
        """Returns the mean average precision (mAP) per class for both box and pose detections."""
        return self.box.maps + self.pose.maps

    @property
    def fitness(self):
        """Computes classification metrics and speed using the `targets` and `pred` inputs."""
        return self.pose.fitness() + self.box.fitness()

    @property
    def curves(self):
        """Returns a list of curves for accessing specific metrics curves."""
        return [
            'Precision-Recall(B)', 'F1-Confidence(B)', 'Precision-Confidence(B)', 'Recall-Confidence(B)',
            'Precision-Recall(P)', 'F1-Confidence(P)', 'Precision-Confidence(P)', 'Recall-Confidence(P)']

    @property
    def curves_results(self):
        """Returns dictionary of computed performance metrics and statistics."""
        return self.box.curves_results + self.pose.curves_results


class ClassifyMetrics(SimpleClass):
    """
    Class for computing classification metrics including top-1 and top-5 accuracy.

    Attributes:
        top1 (float): The top-1 accuracy.
        top5 (float): The top-5 accuracy.
        speed (Dict[str, float]): A dictionary containing the time taken for each step in the pipeline.

    Properties:
        fitness (float): The fitness of the model, which is equal to top-5 accuracy.
        results_dict (Dict[str, Union[float, str]]): A dictionary containing the classification metrics and fitness.
        keys (List[str]): A list of keys for the results_dict.

    Methods:
        process(targets, pred): Processes the targets and predictions to compute classification metrics.
    """

    def __init__(self) -> None:
        """Initialize a ClassifyMetrics instance."""
        self.top1 = 0
        self.top5 = 0
        self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0}
        self.task = 'classify'

    def process(self, targets, pred):
        """Target classes and predicted classes."""
        pred, targets = torch.cat(pred), torch.cat(targets)
        correct = (targets[:, None] == pred).float()
        acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1)  # (top1, top5) accuracy
        self.top1, self.top5 = acc.mean(0).tolist()

    @property
    def fitness(self):
        """Returns mean of top-1 and top-5 accuracies as fitness score."""
        return (self.top1 + self.top5) / 2

    @property
    def results_dict(self):
        """Returns a dictionary with model's performance metrics and fitness score."""
        return dict(zip(self.keys + ['fitness'], [self.top1, self.top5, self.fitness]))

    @property
    def keys(self):
        """Returns a list of keys for the results_dict property."""
        return ['metrics/accuracy_top1', 'metrics/accuracy_top5']

    @property
    def curves(self):
        """Returns a list of curves for accessing specific metrics curves."""
        return []

    @property
    def curves_results(self):
        """Returns a list of curves for accessing specific metrics curves."""
        return []
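
# Worked example (hypothetical tensors, not part of the library API): two images, the first
# classified correctly at rank 1 and the second only within the top-k predictions, giving
# top1 = 0.5, top5 = 1.0 and fitness = 0.75 (their mean).
#
#   cm = ClassifyMetrics()
#   cm.process([torch.tensor([0, 1])], [torch.tensor([[0, 2], [2, 1]])])
#   cm.top1, cm.top5   # -> (0.5, 1.0)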
                  g            S )zHReturns a dictionary with model's performance metrics and fitness score.rm  )r>  r~   r  r  r  rm  r   s    r%   r  zClassifyMetrics.results_dict  s4     C		YK/$))TYY1UVWWr'   c                 
    ddgS )z5Returns a list of keys for the results_dict property.zmetrics/accuracy_top1zmetrics/accuracy_top5rn   r   s    r%   r  zClassifyMetrics.keys  s     ()@AAr'   c                     g S rr  rn   r   s    r%   rt  zClassifyMetrics.curves  ru  r'   c                     g S rr  rn   r   s    r%   rw  zClassifyMetrics.curves_results$  ru  r'   rx  )r   r   r   r   r{   r  rz  rm  r  r  rt  rw  rn   r'   r%   r  r    sy    "4 + + X X B B    r'   r  )FHz>)r  )TFFFr  )r:  )r$  )#r   r;   r   pathlibr   matplotlib.pyplotpyplotr   r   r   r-   ultralytics.utilsr   r   r   r   r   	OKS_SIGMAr&   r7   rX   r_   rk   ro   rq   r  r   r(  r5  rQ  r"  r|  r  r  r  rn   r'   r%   <module>r     s'          J JBHHlmptt	%@I.5p(&[,& I< I<X; '+N';2t  0 #'#7r,_gqu  0J &_hD[u[ [u|`' `'FxA[ xAvoB. oBd7k 7r'   