Object detection related:
Common evaluation metrics in object detection: AP, mAP, top-1, top-5, P-R curve, etc.
http://blog.sina.com.cn/s/blog_9db078090102whzw.html
How mean average precision (MAP) is calculated and applied in computer vision.
https://www.zhihu.com/question/41540197/answer/91698989
Precision and recall, ROC curves and PR curves
http://www.ppvke.com/Blog/archives/45988
What is the Top-5 error rate for image classification.
https://www.zhihu.com/question/36463511
Image Classification Top-5 Error rate
https://www.jianshu.com/p/355785bd77cb
Evaluation & Calculate top-n accuracy:top 1 and Top 5
https://stackoverflow.com/questions/37668902/evaluation-calculate-top-n-accuracy-top-1-and-top-5
AP calculation code: https://github.com/facebookresearch/Detectron/blob/05d04d3a024f0991339de45872d02f2f50669b3d/lib/datasets/voc_eval.py#L54
import logging
import os
import pickle

import numpy as np

logger = logging.getLogger(__name__)


def voc_ap(rec, prec, use_07_metric=False):
    """Compute VOC AP given precision and recall arrays.

    rec: monotonically increasing recall values, one per detection rank.
    prec: precision values, one per detection rank.
    use_07_metric: if True, use the VOC07 11-point interpolated AP
        (default: False, which uses the exact area under the
        interpolated P-R curve).
    Returns the average precision as a float.
    """
    if use_07_metric:
        # 11-point metric: average the max precision at recall
        # thresholds 0.0, 0.1, ..., 1.0.
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            if np.sum(rec >= t) == 0:
                p = 0
            else:
                p = np.max(prec[rec >= t])
            ap = ap + p / 11.
    else:
        # Correct AP calculation:
        # first append sentinel values at the ends.
        mrec = np.concatenate(([0.], rec, [1.]))
        mpre = np.concatenate(([0.], prec, [0.]))

        # Compute the precision envelope (make precision monotonically
        # non-increasing from right to left).
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

        # To calculate the area under the PR curve, look for points
        # where the x axis (recall) changes value.
        i = np.where(mrec[1:] != mrec[:-1])[0]

        # And sum (\Delta recall) * prec.
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap


def voc_eval(detpath, annopath, imagesetfile, classname, cachedir,
             ovthresh=0.5, use_07_metric=False):
    """rec, prec, ap = voc_eval(detpath, annopath, imagesetfile, classname,
                                cachedir, [ovthresh], [use_07_metric])

    Top-level function that does the PASCAL VOC evaluation.

    detpath: Path to detections;
        detpath.format(classname) should produce the detection results file.
    annopath: Path to annotations;
        annopath.format(imagename) should be the XML annotations file.
    imagesetfile: Text file containing the list of images, one per line.
    classname: Category name to evaluate.
    cachedir: Directory for caching the parsed annotations.
    [ovthresh]: IoU overlap threshold (default = 0.5).
    [use_07_metric]: Whether to use VOC07's 11-point AP computation
        (default False).

    NOTE(review): relies on an external `parse_rec(xml_path)` that returns a
    list of object dicts with keys 'name', 'bbox', 'difficult' — it is not
    defined in this file; confirm it is importable at call time.
    """
    # Assumes detections are in detpath.format(classname), annotations in
    # annopath.format(imagename), and imagesetfile lists one image per line.
    # cachedir caches the annotations in a pickle file.

    # First load ground truth.
    if not os.path.isdir(cachedir):
        os.mkdir(cachedir)
    imageset = os.path.splitext(os.path.basename(imagesetfile))[0]
    cachefile = os.path.join(cachedir, imageset + '_annots.pkl')
    # Read list of images.
    with open(imagesetfile, 'r') as f:
        lines = f.readlines()
    imagenames = [x.strip() for x in lines]

    if not os.path.isfile(cachefile):
        # Load annotations from the XML files.
        recs = {}
        for i, imagename in enumerate(imagenames):
            recs[imagename] = parse_rec(annopath.format(imagename))
            if i % 100 == 0:
                logger.info('Reading annotation for {:d}/{:d}'.format(
                    i + 1, len(imagenames)))
        # Save to cache. Pickle files must be opened in binary mode
        # (the original Python-2 cPickle code used text mode).
        logger.info('Saving cached annotations to {:s}'.format(cachefile))
        with open(cachefile, 'wb') as f:
            pickle.dump(recs, f)
    else:
        # Load from cache.
        with open(cachefile, 'rb') as f:
            recs = pickle.load(f)

    # Extract ground-truth objects for this class.
    class_recs = {}
    npos = 0
    for imagename in imagenames:
        R = [obj for obj in recs[imagename] if obj['name'] == classname]
        bbox = np.array([x['bbox'] for x in R])
        # np.bool was removed in NumPy 1.24; the builtin bool is equivalent.
        difficult = np.array([x['difficult'] for x in R]).astype(bool)
        det = [False] * len(R)
        # 'difficult' boxes are excluded from the positive count.
        npos = npos + sum(~difficult)
        class_recs[imagename] = {'bbox': bbox,
                                 'difficult': difficult,
                                 'det': det}

    # Read detections.
    detfile = detpath.format(classname)
    with open(detfile, 'r') as f:
        lines = f.readlines()

    splitlines = [x.strip().split(' ') for x in lines]
    image_ids = [x[0] for x in splitlines]
    confidence = np.array([float(x[1]) for x in splitlines])
    BB = np.array([[float(z) for z in x[2:]] for x in splitlines])

    # Sort detections by decreasing confidence.
    sorted_ind = np.argsort(-confidence)
    BB = BB[sorted_ind, :]
    image_ids = [image_ids[x] for x in sorted_ind]

    # Go down detections and mark TPs and FPs.
    nd = len(image_ids)
    tp = np.zeros(nd)
    fp = np.zeros(nd)
    for d in range(nd):
        R = class_recs[image_ids[d]]
        bb = BB[d, :].astype(float)
        ovmax = -np.inf
        BBGT = R['bbox'].astype(float)

        if BBGT.size > 0:
            # Compute overlaps.
            # Intersection (the "+ 1." follows the VOC convention of
            # inclusive pixel coordinates).
            ixmin = np.maximum(BBGT[:, 0], bb[0])
            iymin = np.maximum(BBGT[:, 1], bb[1])
            ixmax = np.minimum(BBGT[:, 2], bb[2])
            iymax = np.minimum(BBGT[:, 3], bb[3])
            iw = np.maximum(ixmax - ixmin + 1., 0.)
            ih = np.maximum(iymax - iymin + 1., 0.)
            inters = iw * ih

            # Union.
            uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
                   (BBGT[:, 2] - BBGT[:, 0] + 1.) *
                   (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)

            overlaps = inters / uni
            ovmax = np.max(overlaps)
            jmax = np.argmax(overlaps)

        if ovmax > ovthresh:
            if not R['difficult'][jmax]:
                if not R['det'][jmax]:
                    # First match to this ground-truth box: true positive.
                    tp[d] = 1.
                    R['det'][jmax] = 1
                else:
                    # Duplicate detection of an already-matched box.
                    fp[d] = 1.
        else:
            fp[d] = 1.

    # Compute precision and recall.
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)
    rec = tp / float(npos)
    # Avoid divide by zero in case the first detection matches a difficult
    # ground truth.
    prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)

    ap = voc_ap(rec, prec, use_07_metric)

    return rec, prec, ap