import argparse
import math
import os
import shutil
import time

import caffe
import cv2
import numpy as np
import torch
import tqdm

from quantization import *
from yolo import non_max_suppression, plot_one_box, yolov8layer, letterbox, xywh2xyxy, box_iou
from config import image_base_path, label_base_path, net_shape

PROJECT_NAME = os.environ['PROJECT_NAME']
def clip_boxes(boxes, shape):
    # Clip boxes (xyxy) to image shape (height, width)
    if isinstance(boxes, torch.Tensor):  # faster individually
        boxes[..., 0].clamp_(0, shape[1])  # x1
        boxes[..., 1].clamp_(0, shape[0])  # y1
        boxes[..., 2].clamp_(0, shape[1])  # x2
        boxes[..., 3].clamp_(0, shape[0])  # y2
    else:  # np.array (faster grouped)
        boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1])  # x1, x2
        boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0])  # y1, y2
def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):
    # Rescale boxes (xyxy) from img1_shape (letterboxed) back to img0_shape (original image)
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    boxes[..., [0, 2]] -= pad[0]  # x padding
    boxes[..., [1, 3]] -= pad[1]  # y padding
    boxes[..., :4] /= gain
    clip_boxes(boxes, img0_shape)
    return boxes
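# A quick sanity check for scale_boxes (hypothetical numbers, not from this
# project's data): letterboxing a 480x640 image into a 640x640 canvas gives
# gain = min(640/480, 640/640) = 1.0 and pad = (0, 80), so a letterboxed box
# (100, 180, 200, 280) maps back to (100, 100, 200, 200) in the original image:
#
#   b = np.array([[100., 180., 200., 280.]])
#   scale_boxes((640, 640), b, (480, 640))  # -> [[100., 100., 200., 200.]]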
def process_batch(detections, labels, iouv):
    """
    Return a correct-prediction matrix.
    Arguments:
        detections (array[N, 6]): x1, y1, x2, y2, conf, class
        labels (array[M, 5]): class, x1, y1, x2, y2
    Returns:
        correct (array[N, 10]): True/False for each of the 10 IoU levels
    """
    correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
    iou = box_iou(labels[:, 1:], detections[:, :4])
    correct_class = labels[:, 0:1] == detections[:, 5]
    for i in range(len(iouv)):
        x = torch.where((iou >= iouv[i]) & correct_class)  # IoU >= threshold and classes match
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detect, iou]
            if x[0].shape[0] > 1:
                matches = matches[matches[:, 2].argsort()[::-1]]  # sort by IoU, best first
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]  # one label per detection
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]  # one detection per label
            correct[matches[:, 1].astype(int), i] = True
    return torch.tensor(correct, dtype=torch.bool, device=iouv.device)
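# Illustration (hypothetical values): with one ground-truth box and one
# detection of the same class whose IoU is 0.72, process_batch marks the
# detection correct at iouv = 0.50 .. 0.70 and incorrect at 0.75 .. 0.95 --
# exactly how a single prediction can count toward mAP@0.5 but not toward
# the stricter thresholds in mAP@0.5:0.95.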
def smooth(y, f=0.05):
    # Box filter of fraction f
    nf = round(len(y) * f * 2) // 2 + 1  # number of filter elements (must be odd)
    p = np.ones(nf // 2)  # ones padding
    yp = np.concatenate((p * y[0], y, p * y[-1]), 0)  # y padded with edge values
    return np.convolve(yp, np.ones(nf) / nf, mode='valid')  # y smoothed
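# For example (hypothetical sizes): with len(y) == 1000 and f == 0.05,
# nf = round(1000 * 0.05 * 2) // 2 + 1 = 51, so each output point is the
# mean of a 51-sample window; the edge padding keeps the output length at 1000.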
def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=''):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
    # Arguments
        tp: True positives (nparray, nx1 or nx10).
        conf: Objectness value from 0-1 (nparray).
        pred_cls: Predicted object classes (nparray).
        target_cls: True object classes (nparray).
        plot: Plot the precision-recall curve at mAP@0.5.
        save_dir: Plot save directory.
    # Returns
        The average precision as computed in py-faster-rcnn.
    """
    # Sort by objectness, descending
    i = np.argsort(-conf)
    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]

    # Find unique classes and the number of labels per class
    unique_classes, nt = np.unique(target_cls, return_counts=True)
    nc = unique_classes.shape[0]  # number of classes

    # Create Precision-Recall curve and compute AP for each class
    px, py = np.linspace(0, 1, 1000), []  # for plotting
    ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))
    for ci, c in enumerate(unique_classes):
        i = pred_cls == c
        n_l = nt[ci]  # number of labels
        n_p = i.sum()  # number of predictions
        if n_p == 0 or n_l == 0:
            continue

        # Accumulate FPs and TPs
        fpc = (1 - tp[i]).cumsum(0)
        tpc = tp[i].cumsum(0)

        # Recall
        recall = tpc / (n_l + eps)  # recall curve
        r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0)  # negative x, xp because xp decreases

        # Precision
        precision = tpc / (tpc + fpc)  # precision curve
        p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1)  # p at pr_score

        # AP from recall-precision curve
        for j in range(tp.shape[1]):
            ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
            if plot and j == 0:
                py.append(np.interp(px, mrec, mpre))  # precision at mAP@0.5

    # Compute F1 (harmonic mean of precision and recall)
    f1 = 2 * p * r / (p + r + eps)
    i = smooth(f1.mean(0), 0.1).argmax()  # index of max smoothed F1
    p, r, f1 = p[:, i], r[:, i], f1[:, i]
    tp = (r * nt).round()  # true positives
    fp = (tp / (p + eps) - tp).round()  # false positives
    return tp, fp, p, r, f1, ap, unique_classes.astype(int)
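# Reading the outputs (per the shapes above): ap has shape (nc, tp.shape[1]),
# so with the 10-element iouv used below, ap[:, 0] is AP@0.5 per class and
# ap.mean(1) would be mAP@0.5:0.95 per class; p, r, and f1 are reported at
# the single confidence threshold that maximizes the smoothed mean F1.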
def compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves.
    # Arguments
        recall: The recall curve (list)
        precision: The precision curve (list)
    # Returns
        Average precision, precision curve, recall curve
    """
    # Append sentinel values to beginning and end
    mrec = np.concatenate(([0.0], recall, [1.0]))
    mpre = np.concatenate(([1.0], precision, [0.0]))

    # Compute the precision envelope (monotonically non-increasing)
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

    # Integrate area under curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        x = np.linspace(0, 1, 101)  # 101-point interpolation (COCO)
        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
    else:  # 'continuous'
        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve
    return ap, mpre, mrec
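# Worked example (made-up curves): recall = [0.5, 1.0], precision = [1.0, 0.5]
# gives mrec = [0, 0.5, 1, 1] and an envelope mpre = [1, 1, 0.5, 0]; the
# 101-point interpolation then integrates to roughly 0.87, i.e. full precision
# up to 50% recall plus the triangle under the falling segment.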
class CustomDataset(BaseDataset):
    def __init__(self, image_path_list):
        super().__init__()
        self.image_path_list = image_path_list

    def __getitem__(self, item):
        print(item, end=' ')
        # read image (np.fromfile + imdecode handles non-ASCII paths)
        ori_image = cv2.imdecode(np.fromfile(self.image_path_list[item], np.uint8), cv2.IMREAD_COLOR)
        # letterbox to the network input shape
        process_image, ratio, (dw, dh) = letterbox(ori_image, new_shape=net_shape, auto=False)
        # process_image = cv2.resize(ori_image, (640, 640))
        srcnp = cv2.cvtColor(process_image, cv2.COLOR_BGR2RGB)
        srcnp = srcnp.astype(np.float32) / 256  # scale to [0, 1) with a power-of-two divisor
        srcnp = np.transpose(srcnp, [2, 0, 1])  # HWC -> CHW
        return srcnp

    def __len__(self):
        return len(self.image_path_list)
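# Minimal usage sketch (assuming BaseDataset comes from the quantization
# toolkit's wildcard import): each item is a float32 CHW array ready for the
# forward helpers below.
#
#   ds = CustomDataset(['a.jpg', 'b.jpg'])  # hypothetical paths
#   chw = ds[0]                             # shape (3, H, W), values in [0, 1)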
def get_result_from_caffe(image_path_list, im_shape=(640, 640), conf_thres=0.01, iou_thres=0.45):
    prototxt_file = f'{PROJECT_NAME}/model_caffe/model_0.prototxt'
    caffemodel_file = f'{PROJECT_NAME}/model_caffe/model_0.caffemodel'

    dataset = CustomDataset(image_path_list)
    print('begin caffe_forward....')
    since = time.time()
    pred = net.src_forward(prototxt_file, caffemodel_file, dataset, 64)  # `net` is the global Net created in __main__
    print(f'\ncaffe_forward finished in {time.time() - since:.5f}s.')

    out = yolov8layer([pred['output0'].copy(), pred['output1'].copy(), pred['output2'].copy()])
    out = non_max_suppression([torch.from_numpy(out)], conf_thres=conf_thres, iou_thres=iou_thres)
    for i in tqdm.tqdm(range(len(out)), desc='processing scale_boxes'):
        ori_image = cv2.imread(image_path_list[i])
        out[i][:, :4] = scale_boxes(im_shape, out[i][:, :4], ori_image.shape[:2])

        # vis pred result for debug
        # for j in out[i]:
        #     cv2.rectangle(ori_image, (int(j[0]), int(j[1])), (int(j[2]), int(j[3])), (0, 0, 255), thickness=2)
        # cv2.imwrite('./test.png', ori_image)
    return out
def get_result_from_qkqb(image_path_list, im_shape=(640, 640), conf_thres=0.01, iou_thres=0.45):
    qk_file = f'{PROJECT_NAME}/model_quantization/checkpoint_quan.qk'
    qb_file = f'{PROJECT_NAME}/model_quantization/checkpoint_quan.qb'

    dataset = CustomDataset(image_path_list)
    print('begin easy_forward....')
    since = time.time()
    pred = net.easy_forward(qk_file, qb_file, dataset, 64)  # `net` is the global Net created in __main__
    print(f'easy_forward finished in {time.time() - since:.5f}s.')

    out = yolov8layer([pred['/model.22/Concat'].copy(), pred['/model.22/Concat_1'].copy(), pred['/model.22/Concat_2'].copy()])
    out = non_max_suppression([torch.from_numpy(out)], conf_thres=conf_thres, iou_thres=iou_thres)
    for i in tqdm.tqdm(range(len(out)), desc='processing scale_boxes'):
        ori_image = cv2.imdecode(np.fromfile(image_path_list[i], np.uint8), cv2.IMREAD_COLOR)
        out[i][:, :4] = scale_boxes(im_shape, out[i][:, :4], ori_image.shape[:2])

        # vis pred result for debug
        # for j in out[i]:
        #     cv2.rectangle(ori_image, (int(j[0]), int(j[1])), (int(j[2]), int(j[3])), (0, 0, 255), thickness=2)
        # cv2.imwrite('./test.png', ori_image)
    return out
def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--type', type=str, choices=['caffe', 'qkqb'], required=True, help='run the float caffe model or the quantized qk/qb model')
    parser.add_argument('--iou', type=float, default=0.45, help='NMS IoU threshold')
    parser.add_argument('--conf', type=float, default=0.01, help='confidence threshold')
    parser.add_argument('--save_path', type=str, default=f'{PROJECT_NAME}/map_img_save', help='image save path')
    parser.add_argument('--device', type=int, default=1, help='device id')
    opt = parser.parse_known_args()[0]
    return opt
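# Typical invocation (eval.py is a placeholder for this script's filename):
#
#   python eval.py --type qkqb --conf 0.01 --iou 0.45 --device 1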

if __name__ == '__main__':
    opt = parse_opt()
    net = Net(opt.device)

    iouv = torch.linspace(0.5, 0.95, 10)  # IoU vector for mAP@0.5:0.95
    niou = iouv.numel()
    stats = []

    name_list = [os.path.splitext(i) for i in os.listdir(image_base_path)]
    image_listdir = [f'{image_base_path}/{i[0]}{i[1]}' for i in name_list]
    label_listdir = [f'{label_base_path}/{i[0]}.txt' for i in name_list]

    if os.path.exists(opt.save_path):
        shutil.rmtree(opt.save_path)
    os.makedirs(opt.save_path, exist_ok=True)

    if opt.type == 'caffe':
        preds = get_result_from_caffe(image_listdir, net_shape, conf_thres=opt.conf, iou_thres=opt.iou)
    elif opt.type == 'qkqb':
        preds = get_result_from_qkqb(image_listdir, net_shape, conf_thres=opt.conf, iou_thres=opt.iou)
    net.release()

    for idx, path in enumerate(tqdm.tqdm(name_list)):
        image_path = f'{image_base_path}/{path[0]}{path[1]}'
        label_path = f'{label_base_path}/{path[0]}.txt'

        # read image
        ori_image = cv2.imread(image_path)
        height, width, _ = ori_image.shape

        # read label (YOLO format: class, cx, cy, w, h, normalized)
        with open(label_path) as f:
            label = f.readlines()
        if len(label) == 0:
            label = np.empty(shape=(0, 5))
        else:
            label = np.array([np.array(x.strip().split(), dtype=np.float32) for x in label])
            label[:, 1:] *= np.array([width, height, width, height])  # de-normalize
            label[:, 1:] = xywh2xyxy(label[:, 1:])

        pred = preds[idx]
        # vis label and pred results for debug
        for i in label:
            cv2.rectangle(ori_image, (int(i[1]), int(i[2])), (int(i[3]), int(i[4])), (0, 0, 255), thickness=2)  # red: label
        for i in pred:
            cv2.rectangle(ori_image, (int(i[0]), int(i[1])), (int(i[2]), int(i[3])), (0, 255, 0), thickness=2)  # green: pred
        cv2.imwrite(f'{opt.save_path}/{path[0]}{path[1]}', ori_image)

        nl, npr = label.shape[0], pred.shape[0]  # number of labels, predictions
        correct = torch.zeros(npr, niou, dtype=torch.bool)
        if npr == 0:
            if nl:
                stats.append((correct, *torch.zeros((2, 0)), torch.from_numpy(label[:, 0])))
            continue

        if nl:
            correct = process_batch(pred, torch.from_numpy(label), iouv)
        stats.append((correct, pred[:, 4], pred[:, 5], torch.from_numpy(label[:, 0])))

    stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)]
    tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats)
    print(f'precision: {p}')
    print(f'recall: {r}')
    print(f'mAP@0.5: {ap[:, 0]}')