- import os, torch, cv2, math, tqdm, time, caffe, shutil, argparse
- import numpy as np
- from quantization import *
- from config import image_base_path, label_base_path
- from sklearn.preprocessing import normalize
- PROJECT_NAME = os.environ['PROJECT_NAME']
- def get_caffe_name():
- with open(f'{PROJECT_NAME}/model_caffe/model_0.prototxt') as f:
- data = list(map(lambda x:x.strip(), f.readlines()))
- data = [i.split(':')[1] for i in data if 'top' in i]
- data = [i for i in data if 'output_0' in i][1:]
- data = [i for i in data if 'crop' not in i]
- data = [i[2:-1] for i in data]
- return data + ['output0', 'output1', 'output2']
- def get_qkqb_name():
- with open(f'{PROJECT_NAME}/model_caffe/model_0.prototxt') as f:
- data = list(map(lambda x:x.strip(), f.readlines()))
- data = [i.split(':')[1] for i in data if 'name' in i]
- data = [i for i in data if 'output' not in i][1:]
- data = [i for i in data if 'crop' not in i]
- data = [i[2:-1] for i in data]
- return data
- class CustomDataset(BaseDataset):
- def __init__(self, image_path_list):
- super().__init__()
- self.image_path_list = image_path_list
- def __getitem__(self, item):
- print(item, end=' ')
- # read image
- ori_image = cv2.imdecode(np.fromfile(self.image_path_list[item], np.uint8), cv2.IMREAD_COLOR)
- # letterbox
- process_image, ratio, (dw, dh) = letterbox(ori_image, auto=False)
- # process_image = cv2.resize(ori_image, (640, 640))
- srcnp = cv2.cvtColor(process_image, cv2.COLOR_BGR2RGB)
- srcnp = srcnp.astype(np.float32) / 256
- srcnp = np.array(srcnp)
- srcnp = np.transpose(srcnp, [2,0,1])
- return srcnp
- def __len__(self):
- return len(self.image_path_list)
- def get_result_from_caffe(image_path_list, im_shape=(640, 640)):
- prototxt_file = f'{PROJECT_NAME}/model_caffe/model_0.prototxt'
- caffemodel_file = f'{PROJECT_NAME}/model_caffe/model_0.caffemodel'
-
- dataset = CustomDataset(image_path_list)
- print('begin caffe_forward....')
- since = time.time()
- pred = net.src_forward(prototxt_file, caffemodel_file, dataset, 64, get_caffe_name())
- print(f'src_forward finish. using time:{time.time() - since:.5f}.')
- return pred
- def get_result_from_qkqb(image_path_list, im_shape=(640, 640)):
- qk_file = f'{PROJECT_NAME}/model_quantization/checkpoint_quan.qk'
- qb_file = f'{PROJECT_NAME}/model_quantization/checkpoint_quan.qb'
-
- dataset = CustomDataset(image_path_list)
- print('begin easy_forward....')
- since = time.time()
- pred = net.easy_forward(qk_file, qb_file, dataset, 1, get_qkqb_name())
- print(f'easy_forward finish. using time:{time.time() - since:.5f}.')
- return pred
-
- if __name__ == '__main__':
-
- name_list = [os.path.splitext(i) for i in os.listdir(image_base_path)]
- image_listdir = [f'{image_base_path}/{i[0]}{i[1]}' for i in name_list][:20]
- label_listdir = [f'{label_base_path}/{i[0]}.txt' for i in name_list]
-
- net = Net(1)
- caffe_preds = get_result_from_caffe(image_listdir, (640, 640))
- # print(caffe_preds.keys())
- net.release()
-
- net = Net(1)
- qkqb_preds = get_result_from_qkqb(image_listdir, (640, 640))
- # print(qkqb_preds.keys())
- net.release()
-
- arr = []
- for i in caffe_preds:
- if i[:-9] in qkqb_preds:
- print(i, i[:-9])
- arr.append([i[:-9], np.mean(np.abs(caffe_preds[i] - qkqb_preds[i[:-9]]))])
-
- arr = sorted(arr, key=lambda x:x[-1])
- for i in arr:
- print(f'{i[0]} {i[1]}')
-
- print(f'output0 diff:{np.mean(np.abs(caffe_preds["output0"] - qkqb_preds["/model.22/Concat"])):.5f}')
- print(f'output1 diff:{np.mean(np.abs(caffe_preds["output1"] - qkqb_preds["/model.22/Concat_1"])):.5f}')
- print(f'output2 diff:{np.mean(np.abs(caffe_preds["output2"] - qkqb_preds["/model.22/Concat_2"])):.5f}')
- # caffe_out, qkqb_out = caffe_preds["output0"], qkqb_preds["/model.22/Concat_4"]
- # n = caffe_out.shape[0]
- # similarity = 0.0
- # similarity += (normalize(caffe_preds["output0"].reshape((n, -1))[:, :4]) - normalize(qkqb_preds["/model.22/Concat_4"].reshape((n, -1))[:, :4])).sum(1).mean() * 0.6
- # similarity += (normalize(caffe_preds["output0"].reshape((n, -1))[:, 4:]) - normalize(qkqb_preds["/model.22/Concat_4"].reshape((n, -1))[:, 4:])).sum(1).mean() * 0.4
- # print(f'output similarity:{similarity:.5f}')