- import sys, cv2, os
- sys.path.insert(0, "../")
- from quantization import *
- from metrics import *
- import os
- from get_map import letterbox
- from config import net_shape
class CustomDataset(BaseDataset):
    """Calibration dataset: yields letterboxed, RGB, CHW float32 images.

    Each item is a numpy array of shape (3, H, W) scaled to roughly [0, 1),
    ready to be fed to the quantization toolchain.
    """

    def __init__(self, image_path_list):
        super().__init__()
        # Paths to the calibration images; files are read lazily per item.
        self.image_path_list = image_path_list

    def __getitem__(self, item):
        # Read as BGR (OpenCV default).
        ori_image = cv2.imread(self.image_path_list[item], cv2.IMREAD_COLOR)
        # Letterbox to the fixed network input shape, preserving aspect ratio.
        process_image, ratio, (dw, dh) = letterbox(ori_image, new_shape=net_shape, auto=False)
        srcnp = cv2.cvtColor(process_image, cv2.COLOR_BGR2RGB)
        # NOTE(review): divides by 256, not 255 — presumably what the
        # quantization toolchain expects (power-of-two scale); confirm
        # before "fixing" to 255.
        srcnp = srcnp.astype(np.float32) / 256
        # HWC -> CHW. (The former redundant np.array(srcnp) copy was removed;
        # srcnp is already an ndarray after astype.)
        srcnp = np.transpose(srcnp, [2, 0, 1])
        return srcnp

    def __len__(self):
        return len(self.image_path_list)
# All project paths derive from the PROJECT_NAME environment variable
# (e.g. export PROJECT_NAME='model_fire').
PROJECT_NAME = os.environ['PROJECT_NAME']
CAFFE_PROTO = f'{PROJECT_NAME}/model_caffe/model_0.prototxt'
CAFFE_MODEL = f'{PROJECT_NAME}/model_caffe/model_0.caffemodel'
wksp = f'{PROJECT_NAME}/model_quantization'
# exist_ok=True already handles the "directory exists" case; no pre-check needed.
os.makedirs(wksp, exist_ok=True)
image_path = f'{PROJECT_NAME}/img_train'
image_listdir = [f'{image_path}/{path}' for path in os.listdir(image_path)]
# Conv layers kept in fp16 (the YOLOv8 detection-head output convs);
# set fp16 = False to quantize everything.
fp16 = {'/model.22/cv2.1/cv2.1.2/Conv', '/model.22/cv3.2/cv3.2.2/Conv', '/model.22/cv2.0/cv2.0.2/Conv', '/model.22/cv3.1/cv3.1.2/Conv',
        '/model.22/cv2.2/cv2.2.2/Conv', '/model.22/cv3.0/cv3.0.2/Conv'}
# fp16 = False
resplit = True
def quan():
    """First-pass quantization over the calibration images.

    Builds a level-6 (metric-driven fraction-length search, no labels)
    config for inputs, weights and outputs and hands it to the toolchain's
    ``run``. ``deploy`` stays False so ``finetune()`` can resume from the
    checkpoint afterwards.
    """
    def generate_cfg():
        cfg = EasyDict()
        cfg.workspace = wksp
        cfg.logger_filename = "log_quan.txt"
        cfg.ctx = [1]        # GPU IDs
        cfg.num_workers = 8  # dataloader workers
        cfg.batch_size = 64
        cfg.proto = CAFFE_PROTO
        cfg.model = CAFFE_MODEL
        cfg.dataset = CustomDataset(image_listdir)  # *Dataset instance
        # Output *layer* names (not blob names): the three YOLOv8 head concats.
        cfg.outputs_name = ['/model.22/Concat', '/model.22/Concat_1', '/model.22/Concat_2']
        cfg.checkpoint = "checkpoint_quan"
        cfg.deploy = False  # must be False so finetune() can run afterwards

        cfg.merge_cfg = EasyDict()
        cfg.merge_cfg.resplit = resplit

        cfg.in_cfg = EasyDict()
        cfg.in_cfg.level = 6
        cfg.in_cfg.no_label = True
        cfg.in_cfg.level_cfg = generate_level_6_cfg(yolo_metric_hjj, [], margin=6, search_fls=range(-9, 45))

        cfg.weight_cfg = EasyDict()
        cfg.weight_cfg.level = 6
        cfg.weight_cfg.no_label = True
        cfg.weight_cfg.level_cfg = generate_level_6_cfg(yolo_metric_hjj, [], margin=6, search_fls=range(-9, 45))
        cfg.weight_cfg.f16 = fp16

        cfg.out_cfg = EasyDict()
        cfg.out_cfg.level = 6
        cfg.out_cfg.no_label = True
        # Tighter sim_threshold on outputs than on inputs/weights.
        cfg.out_cfg.level_cfg = generate_level_6_cfg(yolo_metric_hjj, [], margin=6, sim_threshold=1e-10, search_fls=range(-9, 45))
        cfg.out_cfg.f16 = fp16

        return cfg

    run(generate_cfg())
def finetune():
    """Finetune/deploy pass: resumes from ``quan()``'s checkpoint.

    Same level-6 search configuration as ``quan()`` but with
    ``finetune=True`` and ``deploy=True`` so the final deployable model is
    produced. Logs to a separate file (log_quan2.txt).
    """
    def generate_cfg():
        cfg = EasyDict()
        # global config
        cfg.workspace = wksp
        cfg.logger_filename = "log_quan2.txt"
        cfg.ctx = [1]        # GPU IDs
        cfg.num_workers = 8  # dataloader workers
        cfg.batch_size = 64
        cfg.dataset = CustomDataset(image_listdir)  # *Dataset instance
        cfg.proto = CAFFE_PROTO
        cfg.model = CAFFE_MODEL
        # Output *layer* names (not blob names): the three YOLOv8 head concats.
        cfg.outputs_name = ['/model.22/Concat', '/model.22/Concat_1', '/model.22/Concat_2']
        cfg.checkpoint = "checkpoint_quan"  # same checkpoint quan() wrote
        cfg.finetune = True
        cfg.deploy = True

        cfg.merge_cfg = EasyDict()
        cfg.merge_cfg.resplit = resplit

        cfg.in_cfg = EasyDict()
        cfg.in_cfg.level = 6
        cfg.in_cfg.no_label = True
        cfg.in_cfg.level_cfg = generate_level_6_cfg(yolo_metric_hjj, [], margin=6, search_fls=range(-9, 45))

        cfg.weight_cfg = EasyDict()
        cfg.weight_cfg.level = 6
        cfg.weight_cfg.no_label = True
        cfg.weight_cfg.level_cfg = generate_level_6_cfg(yolo_metric_hjj, [], margin=6, search_fls=range(-9, 45))
        cfg.weight_cfg.f16 = fp16

        cfg.out_cfg = EasyDict()
        cfg.out_cfg.level = 6
        cfg.out_cfg.no_label = True
        # Tighter sim_threshold on outputs than on inputs/weights.
        cfg.out_cfg.level_cfg = generate_level_6_cfg(yolo_metric_hjj, [], margin=6, sim_threshold=1e-10, search_fls=range(-9, 45))
        cfg.out_cfg.f16 = fp16

        return cfg

    run(generate_cfg())
def quan_hjj():
    """Alternative quantization entry point (weights only, level 6).

    Targets a model.24-headed network (YOLOv5-style detection convs) rather
    than the model.22 concats used by ``quan()``/``finetune()``; only the
    weight config is searched. Currently not invoked from ``__main__``.
    """
    def generate_cfg():
        cfg = EasyDict()
        cfg.workspace = wksp
        cfg.logger_filename = "log_quan.txt"
        cfg.ctx = [1]        # GPU IDs
        cfg.num_workers = 8  # dataloader workers
        cfg.batch_size = 32
        cfg.proto = CAFFE_PROTO
        cfg.model = CAFFE_MODEL
        cfg.dataset = CustomDataset(image_listdir)  # *Dataset instance
        # Output *layer* names (not blob names).
        cfg.outputs_name = ['/model.24/m.0/Conv', '/model.24/m.1/Conv', '/model.24/m.2/Conv']
        cfg.checkpoint = "checkpoint_quan"

        cfg.weight_cfg = EasyDict()
        cfg.weight_cfg.level = 6
        cfg.weight_cfg.no_label = True
        cfg.weight_cfg.level_cfg = generate_level_6_cfg(yolo_metric_hjj, [], margin=6, search_fls=range(-9, 45), select_mode="median")

        return cfg

    run(generate_cfg())
-
if __name__ == "__main__":
    # Usage:
    #   conda activate eeasy_caffe_v1.2.0
    #   export PROJECT_NAME='model_fire'
    #   nohup python py/quan.py > quan.log 2>&1 & tail -f quan.log

    # Start from a clean workspace and clear the toolchain's shared-memory
    # leftovers before each stage.
    # NOTE(review): os.system with a shell string is acceptable here only
    # because wksp derives from a trusted env var; shutil.rmtree would be
    # safer. The /dev/shm glob requires the shell, so os.system stays.
    os.system('rm -r ' + wksp)
    os.system('rm /dev/shm/*eeasy*')
    print('running quan()....')
    quan()
    # (Was issued twice back-to-back; once is sufficient.)
    os.system('rm /dev/shm/*eeasy*')
    print('running finetune()....')
    finetune()
    os.system('rm /dev/shm/*eeasy*')
|