process_bin_result.py

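"""Post-process hardware-simulator .bin outputs for a quantized YOLOv8 model.

Reads the raw int8/int16 dumps of the three /model.22/Concat detection-head
outputs, re-tiles them into Caffe (N, C, H, W) layout, dequantizes them,
decodes boxes against the anchor grid, runs NMS, and loads a reference dump
from out.txt for comparison.
"""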
from quantization import *
import os, math, torch
import numpy as np
from yolo import non_max_suppression

PROJECT_NAME = os.environ['PROJECT_NAME']
qk_file = PROJECT_NAME + '/model_quantization/checkpoint_quan.qk'
# qk_file = '/root/eeasy/eeasy_quan/yolov8toezb_ygy/model_quantization/checkpoint_quan.qk'


def make_anchor(input_shape=(640, 640), grid_cell_offset=0.5):
    anchor_points = []
    for stride in [8, 16, 32]:
        h, w = input_shape[0] // stride, input_shape[1] // stride
        sx = np.arange(w) + grid_cell_offset
        sy = np.arange(h) + grid_cell_offset
        sy, sx = np.meshgrid(sy, sx)
        anchor_points.append(np.stack((sy, sx), -1).reshape((-1, 2)))
    return np.transpose(np.concatenate(anchor_points), axes=[1, 0])
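
# Note: for the default 640x640 input the three strides give
# 80*80 + 40*40 + 20*20 = 8400 grid cells, so make_anchor() returns a
# (2, 8400) array of cell-centre coordinates (each offset 0.5 into its cell).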


def yololayer(res):
    strides = np.load('py/tensor.npy', allow_pickle=True)
    anchor_points = make_anchor()
    x1y1 = anchor_points - res[:, :2]
    x2y2 = anchor_points + res[:, 2:4]
    c_xy = (x1y1 + x2y2) / 2
    wh = x2y2 - x1y1
    res[:, :4] = np.concatenate((c_xy, wh), axis=1) * strides
    # res[:, :4] = np.concatenate((x1y1, x2y2), axis=1) * strides
    return res
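
# yololayer mirrors the standard YOLOv8 dist2bbox decode: the first four
# channels are (left, top) / (right, bottom) distances from each cell centre;
# they are turned into corner boxes, converted to (cx, cy, w, h), and scaled
# to input-image pixels by the per-cell strides loaded from py/tensor.npy.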


if __name__ == '__main__':
    shapes = Helper.get_caffe_output_shapes(qk_file)
    # bin_path = ['model_pet/_share_res_hw__model.22_Concat.bin', 'model_pet/_share_res_hw__model.22_Concat_1.bin', 'model_pet/_share_res_hw__model.22_Concat_2.bin']
    bin_path = ['model_pet/sim/res_hw/_model.22_Concat.bin', 'model_pet/sim/res_hw/_model.22_Concat_1.bin', 'model_pet/sim/res_hw/_model.22_Concat_2.bin']
    name = ['/model.22/Concat', '/model.22/Concat_1', '/model.22/Concat_2']
    # Read each dump with the bit width recorded in the checkpoint (int8 or
    # int16) and re-tile it from the hardware layout into Caffe (N, C, H, W).
    sim_res1 = np.fromfile(bin_path[0], dtype=np.int8 if Helper.get_quantize_out_bw(qk_file, name[0]) == 8 else np.int16)  # /model.22/Concat
    bin_res1 = Helper.hw_data_to_caffe_int_data(sim_res1, shapes[name[0]])
    sim_res2 = np.fromfile(bin_path[1], dtype=np.int8 if Helper.get_quantize_out_bw(qk_file, name[1]) == 8 else np.int16)  # /model.22/Concat_1
    bin_res2 = Helper.hw_data_to_caffe_int_data(sim_res2, shapes[name[1]])
    sim_res3 = np.fromfile(bin_path[2], dtype=np.int8 if Helper.get_quantize_out_bw(qk_file, name[2]) == 8 else np.int16)  # /model.22/Concat_2
    bin_res3 = Helper.hw_data_to_caffe_int_data(sim_res3, shapes[name[2]])
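    # Dequantize: the hardcoded 2**3 implies these head outputs carry 3
    # fractional bits (a shift presumably taken from the quantization checkpoint).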
    bin_res1, bin_res2, bin_res3 = bin_res1 / math.pow(2, 3), bin_res2 / math.pow(2, 3), bin_res3 / math.pow(2, 3)
    # Caffe layout is (1, c, h, w), e.g. '/model.22/Concat' -> (1, 6, 80, 80).
    # Flatten every cell of all three heads into one (8400, 6) list.
    res_python = []
    for data in [bin_res1, bin_res2, bin_res3]:
        for i in range(data.shape[2]):
            for j in range(data.shape[3]):
                res_python.append(data[0, :, i, j])
    res_python = np.stack(res_python)                   # (8400, 6)
    res_python = np.transpose(res_python, axes=[1, 0])  # (6, 8400)
    res_python = res_python[None]                       # (1, 6, 8400)
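    # Illustrative vectorised equivalent of the gather above (not executed):
    #   res_python = np.concatenate(
    #       [d[0].reshape(d.shape[1], -1) for d in (bin_res1, bin_res2, bin_res3)],
    #       axis=1)[None]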
    res_python = yololayer(res_python)  # (1, 6, 8400)
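    # non_max_suppression is assumed to follow the ultralytics convention:
    # (batch, 4 + nc, num_anchors) in, one (n, 6) tensor of
    # (x1, y1, x2, y2, conf, cls) per image out.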
    res_python = non_max_suppression(torch.from_numpy(res_python))[0]
    print(res_python.shape)
    print(res_python)
    # res_python = np.transpose(res_python[0], axes=[1, 0])  # (8400, 6)
    # res_c = []
    # for data in [sim_res1, sim_res2, sim_res3]:
    #     for i in range(0, data.shape[0], 16):
    #         res_c.append(data[i:i+6])
    # res_c = np.stack(res_c)  # (8400, 6)
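    # (The commented block above would read the raw hardware dumps directly:
    # each pixel's channels appear to be padded to 16 in the dump, so every
    # 16-element group holds the 6 valid values.)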
    # Load the reference dump and compare shapes.
    with open('out.txt') as f:
        data = np.array([np.array(line.strip().split(), dtype=float) for line in f])
    print(data.shape, res_python.shape)
    # print(np.sum(np.abs(res_c - res_python)))
    # print(np.sum(np.abs(res_python - data)))
    # random_index = np.arange(8400)
    # # np.random.shuffle(random_index)
    # for i in random_index[:100]:
    #     print(i, res_python[i], data[i])