# metrics.py

import numpy as np
from sklearn.preprocessing import normalize

def classification_acc_metric(names, x, *args):
    """ Classification Acc Metric """
    # get probability
    output_name = args[-1]
    label = args[0]
    prob = x[output_name].copy()  # (N, K); copy so the in-place softmax does not mutate x
    # soft-max
    prob -= np.max(prob, axis=-1, keepdims=True)  # avoid numerical issues
    prob = np.exp(prob)
    prob /= np.sum(prob, axis=-1, keepdims=True)
    # acc
    target = prob.argmax(axis=-1)  # top 1
    acc = np.mean(target == label)
    return acc

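# Usage sketch (hypothetical names; the calling convention is inferred from the
# signatures in this file: `x` maps output names to arrays, args[0] carries the
# reference data or labels, and args[-1] names the output to score):
#   outputs = {"logits": logits}            # logits: (N, K) float array
#   acc = classification_acc_metric(None, outputs, labels, "logits")
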
def classification_entropy_metric(names, x, *args):
    """ Classification Entropy Loss Metric """
    # get probability
    output_name, label = args[-1], args[0]
    prob = x[output_name].copy()  # copy so the in-place ops do not mutate x
    # log-soft-max
    prob -= np.max(prob, axis=-1, keepdims=True)
    prob = np.exp(prob)
    prob /= np.sum(prob, axis=-1, keepdims=True)
    prob = np.log(prob)
    prob = np.nan_to_num(prob)  # map the -inf produced by log(0) to a finite value
    # negated cross-entropy loss. Larger means better, so we do not multiply by -1 here.
    return np.mean(prob[range(prob.shape[0]), label])

def recognition_pairs_metric(names, x, *args):
    """ Recognition Pairs Metric """
    # get register embedding and test embedding (pairs are interleaved row-wise)
    output_name, label = args[-1], args[0]
    embedding = x[output_name]
    register, test = embedding[::2], embedding[1::2]
    # cosine similarity of each (register, test) pair
    register, test = normalize(register), normalize(test)
    cosine_similarity = (register * test).sum(axis=1)  # row-wise dot product
    # sweep thresholds and keep the best pair-verification accuracy
    best_acc, best_threshold = 0.0, 0.0
    for threshold in np.linspace(0, 1, 20):
        pred = cosine_similarity > threshold
        tp = np.sum(np.logical_and(pred, label))
        tn = np.sum(np.logical_and(np.logical_not(pred), np.logical_not(label)))
        acc = 1.0 * (tp + tn) / len(label)
        if acc > best_acc:
            best_acc, best_threshold = acc, threshold
    return best_acc

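# Usage sketch (hypothetical data layout, inferred from the slicing above):
# embeddings are interleaved as [register_0, test_0, register_1, test_1, ...],
# and label[i] is 1 when register_i and test_i belong to the same identity.
#   outputs = {"embedding": emb}            # emb: (2 * P, D)
#   acc = recognition_pairs_metric(None, outputs, pair_labels, "embedding")
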
def recognition_before_after_metric(names, x, *args):
    """ Compare Output Before And After Quantization """
    # get data
    output_name = args[-1]
    output_before_quantization = args[0]
    before_embedding = output_before_quantization[output_name]
    after_embedding = x[output_name]
    if before_embedding.ndim > 2:
        before_embedding = before_embedding.reshape(before_embedding.shape[0], -1)
    if after_embedding.ndim > 2:
        after_embedding = after_embedding.reshape(after_embedding.shape[0], -1)
    # a face's similarity must stay close to 1.0 before and after quantization
    before_embedding, after_embedding = normalize(before_embedding), normalize(after_embedding)
    cosine_similarity = (before_embedding * after_embedding).sum(axis=1)
    # average the pass rate over several strict thresholds
    thresholds = [0.85, 0.875, 0.90, 0.925, 0.95, 0.975]
    ave_acc = 0.0
    for threshold in thresholds:
        ave_acc += 1.0 * np.sum(cosine_similarity > threshold) / len(cosine_similarity)
    ave_acc /= len(thresholds)
    return ave_acc

def classification_top_1_acc(names, x, *args):
    """ classification top 1 acc """
    # prob
    output_name = args[-1]
    label = args[0]
    prob = x[output_name].squeeze()
    if prob.ndim == 1:
        prob = prob[None, :]
    # soft-max; squeeze() may return a view of x, so avoid in-place subtraction
    prob = prob - np.max(prob, axis=-1, keepdims=True)  # avoid numerical issues
    prob = np.exp(prob)
    prob /= np.sum(prob, axis=-1, keepdims=True)
    # top 1 acc
    target = prob.argmax(axis=-1)
    acc = np.mean(target == label)
    return acc

def classification_top_5_acc(names, x, *args):
    """ classification top 5 acc """
    # prob
    output_name = args[-1]
    label = args[0]
    prob = x[output_name].squeeze()
    if prob.ndim == 1:
        prob = prob[None, :]
    # soft-max; squeeze() may return a view of x, so avoid in-place subtraction
    prob = prob - np.max(prob, axis=-1, keepdims=True)  # avoid numerical issues
    prob = np.exp(prob)
    prob /= np.sum(prob, axis=-1, keepdims=True)
    # top 5 acc: a sample is correct if its label is among the 5 largest scores
    index = np.argsort(prob, axis=-1)[:, -5:]  # (N, 5) candidate classes
    acc = 1.0 * np.sum(index == np.asarray(label)[:, None]) / len(label)
    return acc

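# Note: softmax is monotonic, so the top-1/top-5 rankings above would be identical
# on the raw logits; the normalization is kept for parity with the other metrics.
# A quick self-check with synthetic data (hypothetical shapes and names):
#   logits = np.random.randn(8, 100)
#   labels = np.random.randint(0, 100, size=8)
#   classification_top_5_acc(None, {"out": logits}, labels, "out")
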
def cos_similarity_metric(names, x, *args):
    """ Mean cosine similarity between the outputs before and after quantization """
    output_name = args[-1]
    before_emb = args[0][output_name]
    after_emb = x[output_name]
    # any NaN/Inf in the quantized output invalidates the metric
    if not np.all(np.isfinite(after_emb)):
        return 0.0
    if before_emb.ndim == 1:
        before_emb = before_emb[None, ...]
    if after_emb.ndim == 1:
        after_emb = after_emb[None, ...]
    if before_emb.ndim > 2:
        before_emb = before_emb.reshape(before_emb.shape[0], -1)
    if after_emb.ndim > 2:
        after_emb = after_emb.reshape(after_emb.shape[0], -1)
    before_emb, after_emb = normalize(before_emb), normalize(after_emb)
    sim = (before_emb * after_emb).sum(axis=1).mean()
    return sim

# baseline statistics, captured on the first call to the metric below
last_sim_mean, last_sim_std = None, None

def cos_similarity_mean_std_metric(names, x, *args):
    """ Reward a higher mean and a lower std of the per-sample cosine similarity """
    global last_sim_mean, last_sim_std
    output_name = args[-1]
    before_emb = args[0][output_name]
    after_emb = x[output_name]
    if before_emb.ndim == 1:
        before_emb = before_emb[None, ...]
    if after_emb.ndim == 1:
        after_emb = after_emb[None, ...]
    if before_emb.ndim > 2:
        before_emb = before_emb.reshape(before_emb.shape[0], -1)
    if after_emb.ndim > 2:
        after_emb = after_emb.reshape(after_emb.shape[0], -1)
    before_emb, after_emb = normalize(before_emb), normalize(after_emb)
    sim = (before_emb * after_emb).sum(axis=1)
    sim_mean, sim_std = sim.mean(), sim.std()
    # the first call fixes the baseline; later calls are scored against it
    if last_sim_mean is None and last_sim_std is None:
        last_sim_mean, last_sim_std = sim_mean, sim_std
    # larger is better: reward a mean above the baseline and a std below it
    mean_loss, std_loss = sim_mean - last_sim_mean, last_sim_std - sim_std
    loss = mean_loss + std_loss
    return loss

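# Design note: because the baseline is captured on the first call, this metric is
# stateful. Reset it before scoring a different model or output, e.g.:
#   import metrics
#   metrics.last_sim_mean, metrics.last_sim_std = None, None
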
def mse_metric(names, x, *args):
    """ Negated MSE between the outputs before and after quantization """
    output_name = args[-1]
    before_output = args[0][output_name]
    after_output = x[output_name]
    mse = ((before_output - after_output) ** 2).mean()
    return -1 * mse  # larger is better, so negate

def yolo_metric(names, x, *args):
    """ Weighted cosine similarity over the three YOLO output heads """
    num_outputs = 3  # YOLO has 3 outputs
    output_names = args[-num_outputs:]
    sim = 0.0
    for name in output_names:
        src_data = args[0][name]
        dst_data = x[name]
        # any NaN/Inf in the quantized output invalidates the metric
        if not np.all(np.isfinite(dst_data)):
            return 0.0
        n = src_data.shape[0]
        # last-axis layout: [x, y, x, y, obj, cls...]
        src_xyxy = src_data[..., :4].reshape(n, -1)
        dst_xyxy = dst_data[..., :4].reshape(n, -1)
        src_obj = src_data[..., 4].reshape(n, -1)
        dst_obj = dst_data[..., 4].reshape(n, -1)
        src_cls = src_data[..., 5:].reshape(n, -1)
        dst_cls = dst_data[..., 5:].reshape(n, -1)
        sim_xyxy = (normalize(src_xyxy) * normalize(dst_xyxy)).sum(axis=1).mean()
        sim_obj = (normalize(src_obj) * normalize(dst_obj)).sum(axis=1).mean()
        sim_cls = (normalize(src_cls) * normalize(dst_cls)).sum(axis=1).mean()
        sim += 0.3 * sim_xyxy + 0.3 * sim_obj + 0.4 * sim_cls
        print(name, sim_xyxy, sim_obj, sim_cls)  # per-head debug output
    sim /= num_outputs
    return sim

  169. """
  170. You Can Add Your Custom Metric Here.
  171. """
def yolo_metric_hjj(names, x, *args):
    """ Negated-L1 similarity over the three YOLO output heads """
    num_outputs = 3  # YOLO has 3 outputs
    output_names = args[-num_outputs:]
    sim = 0.0
    for name in output_names:
        src_data = args[0][name]
        dst_data = x[name]
        # any NaN/Inf in the quantized output invalidates the metric
        if not np.all(np.isfinite(dst_data)):
            return 0.0
        n = src_data.shape[0]
        # channel layout: [x, y, x, y, cls...] (no separate obj channel here)
        src_xyxy = src_data[:, :4].reshape(n, -1)
        dst_xyxy = dst_data[:, :4].reshape(n, -1)
        src_cls = src_data[:, 4:].reshape(n, -1)
        dst_cls = dst_data[:, 4:].reshape(n, -1)
        # negated L1 distance between the normalized outputs (larger is better)
        sim_xyxy = -np.abs(normalize(src_xyxy) - normalize(dst_xyxy)).sum(axis=1).mean()
        sim_cls = -np.abs(normalize(src_cls) - normalize(dst_cls)).sum(axis=1).mean()
        sim += 0.6 * sim_xyxy + 0.4 * sim_cls
    return sim / num_outputs
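
# Minimal smoke test with synthetic data (the shapes and output names are
# assumptions chosen for the demo, not taken from any real model):
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    logits = rng.standard_normal((8, 10))
    labels = rng.integers(0, 10, size=8)
    outputs = {"out": logits}
    print("top-1 acc:", classification_top_1_acc(None, outputs, labels, "out"))
    print("top-5 acc:", classification_top_5_acc(None, outputs, labels, "out"))
    float_out = {"out": logits}
    quant_out = {"out": logits + 0.01 * rng.standard_normal(logits.shape)}
    print("cosine sim:", cos_similarity_metric(None, quant_out, float_out, "out"))
    print("neg. MSE:", mse_metric(None, quant_out, float_out, "out"))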