import numpy as np
from sklearn.preprocessing import normalize
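# Shared signature convention (inferred from the metrics below): `x` maps an
# output name to its post-quantization array, `args[0]` carries either the
# ground-truth labels or the pre-quantization outputs, and `args[-1]` (or the
# last few entries of `args`) names the output tensor(s) to score. `names` is
# accepted for interface compatibility but unused. Every metric returns a
# score where larger means better.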
def classification_acc_metric(names, x, *args):
    """ Classification Acc Metric """
    # get probability
    output_name = args[-1]
    label = args[0]
    prob = x[output_name].copy()  # (N, K); copy so the in-place ops below do not clobber the caller's array
    # softmax (shift by the row max to avoid numerical overflow)
    prob -= np.max(prob, axis=-1, keepdims=True)
    prob = np.exp(prob)
    prob /= np.sum(prob, axis=-1, keepdims=True)
    # top-1 accuracy (softmax is monotonic, so argmax of the raw logits would give the same result)
    target = prob.argmax(axis=-1)
    acc = np.mean(target == label)
    return acc
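# A minimal smoke test for the classification metric, assuming the runner
# passes outputs as a {name: array} dict; the name "logits" and the toy data
# below are hypothetical stand-ins, not part of any real pipeline.
def _demo_classification_acc():
    rng = np.random.default_rng(0)
    logits = rng.normal(size=(8, 10)).astype(np.float32)  # 8 samples, 10 classes
    labels = rng.integers(0, 10, size=8)
    return classification_acc_metric(None, {"logits": logits}, labels, "logits")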
def classification_entropy_metric(names, x, *args):
    """ Classification Entropy Loss Metric """
    # get probability
    output_name, label = args[-1], args[0]
    prob = x[output_name].copy()  # copy so the in-place ops below do not clobber the caller's array
    # log-softmax (shift by the row max to avoid numerical overflow)
    prob -= np.max(prob, axis=-1, keepdims=True)
    prob = np.exp(prob)
    prob /= np.sum(prob, axis=-1, keepdims=True)
    prob = np.log(prob)
    prob = np.nan_to_num(prob)  # map -inf from log(0) to a large finite negative value
    # mean log-likelihood of the true class, i.e. the negative cross-entropy.
    # Larger means better, so we do not multiply by -1 here.
    return np.mean(prob[range(prob.shape[0]), label])
def recognition_pairs_metric(names, x, *args):
    """ Recognition Pairs Metric """
    # get register embedding and test embedding (pairs are interleaved along the batch axis)
    output_name, label = args[-1], args[0]
    embedding = x[output_name]
    register, test = embedding[::2], embedding[1::2]
    # cosine similarity of each pair = row-wise dot product of L2-normalized embeddings
    register, test = normalize(register), normalize(test)
    cosine_similarity = (register * test).sum(axis=1)
    # sweep thresholds and keep the best verification accuracy
    best_acc, best_threshold = 0.0, 0.0
    for threshold in np.linspace(0, 1, 20):
        pred = cosine_similarity > threshold
        tp = np.sum(np.logical_and(pred, label))
        tn = np.sum(np.logical_and(np.logical_not(pred), np.logical_not(label)))
        acc = 1.0 * (tp + tn) / len(label)
        if acc > best_acc:
            best_acc, best_threshold = acc, threshold
    return best_acc
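# Sketch of the data layout the pairs metric expects, assuming register/test
# embeddings are interleaved (even rows register, odd rows test) and `label`
# holds one same/different flag per pair; names and data are hypothetical.
def _demo_recognition_pairs():
    rng = np.random.default_rng(0)
    emb = rng.normal(size=(16, 128)).astype(np.float32)  # 8 interleaved pairs
    pair_label = rng.integers(0, 2, size=8)              # 1 = same identity
    return recognition_pairs_metric(None, {"embedding": emb}, pair_label, "embedding")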
def recognition_before_after_metric(names, x, *args):
    """ Compare Output Before And After Quantization """
    # get data
    output_name = args[-1]
    output_before_quantization = args[0]
    before_embedding = output_before_quantization[output_name]
    after_embedding = x[output_name]
    if before_embedding.ndim > 2:
        before_embedding = before_embedding.reshape(before_embedding.shape[0], -1)
    if after_embedding.ndim > 2:
        after_embedding = after_embedding.reshape(after_embedding.shape[0], -1)
    # a face's similarity before and after quantization must stay close to 1.0
    before_embedding, after_embedding = normalize(before_embedding), normalize(after_embedding)
    cosine_similarity = (before_embedding * after_embedding).sum(axis=1)
    # average the pass rate over a band of strict thresholds
    thresholds = [0.85, 0.875, 0.90, 0.925, 0.95, 0.975]
    ave_acc = 0.0
    for threshold in thresholds:
        ave_acc += 1.0 * np.sum(cosine_similarity > threshold) / len(cosine_similarity)
    ave_acc /= len(thresholds)
    return ave_acc
def classification_top_1_acc(names, x, *args):
    """ classification top 1 acc """
    # prob
    output_name = args[-1]
    label = args[0]
    prob = x[output_name].squeeze().copy()  # copy: squeeze may return a view, and we modify in place
    if prob.ndim == 1:
        prob = prob[None, :]  # restore the batch axis for a single sample
    # softmax (shift by the row max to avoid numerical overflow)
    prob -= np.max(prob, axis=-1, keepdims=True)
    prob = np.exp(prob)
    prob /= np.sum(prob, axis=-1, keepdims=True)
    # top 1 acc
    target = prob.argmax(axis=-1)
    acc = np.mean(target == label)
    return acc
def classification_top_5_acc(names, x, *args):
    """ classification top 5 acc """
    # prob
    output_name = args[-1]
    label = args[0]
    prob = x[output_name].squeeze().copy()  # copy: squeeze may return a view, and we modify in place
    if prob.ndim == 1:
        prob = prob[None, :]  # restore the batch axis for a single sample
    # softmax (shift by the row max to avoid numerical overflow)
    prob -= np.max(prob, axis=-1, keepdims=True)
    prob = np.exp(prob)
    prob /= np.sum(prob, axis=-1, keepdims=True)
    # top 5 acc: a hit when the label appears among the 5 highest-probability classes
    index = np.argsort(prob, axis=-1)[:, -5:]
    tile_label = np.tile(label, (5, 1)).T
    acc = 1.0 * np.sum(index == tile_label) / len(label)
    return acc
def cos_similarity_metric(names, x, *args):
    """ Mean cosine similarity between pre- and post-quantization outputs """
    output_name = args[-1]
    before_emb = args[0][output_name]
    after_emb = x[output_name]
    # a quantized model that produces nan/inf gets the worst possible score
    if not np.all(np.isfinite(after_emb)):
        return 0.0
    if before_emb.ndim == 1:
        before_emb = before_emb[None, ...]
    if after_emb.ndim == 1:
        after_emb = after_emb[None, ...]
    if before_emb.ndim > 2:
        before_emb = before_emb.reshape(before_emb.shape[0], -1)
    if after_emb.ndim > 2:
        after_emb = after_emb.reshape(after_emb.shape[0], -1)
    before_emb, after_emb = normalize(before_emb), normalize(after_emb)
    sim = (before_emb * after_emb).sum(axis=1).mean()
    return sim
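# Sketch of the before/after comparison, assuming the caller captured the
# float model's outputs first and passes them as args[0]; "feat" is a
# hypothetical output name and the noise term stands in for quantization error.
def _demo_cos_similarity():
    rng = np.random.default_rng(0)
    before = rng.normal(size=(4, 256)).astype(np.float32)
    after = before + 0.01 * rng.normal(size=(4, 256)).astype(np.float32)
    return cos_similarity_metric(None, {"feat": after}, {"feat": before}, "feat")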
# Baseline statistics captured on the first call of cos_similarity_mean_std_metric.
last_sim_mean, last_sim_std = None, None
def cos_similarity_mean_std_metric(names, x, *args):
    """ Score the change in cosine-similarity mean/std against the first-call baseline """
    global last_sim_mean, last_sim_std
    output_name = args[-1]
    before_emb = args[0][output_name]
    after_emb = x[output_name]
    if before_emb.ndim == 1:
        before_emb = before_emb[None, ...]
    if after_emb.ndim == 1:
        after_emb = after_emb[None, ...]
    if before_emb.ndim > 2:
        before_emb = before_emb.reshape(before_emb.shape[0], -1)
    if after_emb.ndim > 2:
        after_emb = after_emb.reshape(after_emb.shape[0], -1)
    before_emb, after_emb = normalize(before_emb), normalize(after_emb)
    sim = (before_emb * after_emb).sum(axis=1)
    sim_mean, sim_std = sim.mean(), sim.std()
    if last_sim_mean is None and last_sim_std is None:
        last_sim_mean, last_sim_std = sim_mean, sim_std  # set once; later calls score against this baseline
    # reward a higher mean and a lower std than the baseline
    mean_loss, std_loss = sim_mean - last_sim_mean, last_sim_std - sim_std
    loss = mean_loss + std_loss
    return loss
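# Hedged helper, not part of the original interface: reset the first-call
# baseline above, e.g. between two independent quantization search runs.
def reset_similarity_baseline():
    global last_sim_mean, last_sim_std
    last_sim_mean, last_sim_std = None, None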
def mse_metric(names, x, *args):
    """ Negative MSE between pre- and post-quantization outputs """
    output_name = args[-1]
    before_output = args[0][output_name]
    after_output = x[output_name]
    mse = ((before_output - after_output) ** 2).mean()
    return -1 * mse  # negate so that larger means better
def yolo_metric(names, x, *args):
    """ Cosine-similarity metric over the three YOLO detection heads """
    num_outputs = 3  # YOLO has 3 output heads
    output_names = args[-num_outputs:]
    sim = 0.0
    for name in output_names:
        src_data = args[0][name]
        dst_data = x[name]
        # a quantized model that produces nan/inf gets the worst possible score
        if not np.all(np.isfinite(dst_data)):
            return 0.0
        n = src_data.shape[0]
        # each prediction is laid out as [x, y, x, y, obj, cls...]
        src_xyxy = src_data[..., :4].reshape(n, -1)
        dst_xyxy = dst_data[..., :4].reshape(n, -1)
        src_obj = src_data[..., 4].reshape(n, -1)
        dst_obj = dst_data[..., 4].reshape(n, -1)
        src_cls = src_data[..., 5:].reshape(n, -1)
        dst_cls = dst_data[..., 5:].reshape(n, -1)
        sim_xyxy = (normalize(src_xyxy) * normalize(dst_xyxy)).sum(axis=1).mean()
        sim_obj = (normalize(src_obj) * normalize(dst_obj)).sum(axis=1).mean()
        sim_cls = (normalize(src_cls) * normalize(dst_cls)).sum(axis=1).mean()
        # weight box, objectness and class similarities, then average over heads
        sim += 0.3 * sim_xyxy + 0.3 * sim_obj + 0.4 * sim_cls
        print(name, sim_xyxy, sim_obj, sim_cls)
    sim /= num_outputs
    return sim
- """
- You Can Add Your Custom Metric Here.
- """
def yolo_metric_hjj(names, x, *args):
    """ L1-distance variant of the YOLO metric for channel-first heads laid out as [x, y, x, y, cls...] """
    num_outputs = 3  # YOLO has 3 output heads
    output_names = args[-num_outputs:]
    sim = 0.0
    for name in output_names:
        src_data = args[0][name]
        dst_data = x[name]
        # a quantized model that produces nan/inf gets the worst possible score
        if not np.all(np.isfinite(dst_data)):
            return 0.0
        n = src_data.shape[0]  # data assumed channel-first: (n, c, h, w)
        # channel layout: [x, y, x, y, cls...] (no separate objectness channel)
        src_xyxy = src_data[:, :4].reshape(n, -1)
        dst_xyxy = dst_data[:, :4].reshape(n, -1)
        src_cls = src_data[:, 4:].reshape(n, -1)
        dst_cls = dst_data[:, 4:].reshape(n, -1)
        # negative mean L1 distance between normalized outputs (larger means better)
        sim_xyxy = -np.abs(normalize(src_xyxy) - normalize(dst_xyxy)).sum(axis=1).mean()
        sim_cls = -np.abs(normalize(src_cls) - normalize(dst_cls)).sum(axis=1).mean()
        sim += 0.6 * sim_xyxy + 0.4 * sim_cls
    return sim / num_outputs
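# Optional smoke test for the demo helpers above; everything here is a
# synthetic sketch, not part of any real calibration pipeline.
if __name__ == "__main__":
    print("classification acc:", _demo_classification_acc())
    print("recognition pairs acc:", _demo_recognition_pairs())
    print("cosine similarity:", _demo_cos_similarity())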