RSNA abstract checkpoint

Stefan 2022-05-06 15:29:01 +02:00
parent c2971b0fed
commit 18cceb9791
3 changed files with 58 additions and 45 deletions

View File

@@ -24,7 +24,7 @@ if args.comparison:
colors = ['r','r','b','b','g','g','y','y']
plot_type = ['-','--','-','--','-','--','-','--']
else:
colors = ['r','b','g','k','y','c']
colors = ['r','b','k','g','c','y']
plot_type = ['-','-','-','-','-','-']
yaml_metric = args.yaml_metric
@@ -38,61 +38,61 @@ sensitivity = []
fig = plt.figure(1)
ax = fig.add_subplot(111)
False_possitives_mean = np.linspace(0, 5, 200)
for idx in range(len(args.experiment)):
False_possitives_mean = np.linspace(0, 2.5, 200)
for fold in range(5):
print('fold:',fold)
paufroc = []
for fold in range(4):
# print('fold:',fold)
fold = fold + 1
experiment_metrics = {}
experiment_path = f'./../train_output/{experiments[idx]}_{fold}/froc_metrics_{yaml_metric}.yml'
experiment_metrics = read_yaml_to_dict(experiment_path)
pfroc = partial_auc(experiment_metrics["sensitivity"],experiment_metrics["FP_per_case"],low=0.1, high=2.5)
pfroc = partial_auc(experiment_metrics["sensitivity"],experiment_metrics["FP_per_case"],low=0.1, high=5)
paufroc.append(round(pfroc,2))
False_possitives.append(experiment_metrics["FP_per_case"])
sensitivity_ = np.interp(False_possitives_mean,experiment_metrics["FP_per_case"],experiment_metrics["sensitivity"])
sensitivity.append(sensitivity_)
print(f'pfROC of {experiments[idx]}: {paufroc}')
# calculate mean and std
sensitivity_mean = np.squeeze(np.mean(sensitivity,axis=0))
sensitivity_std = np.multiply(np.squeeze(np.std(sensitivity,axis=0)),2)
plt.plot(False_possitives_mean, sensitivity_mean,color=colors[idx],linestyle=plot_type[idx])
plt.fill_between(False_possitives_mean, np.subtract(sensitivity_mean,sensitivity_std), np.add(sensitivity_mean,sensitivity_std))
plt.fill_between(False_possitives_mean, np.subtract(sensitivity_mean,sensitivity_std), np.add(sensitivity_mean,sensitivity_std),alpha=0.15,color=colors[idx],)
ax.set(xscale="log")
ax.axes.xaxis.set_minor_locator(tkr.LogLocator(base=10, subs='all'))
ax.axes.xaxis.set_minor_formatter(tkr.NullFormatter())
ax.axes.xaxis.set_major_formatter(tkr.ScalarFormatter())
ax.axes.grid(True, which="both", ls="--", c='#d3d3d3')
ax.axes.set_xlim(left=0, right=2.5)
ax.axes.xaxis.set_major_locator(tkr.FixedLocator([0,0.1,0.5,1,2.5]))
ax.axes.set_xlim(left=0.1, right=5)
ax.axes.xaxis.set_major_locator(tkr.FixedLocator([0.1,0.5,1,5]))
fpr = []
tpr = []
fpr_mean = np.linspace(0, 1, 200)
for idx in range(len(args.experiment)):
experiment_path = f'./../train_output/{experiments[idx]}/froc_metrics_{yaml_metric}.yml'
experiment_metrics = read_yaml_to_dict(experiment_path)
auroc.append(round(experiment_metrics['auroc'],3))
fpr_mean = np.linspace(0, 1, 200)
for fold in range(5):
auroc = []
for fold in range(4):
fold = fold + 1
print('fold:',fold)
experiment_metrics = {}
experiment_path = f'./../train_output/{experiments[idx]}_{fold}/froc_metrics_{yaml_metric}.yml'
experiment_metrics = read_yaml_to_dict(experiment_path)
# pfroc = partial_auc(experiment_metrics["tpr"],experiment_metrics["fpr"],low=0.1, high=2.5)
paufroc.append(round(pfroc,2))
auroc.append(round(experiment_metrics['auroc'],3))
fpr.append(experiment_metrics["fpr"])
tpr_ = np.interp(fpr_mean,experiment_metrics["fpr"],experiment_metrics["tpr"])
tpr.append(tpr_)
print(f'auROC of {experiments[idx]}: {auroc}')
tpr_mean = np.squeeze(np.mean(tpr,axis=0))
tpr_std = np.multiply(np.squeeze(np.std(tpr,axis=0)),2)
plt.figure(2)
plt.plot(fpr_mean, tpr_mean,color=colors[idx],linestyle=plot_type[idx])
plt.fill_between(fpr_mean, np.subtract(tpr_mean,tpr_std), np.add(tpr_mean,tpr_std))
plt.fill_between(fpr_mean, np.subtract(tpr_mean,tpr_std), np.add(tpr_mean,tpr_std),alpha=0.15,color=colors[idx],)
print(auroc)
experiments = [exp.replace('train_10h_', '') for exp in experiments]
@@ -107,7 +107,9 @@ plt.figure(1)
plt.title('fROC curve')
plt.xlabel('False positive per case')
plt.ylabel('Sensitivity')
plt.legend(experiments_paufroc,loc='lower right')
# plt.legend(experiments_paufroc,loc='lower right')
plt.legend(['calculated with b50-400','calculated with b50-800'],loc='lower right')
# plt.xlim([0,50])
plt.grid()
plt.ylim([0,1])
@@ -119,8 +121,11 @@ experiments_auroc = list(map(concat_func,experiments,auroc)) # list the map func
plt.figure(2)
plt.title('ROC curve')
plt.legend(experiments_auroc,loc='lower right')
# plt.legend(experiments_auroc,loc='lower right')
plt.legend(['calculated with b50-400','calculated with b50-800'],loc='lower right')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.ylim([0,1])
plt.xlim([0,1])
plt.grid()
plt.savefig(f"./../train_output/ROC_{args.saveas}.png", dpi=300)
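
The pfROC values printed per experiment above come from partial_auc (imported from p_auc, see the last file below); its implementation is not part of this commit. A minimal sketch of what such a bounded trapezoidal integral over the FROC curve could look like, assuming the area is not normalised by the window width, is:

# Hypothetical sketch of partial_auc; the real p_auc implementation may differ.
import numpy as np

def partial_auc(sensitivity, fp_per_case, low=0.1, high=2.5):
    # Area under the sensitivity-vs-FP/case curve restricted to [low, high].
    fp = np.asarray(fp_per_case, dtype=float)
    sens = np.asarray(sensitivity, dtype=float)
    order = np.argsort(fp)                  # interpolation needs increasing FP values
    fp, sens = fp[order], sens[order]
    grid = np.linspace(low, high, 200)      # dense grid inside the evaluation window
    sens_grid = np.interp(grid, fp, sens)   # linear interpolation onto that grid
    return np.trapz(sens_grid, grid)        # trapezoidal area over the window

The same np.interp step is what the loop above uses to put every fold's sensitivity on the common FP-per-case grid before taking the mean and the 2-sigma band.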

View File

@@ -48,13 +48,18 @@ MODEL_PATH = f'./../train_output/{EXPERIMENT}_{series_}_{fold}/models/{EXPERIMEN
YAML_DIR = f'./../train_output/{EXPERIMENT}_{series_}_{fold}'
IMAGE_DIR = f'./../train_output/{EXPERIMENT}_{series_}_{fold}'
# MODEL_PATH = f'./../train_output/{EXPERIMENT}_{series_}/models/{EXPERIMENT}_{series_}.h5'
# YAML_DIR = f'./../train_output/{EXPERIMENT}_{series_}'
# IMAGE_DIR = f'./../train_output/{EXPERIMENT}_{series_}'
DATA_DIR = "./../data/Nijmegen paths/"
TARGET_SPACING = (0.5, 0.5, 3)
INPUT_SHAPE = (192, 192, 24, len(SERIES))
IMAGE_SHAPE = INPUT_SHAPE[:3]
DATA_SPLIT_INDEX = read_yaml_to_dict(f'./../data/Nijmegen paths/train_val_test_idxs_{fold}.yml')
TEST_INDEX = DATA_SPLIT_INDEX['val_set0']
# DATA_SPLIT_INDEX = read_yaml_to_dict(f'./../data/Nijmegen paths/train_val_test_idxs.yml')
TEST_INDEX = DATA_SPLIT_INDEX['test_set0']
N_CPUS = 12
@@ -168,7 +173,7 @@ predictions = zeros
# perform Froc
metrics = evaluate(y_true=segmentations, y_pred=predictions)
dump_dict_to_yaml(metrics, YAML_DIR, "froc_metrics_focal_10", verbose=True)
dump_dict_to_yaml(metrics, YAML_DIR, "froc_metrics_focal_10_test", verbose=True)
############## save image as example #################
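
The froc_metrics_*.yml files dumped here are exactly what the plotting script above reads back per fold. Neither dump_dict_to_yaml nor read_yaml_to_dict is changed in this commit; assuming they are thin PyYAML wrappers that write and read <name>.yml inside the given directory, they could look roughly like:

# Assumed behaviour of the YAML helpers used above; the project's real versions may differ.
import os
import yaml

def dump_dict_to_yaml(data, target_dir, filename, verbose=False):
    path = os.path.join(target_dir, f"{filename}.yml")
    with open(path, "w") as f:
        yaml.safe_dump(data, f)             # metrics dict assumed to hold plain Python types
    if verbose:
        print(f"wrote {path}")

def read_yaml_to_dict(path):
    with open(path) as f:
        return yaml.safe_load(f)            # returns the stored metrics as a dict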

View File

@@ -1,24 +1,27 @@
import matplotlib.pyplot as plt
import matplotlib.ticker as tkr
import seaborn as sns
import matplotlib.ticker as tkr
from p_auc import partial_auc
x = [0,0.11,0.23,0.5,0.90,1.00,1.500,3]
y = [0,0.02,0.09,1,2,3,4,12]
from scipy import stats
import numpy as np
# siemens_froc = [1.68,1.81,1.44,1.55]
# b400_froc = [3.4,3.93,2.82,]
# b800_froc = [1.58,1.99,1.36,1.6]
tick_spacing = 1
# siemens_roc = [0.782, 0.732, 0.775, 0.854]
b400_roc = [0.746, 0.814, 0.789, 0.763]
b800_roc = [0.786, 0.731, 0.67, 0.782]
fig1, ax1 = plt.subplots(1,1)
ax1.plot(x,y)
pauc = partial_auc(x,y)
print(pauc)
# stat_test = stats.wilcoxon(siemens_froc,b800_froc,alternative='less')
# print('froc stats siemens > b400',stat_test)
# print(' Mean and std siemens froc:', np.mean(siemens_froc),'+-',np.std(siemens_froc))
# print(' Mean and std b400 froc:', np.mean(b400_froc),'+-',np.std(b400_froc))
# print(' Mean and std b800 froc:', np.mean(b800_froc),'+-',np.std(b800_froc))
# ax.set_xticks([0,100,1500])
# ax.xaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))
# ax1.set(xscale="log")
# ax1.xaxis.set_minor_locator(tkr.LogLocator(base=10, subs='all'))
# ax1.xaxis.set_minor_formatter(tkr.NullFormatter())
# ax1.xaxis.set_major_formatter(tkr.ScalarFormatter())
# ax1.grid(True, which="both", ls="--", c='#d3d3d3')
# ax1.set_xlim(left=0, right=150)
# ax1.xaxis.set_major_locator(tkr.FixedLocator([0,1,3]))
# print(' Mean and std siemens roc:', np.mean(siemens_roc),'+-',np.std(siemens_roc))
print(' Mean and std b400 roc:', np.mean(b400_roc),'+-',np.std(b400_roc))
print(' Mean and std b800 roc:', np.mean(b800_roc),'+-',np.std(b800_roc))
# The test has been introduced in [4]. Given n independent samples (xi, yi) from a bivariate distribution
# (i.e. paired samples), it computes the differences di = xi - yi. One assumption of the test is that the
# differences are symmetric, see [2]. The two-sided test has the null hypothesis that the median of the
# differences is zero against the alternative that it is different from zero. The one-sided test has the
# null hypothesis that the median is positive against the alternative that it is negative
# (alternative == 'less'), or vice versa (alternative == 'greater').
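
The commented-out stats.wilcoxon call above follows exactly this recipe. Purely as an illustration, a paired test on the per-fold AUROC lists already defined in this script would be:

# Illustrative use of the paired Wilcoxon signed-rank test on the AUROC values above.
from scipy import stats

b400_roc = [0.746, 0.814, 0.789, 0.763]
b800_roc = [0.786, 0.731, 0.67, 0.782]

# Two-sided: H0 is that the median of the per-fold differences b400 - b800 is zero.
print('roc stats b400 vs b800', stats.wilcoxon(b400_roc, b800_roc))

# One-sided: alternative='less' tests whether b400 tends to score below b800,
# mirroring the commented-out FROC comparison above.
print('roc stats b400 < b800', stats.wilcoxon(b400_roc, b800_roc, alternative='less'))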