push before migration

Stefan
2023-03-28 14:48:28 +02:00
parent 49b18fe7f0
commit 9468dadfa3
195 changed files with 645 additions and 18 deletions

View File

@@ -24,7 +24,7 @@ if args.comparison:
    colors = ['b','c','r','m','g','g','y','y']
    plot_type = ['-','-','-','--','-','--','-','--']
else:
    colors = ['g','b','r','g','k','c','y']
    colors = ['b','r','g','k','c','y']
    plot_type = ['-','-','-','-','-','-']
@@ -40,7 +40,7 @@ sensitivity = []
fig = plt.figure(1)
ax = fig.add_subplot(111)
False_possitives_mean = np.linspace(0, 2.5, 200)
False_possitives_mean = np.linspace(0.1, 1, 200)
for idx in range(len(args.experiment)):
    paufroc = []
@@ -54,7 +54,7 @@ for idx in range(len(args.experiment)):
experiment_metrics = {}
experiment_path = f'./../train_output/{experiments[idx]}_{fold}/froc_metrics_{yaml_metric}.yml'
experiment_metrics = read_yaml_to_dict(experiment_path)
pfroc = partial_auc(experiment_metrics["sensitivity"],experiment_metrics["FP_per_case"],low=0.1, high=2.5)
pfroc = partial_auc(experiment_metrics["sensitivity"],experiment_metrics["FP_per_case"],low=0.0, high=1)
paufroc.append(round(pfroc,2))
False_possitives.append(experiment_metrics["FP_per_case"])
sensitivity_ = np.interp(False_possitives_mean,experiment_metrics["FP_per_case"],experiment_metrics["sensitivity"])
@@ -73,8 +73,8 @@ for idx in range(len(args.experiment)):
ax.axes.xaxis.set_major_formatter(tkr.ScalarFormatter())
ax.axes.get_xaxis()
ax.axes.get_yaxis()
ax.axes.set_xlim(left=0.1, right=2.5)
ax.axes.xaxis.set_major_locator(tkr.FixedLocator([0.1,0.5,1,2.5]))
ax.axes.set_xlim(left=0.1, right=1)
ax.axes.xaxis.set_major_locator(tkr.FixedLocator([0.1,0.5,1]))
fpr = []
tpr = []
@@ -119,8 +119,8 @@ plt.xlabel('False positive lesions', fontsize=18)
plt.ylabel('Sensitivity', fontsize=18)
# plt.legend(experiments_paufroc,loc='lower right')
# plt.legend(['$T2_{tra}$ $ADC_{b50-b400}$ $b1400_{b50-b400}$','$T2_{tra}$ $ADC_{b50-b800}$ $b1400_{b50-b800}$','$T2_{tra}$ $ADC_{b50-b400-b800}$ $b1400_{b50-b400-b800}$'],loc='lower right')
# plt.legend(['$T2_{tra}$ $ADC_{b50-b400-b800}$ $b1400_{b50-b400-b800}$','$T2_{tra}$ b50 b400 b800'],loc='lower right')
plt.legend(["All b-values","Omitting b800","Omitting b400"],loc='lower right',fontsize=16)
plt.legend(['$T2_{tra}$ $ADC_{b50-b400-b800}$ $b1400_{b50-b400-b800}$','$T2_{tra}$ b50 b400 b800'],loc='lower right')
# plt.legend(["All b-values","Omitting b800","Omitting b400"],loc='lower right',fontsize=16)
# plt.xlim([0,50])
plt.grid()
@@ -134,9 +134,9 @@ experiments_auroc = list(map(concat_func,experiments,auroc)) # list the map func
plt.figure(2)
plt.title('ROC curve',fontsize=20)
# plt.legend(experiments_auroc,loc='lower right')
# plt.legend(['$T2_{tra}$ $ADC_{b50-b400-b800}$ $b1400_{b50-b400-b800}$','$T2_{tra}$ b50 b400 b800'],loc='lower right')
plt.legend(['$T2_{tra}$ $ADC_{b50-b400-b800}$ $b1400_{b50-b400-b800}$','$T2_{tra}$ b50 b400 b800'],loc='lower right')
# plt.legend(['$T2_{tra}$ $ADC_{b50-b400}$ $b1400_{b50-b400}$','$T2_{tra}$ $ADC_{b50-b800}$ $b1400_{b50-b800}$','$T2_{tra}$ $ADC_{b50-b400-b800}$ $b1400_{b50-b400-b800}$'],loc='lower right')
plt.legend(["All b-values","Omitting b800","Omitting b400"],loc='lower right',fontsize=16)
# plt.legend(["All b-values","Omitting b800","Omitting b400"],loc='lower right',fontsize=16)
plt.xlabel('False positive rate',fontsize=18)
plt.ylabel('True positive rate',fontsize=18)
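
For readers without umcglib at hand, a minimal sketch of what `partial_auc` (imported from `umcglib.froc`) presumably computes over the clipped FP range; the dense grid, interpolation, and normalisation are assumptions, not the library's actual code:

import numpy as np

def partial_auc_sketch(sensitivity, fp_per_case, low=0.0, high=1.0):
    # Assumes fp_per_case is sorted ascending, as FROC outputs are.
    # Interpolate sensitivity onto a dense grid inside [low, high], then
    # integrate with the trapezoidal rule, normalised by the window width.
    fp_grid = np.linspace(low, high, 200)
    sens_interp = np.interp(fp_grid, fp_per_case, sensitivity)
    return np.trapz(sens_interp, fp_grid) / (high - low)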

View File

@@ -29,8 +29,8 @@ for idx,experiment in enumerate(experiments):
IMAGES_DIR = f'./../train_output/{experiment}/images_list_new.npy' #_new23
SEGMENTATION_DIR = f'./../train_output/{experiment}/segmentations_new.npy' #_new23
predictions_DIR = f'./../train_output/{experiment}/predictions_new.npy' #_new23
SLIDE = 10 #pat 371
# SLIDE = 7 #pat 23
SLIDE = 10 #pat_idx 371 = pat0623
# SLIDE = 7 #pat_idx ?? = pat023
########## load saliency map ############
heatmap = np.load(SALIENCY_DIR)

View File

@@ -79,6 +79,8 @@ for fold in range(5):
# print_p("pat_idx:",pat_id)
# print(image_paths['t2'][])
# input('check?')
# Read and preprocess each of the paths for each series, and the segmentations.
# print('images number',[TEST_INDEX[img_idx]])
img_s = {f'{s}': sitk.ReadImage(image_paths[s][img_idx], sitk.sitkFloat32) for s in SERIES}
@@ -147,3 +149,5 @@ for fold in range(5):
sorted_differences = {k: v for k, v in sorted(difference.items(), key=lambda item: item[1])}
print_p('>>>>>>>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
print_p(sorted_differences)
print_p(dict(reversed(list(sorted_differences.items()))))
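
Side note: the descending view of the same `difference` dict can also be produced at sort time instead of reversing afterwards; an equivalent sketch:

# Sort per-patient differences from largest to smallest in one pass.
sorted_desc = dict(sorted(difference.items(), key=lambda item: item[1], reverse=True))
print_p(sorted_desc)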

View File

@@ -104,4 +104,5 @@ if __name__ == '__main__':
if type(data_dict[key]) == list:
    print(f"{key}: {len(data_dict[key])}")
dump_dict_to_yaml(data_dict, "./../data", filename=f"train_val_test_idxs_0", verbose=False)
dump_dict_to_yaml(data_dict, "./../data", filename=f"train_val_test_idxs_0", verbose=False)
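
`dump_dict_to_yaml` comes from sfransen.utils_quintin and its body is not part of this diff; a hypothetical stand-in matching the call signature above, assuming it writes <dir>/<filename>.yml with PyYAML:

import os
import yaml

def dump_dict_to_yaml_sketch(data, dir_path, filename, verbose=True):
    # Serialise `data` to <dir_path>/<filename>.yml (assumed behaviour).
    out_path = os.path.join(dir_path, f"{filename}.yml")
    with open(out_path, 'w') as f:
        yaml.dump(data, f, default_flow_style=False)
    if verbose:
        print(f"Wrote {out_path}")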

View File

@@ -99,3 +99,14 @@ for img_idx in tqdm(range(num_images)):
# append_new_line('t2calc.txt',path_t2)
# append_new_line('segcalc.txt',path_seg)
IMAGE_DICOM = sitk.ReadImage(IMAGE_PATH, sitk.sitkFloat32)
IMAGE_DICOM_arr = sitk.GetArrayFromImage(IMAGE_DICOM)
# rotation / flip
# IMAGE_DICOM_arr.origin
IMAGE_RECON = sitk.ReadImage(IMAGE_PATH, sitk.sitkFloat32)
# IMAGE_arr = sitk.GetArrayFromImage(IMAGE)
IMAGE_RECON.CopyInformation(IMAGE_DICOM)
sitk.WriteImage(IMAGE_RECON, IMAGE_PATH_NEW)
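
For context, `Image.CopyInformation` copies only the spatial metadata (origin, spacing, direction) from the source image onto the caller and leaves the voxel data untouched; both images must have the same size. A minimal illustration with placeholder paths:

import SimpleITK as sitk

reference = sitk.ReadImage('reference.nii.gz', sitk.sitkFloat32)
target = sitk.ReadImage('target.nii.gz', sitk.sitkFloat32)

# Stamp the reference scan's world-space metadata onto the target volume.
target.CopyInformation(reference)
assert target.GetOrigin() == reference.GetOrigin()
assert target.GetSpacing() == reference.GetSpacing()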

View File

@@ -1,31 +1,65 @@
from glob import glob
from os.path import normpath, basename
import SimpleITK as sitk
import numpy as np
import os
from os import path
from sfransen.utils_quintin import *
from sfransen.DWI_exp.helpers import *
from sfransen.DWI_exp.preprocessing_function import preprocess
from sfransen.DWI_exp.callbacks import dice_coef
#from sfransen.FROC.blob_preprocess import *
from sfransen.FROC.cal_froc_from_np import *
from sfransen.load_images import load_images_parrallel
from sfransen.DWI_exp.losses import weighted_binary_cross_entropy
from umcglib.froc import *
from umcglib.binarize import dynamic_threshold
from tensorflow.keras.models import load_model
def get_paths(main_dir):
    all_niftis = glob(main_dir, recursive=True)
    dwis_b800 = [i for i in all_niftis if ("diff" in i.lower() or "dwi" in i.lower()) and ("b-800" in i.lower() or "b800" in i.lower())]
    dwis_b400 = [i for i in all_niftis if ("diff" in i.lower() or "dwi" in i.lower()) and ("b-400" in i.lower() or "b400" in i.lower())]
    return dwis_b800, dwis_b400
pat_numbers = ['pat0132','pat0091','pat0352','pat0844','pat1006','pat0406','pat0128','pat0153','pat0062','pat0758','pat0932','pat0248','pat0129','pat0429','pat0181','pat0063','pat0674','pat0176','pat0366','pat0082']
def get_paths_seg(main_dir):
    seg = glob(main_dir, recursive=True)
    return seg
def get_paths_train(dir,SERIES,pat_id):
    image_path = {}
    for s in SERIES:
        with open(path.join(dir, f"{s}.txt"), 'r') as f:
            image_paths = [l.strip() for l in f.readlines()]
        image_path[s] = [i for i in image_paths if pat_id in i]
    return image_path
pat_numbers_worst = ['pat0132','pat0091','pat0352','pat0844','pat1006','pat0406','pat0128','pat0153','pat0062','pat0758','pat0932','pat0248','pat0129','pat0429','pat0181','pat0063','pat0674','pat0176','pat0366','pat0082']
pat_numbers_best = ['pat0651','pat0889','pat0448','pat1022','pat0887','pat0194','pat0603','pat0742','pat0811','pat0489','pat0622','pat0582','pat0105','pat0084','pat0643','pat0529','pat0476','pat0514','pat0506','pat0567']
pat_numbers_worst = ['pat0132', 'pat0091','pat0352','pat0844','pat1006','pat0636','pat1009','pat0584','pat0588','pat0198']
load_path = '../../datasets/radboud_new/{pat_number}/2016/**/*.nii.gz'
for idx, pat_number in enumerate(pat_numbers):
for idx, pat_number in enumerate(pat_numbers_worst):
    print(pat_number)
    dwis_b800,dwis_b400 = get_paths(f'../../datasets/radboud_new/{pat_number}/2016/**/*.nii.gz')
    seg_path = get_paths_seg(f'/data/pca-rad/datasets/radboud_lesions_2022/{pat_number}*.nii.gz')
    # load
    dwi_b800 = sitk.ReadImage(dwis_b800, sitk.sitkFloat32)
    dwi_b400 = sitk.ReadImage(dwis_b400, sitk.sitkFloat32)
    seg = sitk.ReadImage(seg_path, sitk.sitkFloat32)
    seg = sitk.GetArrayFromImage(seg)
    print('count:', np.sum(np.clip(seg,0,1)))
    # write
    output_path_b800 = f'../temp/check_by_derya/{idx}_{pat_number}_b800.nii.gz'
    output_path_b400 = f'../temp/check_by_derya/{idx}_{pat_number}_b400.nii.gz'
    output_path_b800 = f'../temp/lowest_pred_exp/worst/{idx}_{pat_number}_b800.nii.gz'
    output_path_b400 = f'../temp/lowest_pred_exp/worst/{idx}_{pat_number}_b400.nii.gz'
    sitk.WriteImage(dwi_b800, output_path_b800)
    sitk.WriteImage(dwi_b400, output_path_b400)
###################################################################################################################
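
Note that `get_paths` returns lists and `sitk.ReadImage` accepts a sequence of file names, in which case it reads them as slices of one series. If each glob is expected to match exactly one NIfTI per patient, reading the single match explicitly is less surprising; a defensive sketch under that assumption:

# Assumption: exactly one b800 and one b400 NIfTI exist per patient.
if len(dwis_b800) != 1 or len(dwis_b400) != 1:
    raise ValueError(f'{pat_number}: expected one b800/b400 scan each, '
                     f'got {len(dwis_b800)} and {len(dwis_b400)}')
dwi_b800 = sitk.ReadImage(dwis_b800[0], sitk.sitkFloat32)
dwi_b400 = sitk.ReadImage(dwis_b400[0], sitk.sitkFloat32)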

scripts/test4.py Executable file
View File

@@ -0,0 +1,156 @@
from inspect import _ParameterKind
import SimpleITK as sitk
import tensorflow as tf
from tensorflow.keras.models import load_model
from focal_loss import BinaryFocalLoss
import numpy as np
import multiprocessing
from functools import partial
import os
from os import path
from tqdm import tqdm
import argparse
from sfransen.utils_quintin import *
from sfransen.DWI_exp.helpers import *
from sfransen.DWI_exp.preprocessing_function import preprocess
from sfransen.DWI_exp.callbacks import dice_coef
#from sfransen.FROC.blob_preprocess import *
from sfransen.FROC.cal_froc_from_np import *
from sfransen.load_images import load_images_parrallel
from sfransen.DWI_exp.losses import weighted_binary_cross_entropy
from umcglib.froc import *
from umcglib.binarize import dynamic_threshold
def print_p(*args, **kwargs):
    """
    Shorthand for print(..., flush=True)
    Useful on HPC cluster where output has buffered writes.
    """
    print(*args, **kwargs, flush=True)
######## CUDA ################
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
N_CPUS = 12
DATA_DIR = "./../data/Nijmegen paths/"
TARGET_SPACING = (0.5, 0.5, 3)
INPUT_SHAPE = (192, 192, 24, 3)
IMAGE_SHAPE = INPUT_SHAPE[:3]
final_table = {}
difference = {}
for fold in range(5):
    if fold == 0:
        TEST_INDEX = [189,84]
    if fold == 1:
        TEST_INDEX = [828,12]
    if fold == 2:
        TEST_INDEX = [470,482]
    if fold == 3:
        TEST_INDEX = [591]
    if fold == 4:
        TEST_INDEX = [511,281,149]
    for img_idx in TEST_INDEX:
        for model in ['b800','b400']:
            image_paths = {}
            predictions_added = []
            segmentations_added = []
            images = []
            images_list = []
            segmentations = []
            if model == 'b800':
                MODEL_PATH = f'./../train_output/calc_exp_t2_b1400calc2_adccalc2_{fold}/models/calc_exp_t2_b1400calc2_adccalc2_{fold}.h5'
                # YAML_DIR = f'./../train_output/calc_exp_t2_b1400calc2_adccalc2_{fold}'
                # IMAGE_DIR = f'./../train_output/calc_exp_t2_b1400calc2_adccalc2_{fold}'
                SERIES = ['t2','b1400calc2','adccalc2']
            if model == 'b400':
                MODEL_PATH = f'./../train_output/calc_exp_t2_b1400calc3_adccalc3_{fold}/models/calc_exp_t2_b1400calc3_adccalc3_{fold}.h5'
                SERIES = ['t2','b1400calc3','adccalc3']
            for s in SERIES:
                with open(path.join(DATA_DIR, f"{s}.txt"), 'r') as f:
                    image_paths[s] = [l.strip() for l in f.readlines()]
            with open(path.join(DATA_DIR, f"seg.txt"), 'r') as f:
                seg_paths = [l.strip() for l in f.readlines()]
            num_images = len(seg_paths)
            # Strip the trailing '.nii.gz' (7 chars) to get the patient id.
            pat_id = os.path.basename(os.path.normpath(seg_paths[img_idx]))[:-7]
            # print_p("pat_idx:",pat_id)
            # print(image_paths['t2'][])
            # input('check?')
            # Read and preprocess each of the paths for each series, and the segmentations.
            # print('images number',[TEST_INDEX[img_idx]])
            img_s = {f'{s}': sitk.ReadImage(image_paths[s][img_idx], sitk.sitkFloat32) for s in SERIES}
            seg_s = sitk.ReadImage(seg_paths[img_idx], sitk.sitkFloat32)
            img_n, seg_n = preprocess(img_s, seg_s,
                                      shape=IMAGE_SHAPE, spacing=TARGET_SPACING)
            for seq in img_n:
                images.append(img_n[f'{seq}'])
            images_list.append(images)
            images = []
            segmentations.append(seg_n)
            images_list = np.transpose(images_list, (0, 2, 3, 4, 1))
            ########### load module ##################
            # print(' >>>>>>> LOAD MODEL <<<<<<<<<')
            dependencies = {
                'dice_coef': dice_coef,
                'weighted_cross_entropy_fn':weighted_binary_cross_entropy
            }
            reconstructed_model = load_model(MODEL_PATH, custom_objects=dependencies)
            # reconstructed_model.summary(line_length=120)
            # make predictions on all TEST_INDEX
            # print(' >>>>>>> START prediction <<<<<<<<<')
            predictions_blur = reconstructed_model.predict(images_list, batch_size=1)
            ############# preprocess #################
            # preprocess predictions by removing the blur and making individual blobs
            # print('>>>>>>>> START preprocess')
            def move_dims(arr):
                # UMCG numpy dimensions convention: dims = (batch, width, height, depth)
                # Joeran numpy dimensions convention: dims = (batch, depth, height, width)
                arr = np.moveaxis(arr, 3, 1)
                arr = np.moveaxis(arr, 3, 2)
                return arr
            # Joeran has his numpy arrays ordered differently.
            predictions_blur = move_dims(np.squeeze(predictions_blur,axis=4))
            segmentations = move_dims(segmentations)
            # predictions = [preprocess_softmax(pred, threshold="dynamic")[0] for pred in predictions_blur]
            predictions = predictions_blur
            # print("the size of predictions is:",np.shape(predictions))
            # Remove outer edges
            zeros = np.zeros(np.shape(predictions))
            test = predictions[:,2:-2,2:190,2:190]
            zeros[:,2:-2,2:190,2:190] = test
            predictions = zeros
            predictions = np.squeeze(predictions)
            # print('size pred:',np.shape(predictions))
            pred = sitk.GetImageFromArray(predictions)
            sitk.WriteImage(pred, f'../temp/lowest_pred_exp/worst/{pat_id}_{model}_pred.nii.gz')
            print('shape image:',np.shape(images_list))
            t2 = sitk.GetImageFromArray(np.squeeze(images_list)[:,:,:,0].T)
            sitk.WriteImage(t2, f'../temp/lowest_pred_exp/worst/{pat_id}_{model}_t2.nii.gz')
            b1400 = sitk.GetImageFromArray(np.squeeze(images_list)[:,:,:,1].T)
            sitk.WriteImage(b1400, f'../temp/lowest_pred_exp/worst/{pat_id}_{model}_b1400.nii.gz')
            adc = sitk.GetImageFromArray(np.squeeze(images_list)[:,:,:,2].T)
            sitk.WriteImage(adc, f'../temp/lowest_pred_exp/worst/{pat_id}_{model}_adc.nii.gz')
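
A final sanity check on `move_dims`: the two successive np.moveaxis calls amount to one fixed transpose, so an equivalent formulation (an observation, not code from this commit) would be:

import numpy as np

def move_dims_transpose(arr):
    # (batch, width, height, depth) -> (batch, depth, height, width)
    return np.transpose(arr, (0, 3, 2, 1))

# Agrees with the moveaxis version on random data.
x = np.random.rand(2, 192, 192, 24)
assert np.array_equal(move_dims_transpose(x), np.moveaxis(np.moveaxis(x, 3, 1), 3, 2))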