# fast-mri/scripts/20.saliency_exp.py
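"""Saliency experiment.

For every cross-validation fold: load the trained model, run the first ten
test images through it, build integrated-gradients saliency maps, and count
per image which input series (channel) receives the highest mean saliency.

Assumed invocation (series names depend on the dataset, e.g. t2/dwi/adc):
    python 20.saliency_exp.py -experiment <experiment_name> --series t2 dwi adc
"""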
import argparse
import os
from os import path

import numpy as np
import SimpleITK as sitk
import tensorflow as tf
from tensorflow.keras.models import load_model

from sfransen.utils_quintin import *
from sfransen.DWI_exp import preprocess
from sfransen.DWI_exp.helpers import *
from sfransen.DWI_exp.callbacks import dice_coef
from sfransen.DWI_exp.losses import weighted_binary_cross_entropy
from sfransen.FROC.blob_preprocess import *
from sfransen.FROC.cal_froc_from_np import *
from sfransen.load_images import load_images_parrallel
from sfransen.Saliency.base import *
from sfransen.Saliency.integrated_gradients import *
parser = argparse.ArgumentParser(
    description='Build integrated-gradients saliency maps for the test set '
                'and report which input series receives the highest mean saliency.')
parser.add_argument('-experiment', required=True,
                    help='Title of experiment')
parser.add_argument('--series', '-s',
                    metavar='[series_name]', required=True, nargs='+',
                    help='List of series to include')
args = parser.parse_args()
def print_p(*args, **kwargs):
    """Shorthand for print(..., flush=True).

    Useful on an HPC cluster where output writes are buffered.
    """
    print(*args, **kwargs, flush=True)
######## CUDA ################
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
######## constants #############
SERIES = args.series
series_ = '_'.join(args.series)
EXPERIMENT = args.experiment
DATA_DIR = "./../data/Nijmegen paths/"
TARGET_SPACING = (0.5, 0.5, 3)
INPUT_SHAPE = (192, 192, 24, len(SERIES))
IMAGE_SHAPE = INPUT_SHAPE[:3]
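# Input layout: 192x192 in-plane, 24 slices, one channel per requested series;
# IMAGE_SHAPE drops the channel axis for image preprocessing.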
image_paths = {}
for s in SERIES:
    with open(path.join(DATA_DIR, f"{s}.txt"), 'r') as f:
        image_paths[s] = [l.strip() for l in f.readlines()]
with open(path.join(DATA_DIR, "seg.txt"), 'r') as f:
    seg_paths = [l.strip() for l in f.readlines()]
num_images = len(seg_paths)
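# Sanity check (added): every series list should line up case-for-case with seg.txt.
for s in SERIES:
    assert len(image_paths[s]) == num_images, f"path count mismatch for series {s}"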
# Per test image: the index of the input series with the highest mean saliency.
max_saliency_values = []
for fold in range(5):
    print_p("fold:", fold)

    # Per-fold paths.
    MODEL_PATH = f'./../train_output/{EXPERIMENT}_{series_}_{fold}/models/{EXPERIMENT}_{series_}_{fold}.h5'
    YAML_DIR = f'./../train_output/{EXPERIMENT}_{series_}_{fold}'
    IMAGE_DIR = f'./../train_output/{EXPERIMENT}_{series_}_{fold}'

    # Load the model once per fold (not once per image); the custom loss and
    # metric must be passed so Keras can deserialise the model.
    dependencies = {
        'dice_coef': dice_coef,
        'weighted_cross_entropy_fn': weighted_binary_cross_entropy,
    }
    reconstructed_model = load_model(MODEL_PATH, custom_objects=dependencies)
    ig = IntegratedGradients(reconstructed_model)

    # Test indices for this fold.
    DATA_SPLIT_INDEX = read_yaml_to_dict(f'./../data/Nijmegen paths/train_val_test_idxs_{fold}.yml')
    TEST_INDEX_IMGS = DATA_SPLIT_INDEX['test_set0']
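    # Note: only the first 10 test images per fold are evaluated (see the
    # TEST_INDEX_IMGS[:10] slice below).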
    for img_idx in TEST_INDEX_IMGS[:10]:
        print_p("img_idx:", img_idx)

        images = []
        images_list = []
        segmentations = []
        saliency_map = []

        # Read and preprocess each series for this image, plus the segmentation.
        img_s = {s: sitk.ReadImage(image_paths[s][img_idx], sitk.sitkFloat32)
                 for s in SERIES}
        seg_s = sitk.ReadImage(seg_paths[img_idx], sitk.sitkFloat32)
        img_n, seg_n = preprocess(img_s, seg_s,
                                  shape=IMAGE_SHAPE, spacing=TARGET_SPACING)

        # Stack the series as channels: after the transpose, images_list has
        # shape (1, 192, 192, 24, len(SERIES)).
        for seq in img_n:
            images.append(img_n[seq])
        images_list.append(images)
        images = []
        segmentations.append(seg_n)
        images_list = np.transpose(images_list, (0, 2, 3, 4, 1))
        ########### predict #############
        predictions_blur = reconstructed_model.predict(images_list, batch_size=1)
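        # predict() returns one soft probability map per image with shape
        # (1, 192, 192, 24, 1); the trailing channel axis is squeezed off below.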
        ############# preprocess #################
        def move_dims(arr):
            # UMCG numpy convention:   dims = (batch, width, height, depth)
            # Joeran numpy convention: dims = (batch, depth, height, width)
            arr = np.moveaxis(arr, 3, 1)
            arr = np.moveaxis(arr, 3, 2)
            return arr

        # Joeran orders his numpy arrays differently, so reorder the axes.
        predictions_blur = move_dims(np.squeeze(predictions_blur, axis=4))
        segmentations = move_dims(np.asarray(segmentations))
        # Optional deblur step that splits the soft map into individual blobs:
        # predictions = [preprocess_softmax(pred, threshold="dynamic")[0] for pred in predictions_blur]
        predictions = predictions_blur
# print("the size of predictions is:",np.shape(predictions))
# Remove outer edges
zeros = np.zeros(np.shape(predictions))
test = predictions[:,2:-2,2:190,2:190]
zeros[:,2:-2,2:190,2:190] = test
predictions = zeros
# print(np.shape(predictions))
        ######### Build Saliency heatmap ##############
        # One integrated-gradients attribution map per image in the batch; each
        # map has the same shape as the model input.
        for i in range(len(images_list)):
            saliency_map.append(ig.get_mask(images_list[i:i + 1]).numpy())
        # Average the attributions over all spatial axes and record which input
        # channel (series) carries the highest mean saliency for this image.
        idx_max = int(np.argmax(np.mean(np.squeeze(saliency_map), axis=(0, 1, 2))))
        max_saliency_values.append(idx_max)
        print_p("max_saliency_values:", max_saliency_values)
# Channel order follows the --series argument; the counts below assume the
# order [t2, dwi, adc].
t2_max = max_saliency_values.count(0)
dwi_max = max_saliency_values.count(1)
adc_max = max_saliency_values.count(2)
print_p(f"images where t2 has the highest mean saliency: {t2_max}")
print_p(f"images where dwi has the highest mean saliency: {dwi_max}")
print_p(f"images where adc has the highest mean saliency: {adc_max}")
# np.save(f'{YAML_DIR}/saliency_new23',saliency_map)
# np.save(f'{YAML_DIR}/images_list_new23',images_list)
# np.save(f'{YAML_DIR}/segmentations_new23',segmentations)
# np.save(f'{YAML_DIR}/predictions_new23',predictions)
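# Optional (added sketch): persist the per-image winners as well, mirroring the
# commented-out saves above.
# np.save(f'{YAML_DIR}/max_saliency_values', np.asarray(max_saliency_values))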