import sys
from os import path

import SimpleITK as sitk
import tensorflow as tf
from tensorflow.keras.models import load_model
# Importing BinaryFocalLoss registers the custom loss with Keras so that
# load_model can deserialize a model trained with it.
from focal_loss import BinaryFocalLoss
import matplotlib.pyplot as plt
import numpy as np

from sfransen.Saliency.base import *
from sfransen.Saliency.integrated_gradients import *

sys.path.append('./../code')
from utils_quintin import *

sys.path.append('./../code/DWI_exp')
from sfransen.DWI_exp import preprocess
from sfransen.DWI_exp.helpers import *
from callbacks import dice_coef

sys.path.append('./../code/FROC')
from blob_preprocess import *
from cal_froc_from_np import *

# Experiment: train_10h_t2_b50_b400_b800_b1400_adc
SERIES = ['t2', 'b50', 'b400', 'b800', 'b1400', 'adc']
EXPERIMENT_DIR = './../train_output/train_10h_t2_b50_b400_b800_b1400_adc'
MODEL_PATH = f'{EXPERIMENT_DIR}/models/train_10h_t2_b50_b400_b800_b1400_adc.h5'
YAML_DIR = EXPERIMENT_DIR

################ constants ############
DATA_DIR = "./../data/Nijmegen paths/"
TARGET_SPACING = (0.5, 0.5, 3)
INPUT_SHAPE = (192, 192, 24, len(SERIES))
IMAGE_SHAPE = INPUT_SHAPE[:3]

# Select the 10 validation cases with the highest predicted probability.
experiment_metrics = read_yaml_to_dict(f'{EXPERIMENT_DIR}/froc_metrics.yml')
DATA_SPLIT_INDEX = read_yaml_to_dict('./../data/Nijmegen paths/train_val_test_idxs.yml')
TEST_INDEX = DATA_SPLIT_INDEX['val_set0']
top_10_idx = np.argsort(experiment_metrics['roc_pred'])[-10:]
TEST_INDEX = [TEST_INDEX[i] for i in top_10_idx]

########## load images ##############
images = {s: [] for s in SERIES}
image_paths = {}
segmentations = []
print_("> Loading images into RAM...")

for s in SERIES:
    with open(path.join(DATA_DIR, f"{s}.txt"), 'r') as f:
        image_paths[s] = [l.strip() for l in f.readlines()]
with open(path.join(DATA_DIR, "seg.txt"), 'r') as f:
    seg_paths = [l.strip() for l in f.readlines()]
num_images = len(seg_paths)
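# Sanity check (added sketch): this assumes each per-series .txt file lists
# exactly one path per segmentation in seg.txt; drop or relax the assert if
# the dataset layout allows differing lengths.
for s in SERIES:
    assert len(image_paths[s]) == num_images, \
        f"Series '{s}' has {len(image_paths[s])} paths, expected {num_images}"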
# Read and preprocess the images for each series, and the segmentations.
for img_idx in TEST_INDEX[:5]:  # limit to 5 images
    img_s = {s: sitk.ReadImage(image_paths[s][img_idx], sitk.sitkFloat32) for s in SERIES}
    seg_s = sitk.ReadImage(seg_paths[img_idx], sitk.sitkFloat32)
    img_n, seg_n = preprocess(img_s, seg_s, shape=IMAGE_SHAPE, spacing=TARGET_SPACING)
    for seq in img_n:
        images[seq].append(img_n[seq])
    segmentations.append(seg_n)

# Stack to (num_images, 192, 192, 24, len(SERIES)), channels last.
images_list = np.transpose([images[s] for s in SERIES], (1, 2, 3, 4, 0))

########### load model ##################
dependencies = {
    'dice_coef': dice_coef
}
reconstructed_model = load_model(MODEL_PATH, custom_objects=dependencies)
# reconstructed_model.summary(line_length=120)
# reconstructed_model.layers[-1].activation = tf.keras.activations.linear

print('START saliency')
ig = IntegratedGradients(reconstructed_model)
saliency_map = []
for img_idx in range(len(images_list)):
    # Add a batch dimension: (1, 192, 192, 24, len(SERIES)).
    # An earlier experiment used smaller inputs; per Christian Roest
    # [11-3-2022 15:30], input_img had dimensions (1, 48, 48, 8, 8).
    input_img = np.expand_dims(images_list[img_idx], axis=0)
    saliency_map.append(ig.get_mask(input_img).numpy())

print("size of saliency map is:", np.shape(saliency_map))
np.save('saliency', saliency_map)

# Optional: make predictions on the selected validation images.
# print('START prediction')
# predictions_blur = reconstructed_model.predict(images_list, batch_size=1)
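# Visualization sketch (added): overlay the saliency of one series on the
# corresponding image slice. Assumes saliency_map[i] has shape
# (1, 192, 192, 24, len(SERIES)), matching input_img above; the case index,
# slice index, and series choice below are arbitrary examples.
case, z, s_idx = 0, 12, SERIES.index('t2')
heat = np.abs(saliency_map[case][0, :, :, z, s_idx])
plt.imshow(images_list[case][:, :, z, s_idx], cmap='gray')
plt.imshow(heat, cmap='jet', alpha=0.4)
plt.axis('off')
plt.savefig(f'saliency_overlay_{SERIES[s_idx]}.png', bbox_inches='tight')
plt.close()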