Latest DICOM to NIfTI code

This commit is contained in:
Ananthi Somasundaram 2019-11-27 15:17:31 +01:00
parent f1730cdf1d
commit d761866278
129 changed files with 1238 additions and 152 deletions


@@ -381,7 +381,7 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 3,
"metadata": {},
"outputs": [
{
@@ -392,10 +392,7 @@
"\n",
"PET image. Processing...\n",
"Static scan. Processing...\n",
"Ideal Image Orientation (0020,0037). Processing...\n",
"Head First Supine (HFS). Processing...\n",
"PET slice z-coordinate decreases -> Flip in z-coordinates applied\n",
"DICOM converted to NIfTI: C:\\Users\\vallezgard\\NIfTI\\data\\output\\s1_2400s_Phantom_EARL1.nii\n"
"DICOM converted to NIfTI: D:\\MyFiles\\Documents\\Work\\DICOM_files\\Test\\s1_300s_Phantom.nii\n"
]
}
],
@@ -423,6 +420,8 @@
"\n",
"(0028,1053) Slope\n",
"\n",
"(0054,0081) Number of slices per frame\n",
"\n",
"(0054,1000) Series Type of the scan\n",
"\n",
"(0054,0414) Patient Gantry Relationship Code Sequence, i.e. orientation of the patient within the gantry.(Empty if Unknown)\n",
@@ -436,10 +435,11 @@
"!cls\n",
"## Libraries\n",
"import os\n",
"import numpy as np\n",
"import nibabel as nib\n",
"import sys\n",
"import pydicom\n",
"import numpy as np\n",
"import tkinter as tk\n",
"import nibabel as nib\n",
"from tkinter import filedialog\n",
"\n",
"## Input data\n",
@@ -452,104 +452,107 @@
"input_dicom_dir = os.path.abspath(filedialog.askdirectory(title=\"Please select the input DICOM folder\"))\n",
"\n",
"root.deiconify() # Makes the window visible again\n",
"root.withdraw()\n",
"output_dir = os.path.abspath(filedialog.askdirectory(title=\"Please select a folder to save output NIfTI file(s)\"))\n",
"root.destroy()\n",
"root.destroy() # Destroys the root window along with all other tkinter widgets\n",
"\n",
"study_name = 'Phantom_EARL1'\n",
"study_name = os.path.basename(input_dicom_dir) # The study name is the same as input DICOM directory name\n",
"\n",
"## Predefined variables\n",
"ideal_Image_Orientation = ['1', '0', '0', '0', '1', '0']\n",
"\n",
"## Functions\n",
"def read_dicom_files(input_dicom_dir):\n",
" \n",
" # Read the DICOM dataset from the input directory. \n",
" # If the DICOM dataset is not stored in accordance with the DICOM Standard File Format, SET force to True.\n",
" # Do not read files with .db or .xml suffixes (to avoid Thumbs.db and VinciDC0.xml files). \n",
" # ADD more such files to avoid later as required!\n",
" \n",
" dicom_files = os.listdir(input_dicom_dir)\n",
" \n",
" try:\n",
" ds_list = [pydicom.dcmread(os.path.join(input_dicom_dir, filename), force = False) \\\n",
" for filename in dicom_files \\\n",
" if filename.endswith(('.IMA','.dcm'))]\n",
" dcm_dataset = [pydicom.dcmread(os.path.join(input_dicom_dir, filename), force = False) \\\n",
" for filename in os.listdir(input_dicom_dir) \\\n",
" if not filename.endswith(('.db','.xml'))]\n",
" except:\n",
" sys.exit(\"Not a valid DICOM files were found. Only .IMA and .dcm is implemented.\")\n",
" sys.exit(\"Invalid DICOM file(s) was found.\")\n",
" \n",
" return ds_list\n",
" return dcm_dataset\n",
"\n",
"def sort_dicom_files(ds_list): \n",
" sorted_ds_list = ds_list.copy()\n",
" sorted_ds_list.sort(key = lambda x: int(x.ImageIndex))\n",
"def get_z_coordinates(dcm_dataset):\n",
" \n",
" return sorted_ds_list\n",
" # Get the z_coordinates based on the following Required tags. \n",
"\n",
"def get_img_volume(sorted_ds_list):\n",
" # Get the voxel intensity array of each slice and stack it in the z-direction\n",
" # Info:\n",
" # direction cosines: https://en.wikipedia.org/wiki/Direction_cosine\n",
" # http://mathworld.wolfram.com/DirectionCosine.html\n",
" # cross product: https://en.wikipedia.org/wiki/Cross_product\n",
" # http://mathworld.wolfram.com/CrossProduct.html\n",
" # dot product: https://en.wikipedia.org/wiki/Dot_product\n",
" # http://mathworld.wolfram.com/DotProduct.html\n",
" \n",
" # Transpose the array to change the array order from row-major to column-major\n",
" # ToDo : Preallocate variable\n",
" img_volume_data = np.transpose(sorted_ds_list[0].pixel_array)\n",
" nr_of_slices = dcm_dataset[0].NumberOfSlices\n",
" \n",
" for ds in sorted_ds_list[1:]:\n",
" img_slice_data = ds.pixel_array\n",
" img_slice_data_transposed = img_slice_data.T\n",
" img_volume_data = np.dstack((img_volume_data, img_slice_data_transposed))\n",
" # x and y direction cosines\n",
" x_dir_cos = np.array(list(map(float,dcm_dataset[0].ImageOrientationPatient[:3])))\n",
" y_dir_cos = np.array(list(map(float,dcm_dataset[0].ImageOrientationPatient[3:])))\n",
" \n",
" Image_Orientation = sorted_ds_list[0].ImageOrientationPatient # (0020,0037)\n",
" # For vectors a and b, a × b vector is perpendicular to both a and b and thus normal to the plane containing them\n",
" z_dir_cos = np.cross(x_dir_cos, y_dir_cos)\n",
" \n",
" Patient_Position = sorted_ds_list[0].PatientPosition # (0018,5100)\n",
" image_position_list = [dcm_dataset[i].ImagePositionPatient._list \\\n",
" for i in range(0, nr_of_slices)]\n",
" \n",
" patient_orientation_code = sorted_ds_list[0].PatientGantryRelationshipCodeSequence._list[0].CodeValue # (0054,0414)\n",
" patient_orientation = sorted_ds_list[0].PatientGantryRelationshipCodeSequence._list[0].CodeMeaning \n",
" \n",
" \n",
" Image_Position_0 = sorted_ds_list[0].ImagePositionPatient # (0020,0032)\n",
" Image_Position_1 = sorted_ds_list[1].ImagePositionPatient # (0020,0032)\n",
" \n",
" # Check for the Patient Position: HFS or FFS, and reorder the slices in z-direction accordingly\n",
" if str(Image_Orientation) == str(ideal_Image_Orientation):\n",
" \n",
" print(\"Ideal Image Orientation (0020,0037). Processing...\")\n",
" \n",
" if Patient_Position == \"HFS\" and patient_orientation_code == 'F-10470' and patient_orientation == 'headfirst':\n",
" \n",
" print(\"Head First Supine (HFS). Processing...\")\n",
" \n",
" if int(Image_Position_0[2]) > int(Image_Position_1[2]):\n",
" \n",
" print(\"PET slice z-coordinate decreases -> Flip in z-coordinates applied\")\n",
" \n",
" final_ds_list = sorted_ds_list.copy() \n",
" final_ds_list.reverse()\n",
" \n",
" img_volume_data_final = np.transpose(final_ds_list[0].pixel_array)\n",
" \n",
" for ds in final_ds_list[1:]:\n",
" img_slice_data_final = ds.pixel_array\n",
" img_slice_data_final_transposed = img_slice_data_final.T\n",
" img_volume_data_final = np.dstack((img_volume_data_final, img_slice_data_final_transposed))\n",
" \n",
" elif int(Image_Position_0[2]) < int(Image_Position_1[2]):\n",
" \n",
" print(\"PET slice z-coordinate increases. Please, check DICOM information.\")\n",
" \n",
" elif Patient_Position == \"FFS\" and patient_orientation_code == 'F-10480' and patient_orientation == 'feet-first': \n",
" print(\"Feet First Supine (FFS). Processing...\")\n",
" \n",
" if int(Image_Position_0[2]) < int(Image_Position_1[2]): \n",
" print(\"PET slice z-coordinate increases. Processing...\") \n",
" img_volume_data_final = img_volume_data\n",
" \n",
" elif int(Image_Position_0[2]) > int(Image_Position_1[2]): \n",
" print(\"PET slice z-coordinate decreases. Please, check DICOM information.\") \n",
" \n",
" else:\n",
" sys.exit(\"Sorry, this orientation was not implemented yet.\")\n",
" \n",
" else:\n",
" sys.exit(\"Please, check Image Orientation tag (0020,0037) and Patient Gantry Relationship tag (0054,0414)\")\n",
" \n",
" return img_volume_data_final\n",
" # Converting the x,y,z coordinates from str to float\n",
" image_position_patient = [[float(coordinate) \\\n",
" for coordinate in image_position] \\\n",
" for image_position in image_position_list]\n",
"\n",
"''''' No need to anonymize now the DICOM\n",
"def get_header_data(dcm_set, output_filename): \n",
" # Vector dot product or scalar inner product to get z coordinates\n",
" z_coordinates = np.dot(image_position_patient, z_dir_cos)\n",
" \n",
" return z_coordinates, nr_of_slices\n",
"\n",
"def sort_dicom_files(dcm_dataset):\n",
" \n",
" # Sort the DICOM dataset based on the z_coordinates.\n",
"\n",
" z_coordinates, nr_of_slices = get_z_coordinates(dcm_dataset)\n",
" sorted_dcm_dataset = [x for _,x in sorted(zip(z_coordinates, dcm_dataset))]\n",
" \n",
" return sorted_dcm_dataset, nr_of_slices\n",
"\n",
"def get_voxel_size_z(dcm_dataset):\n",
" \n",
" # The absolute difference between two consecutive z_coordinates gives the voxel size along the z-direction, in mm. \n",
" \n",
" z_coordinates, _ = get_z_coordinates(dcm_dataset)\n",
" voxel_size_z = abs(z_coordinates[0] - z_coordinates[1])\n",
" \n",
" return voxel_size_z\n",
"\n",
"def get_img_volume(dcm_dataset): \n",
" \n",
" # 1. Get the voxel intensity array of each slice\n",
" # 2. Transpose the array to account for the change in array order from row-major to column-major\n",
" # 3. Stack it in the z-direction\n",
" \n",
" img_volume_array = np.transpose(dcm_dataset[0].pixel_array)\n",
" \n",
" for dcm_slice in dcm_dataset[1:]:\n",
" img_slice_array = np.transpose(dcm_slice.pixel_array)\n",
" img_volume_array = np.dstack((img_volume_array, img_slice_array))\n",
" \n",
" return img_volume_array\n",
"\n",
"def get_header_data(dcm_set, output_filename):\n",
"\n",
" # Dump the DICOM header into a text file with the same filename as NIFTI \n",
" # Set anonymize_flag = \"Y\" to not add patient data tags in the text file\n",
" \n",
" anonymize_flag = \"Y\"\n",
" \n",
" patient_tags = ['PatientID', 'PatientName', 'PatientBirthDate']\n",
" \n",
" output_txt_filename = output_filename + \".txt\"\n",
@@ -559,107 +562,96 @@
" if anonymize_flag == \"Y\":\n",
" if header_tag not in [dcm_set[0].data_element(tag) for tag in patient_tags]:\n",
" file.write(str(header_tag) + '\\n')\n",
" file.close() \n",
"'''''\n",
" file.close() \n",
"\n",
"def convert_to_nifti(dcm_set, output_file, frame_number, scan_duration_in_sec):\n",
"def convert_to_nifti(dcm_dataset, output_nifti_file):\n",
" \n",
" img_volume_data_final = get_img_volume(dcm_set)\n",
" img_volume_array_in_LPS = get_img_volume(dcm_dataset)\n",
" \n",
" # From LPS in DICOM to RAS in NIfTI\n",
" img_data = np.fliplr(img_volume_data_final)\n",
" img_data_volume = np.flipud(img_data)\n",
" #img_data_volume = np.rot90(img_volume_data_final, 2) # Alternative: 180 rotation instead of two flips\n",
" img_volume_array_in_RAS = np.flipud(np.fliplr(img_volume_array_in_LPS))\n",
" #img_volume_array_in_RAS = np.rot90(img_volume_array_in_LPS, 2) # Alternative: 180 rotation instead of two flips\n",
" \n",
" Pixel_Spacing = dcm_set[0].PixelSpacing\n",
" pixel_spacing = dcm_dataset[0].PixelSpacing\n",
" \n",
" Slice_Thickness = dcm_set[0].SliceThickness\n",
" voxel_size_z = get_voxel_size_z(dcm_dataset)\n",
" \n",
" voxel_size = np.array([float(Pixel_Spacing[0]), float(Pixel_Spacing[1]), float(Slice_Thickness)])\n",
" voxel_size = np.array([float(pixel_spacing[0]), float(pixel_spacing[1]), voxel_size_z])\n",
" \n",
" slope = dcm_set[0].RescaleSlope # (0028,1053) \n",
" intercept = dcm_set[0].RescaleIntercept # (0028,1052)\n",
" slope = dcm_dataset[0].RescaleSlope # (0028,1053) \n",
" intercept = dcm_dataset[0].RescaleIntercept # (0028,1052)\n",
" \n",
" # Change the datatype from int to float and apply slope and intercept to the array\n",
" img_data_volume = img_data_volume.astype(float)\n",
" img_data_volume_final = (img_data_volume * slope) + intercept\n",
" img_volume_array = (img_volume_array_in_RAS.astype(float) * slope) + intercept\n",
" \n",
" # Origin of coordinates: centre of the image\n",
" center = (voxel_size * img_data_volume.shape) / 2\n",
" center = (voxel_size * img_volume_array.shape) / 2\n",
" \n",
" # Affine matrix\n",
" apply_affine = np.diag([voxel_size[0], voxel_size[1], voxel_size[2], 1])\n",
" apply_affine[:3,3] = np.array([-center[0], -center[1], -center[2]])\n",
" \n",
" nii_out = nib.Nifti1Image(img_data_volume_final, apply_affine)\n",
" nii_out = nib.Nifti1Image(img_volume_array, apply_affine)\n",
" \n",
" # Adjust NIfTI header\n",
" nii_out.header['qform_code'] = 1\n",
" nii_out.header['sform_code'] = 2\n",
"\n",
" #nii_out.set_data_dtype(np.float32)\n",
" #xyz_unit = 'mm'\n",
" #nii_out.header.set_xyzt_units(xyz=xyz_unit)\n",
" #nii_out.header.set_data_offset(352)\n",
" #nii_out.header['extents'] = 16384 # Remove?\n",
" #nii_out.header['regular'] = 'r' # Remove?\n",
" #nii_out.header['intent_name'] = 0 # Remove?\n",
" #nii_out.header['cal_max'] = np.max(img_data_volume_final) # Check if present in header if not specified\n",
" #nii_out.header['cal_min'] = np.min(img_data_volume_final) # Check if present in header if not specified \n",
" nii_out.set_data_dtype(np.float32) # Float32: slope=1 and intercept=0. Software does not have to apply manually. \n",
" nii_out.header.set_xyzt_units(xyz='mm') # Set the voxel size unit manually\n",
" nii_out.header['intent_code'] = 0 # None\n",
" nii_out.header['intent_name'] = 'PET'\n",
" nii_out.header['cal_max'] = np.max(img_volume_array) # For software to apply proper color scaling for visualization\n",
" nii_out.header['cal_min'] = np.min(img_volume_array) \n",
" \n",
" # Save the NIfTI file\n",
" nib.save(nii_out, output_file) \n",
" nib.save(nii_out, output_nifti_file) \n",
" \n",
" print(\"DICOM converted to NIfTI: \",output_file)\n",
" print(\"DICOM converted to NIfTI: \",output_nifti_file)\n",
"\n",
"## Read DICOM\n",
"ds_list = read_dicom_files(input_dicom_dir) # Read the DICOM files from the directory \n",
"sorted_ds_list = sort_dicom_files(ds_list) # Sort the DICOM files based on Image Index (0054,1330)\n",
"dcm_dataset = read_dicom_files(input_dicom_dir) # Read the DICOM files from the directory \n",
"sorted_dcm_dataset, nr_of_slices = sort_dicom_files(dcm_dataset) # Sort the DICOM files based on z-coordinates\n",
"\n",
"# Check DICOM modality\n",
"if sorted_ds_list[0].Modality == 'PT' and sorted_ds_list[0].SOPClassUID == '1.2.840.10008.5.1.4.1.1.128': \n",
"if sorted_dcm_dataset[0].Modality == 'PT' and sorted_dcm_dataset[0].SOPClassUID == '1.2.840.10008.5.1.4.1.1.128': \n",
" print(\"PET image. Processing...\")\n",
" \n",
" # Number of slices per frame\n",
" nr_of_slices = sorted_ds_list[0].NumberOfSlices\n",
" \n",
" frame_number = 1\n",
" \n",
" nr_of_dcm_files = len(sorted_ds_list)\n",
" \n",
" scan_series_type = sorted_ds_list[0].SeriesType[0] # (0054,1000) \n",
" scan_series_type = sorted_dcm_dataset[0].SeriesType[0] # (0054,1000) \n",
" \n",
" if scan_series_type == 'STATIC' or scan_series_type == 'WHOLE BODY': \n",
" print(\"Static scan. Processing...\") \n",
" scan_duration_in_msec = sorted_ds_list[0].ActualFrameDuration \n",
" scan_duration_in_sec = int(scan_duration_in_msec / 1000) \n",
" output_file = \"s\" + str(frame_number) + \"_\" + str(scan_duration_in_sec) + \"s_\" + study_name + \".nii\"\n",
" output_file = os.path.join(output_dir, output_file)\n",
" convert_to_nifti(sorted_ds_list, output_file, frame_number, scan_duration_in_msec) \n",
" scan_duration_in_sec = int(sorted_dcm_dataset[0].ActualFrameDuration / 1000) \n",
" output_filename = \"s\" + str(frame_number) + \"_\" + str(scan_duration_in_sec) + \"s_\" + study_name \n",
" output_nifti_filename = output_filename + \".nii\"\n",
" output_nifti_file = os.path.join(output_dir, output_nifti_filename)\n",
" convert_to_nifti(sorted_dcm_dataset, output_nifti_file) \n",
" get_header_data(sorted_dcm_dataset, output_filename) # Dump the DICOM header into text file\n",
" \n",
" elif scan_series_type == 'DYNAMIC': \n",
" print(\"Dynamic scan. Processing...\") \n",
" nr_of_time_frames = sorted_ds_list[0].NumberOfTimeSlices \n",
" \n",
" # split files in multiple of nr_of_slices. Each frame has \"nr_of_slices\" slices \n",
" dcm_set_split_by_frames = [sorted_ds_list[x:x+nr_of_slices] for x in range(0, len(sorted_ds_list), nr_of_slices)]\n",
"\n",
" # Split DICOM dataset in multiple of number of slices per frame. \n",
" dcm_dataset_split_by_frames = [sorted_dcm_dataset[x:x+nr_of_slices] \\\n",
" for x in range(0, len(sorted_dcm_dataset), nr_of_slices)]\n",
" \n",
" for dcm_set in dcm_set_split_by_frames: \n",
" scan_duration_in_msec = dcm_set[0].ActualFrameDuration \n",
" scan_duration_in_sec = int(scan_duration_in_msec / 1000) \n",
" output_file = \"s\" + str(frame_number) + \"_\" + str(scan_duration_in_sec) + \"s_\" + study_name + \".nii\"\n",
" output_file = os.path.join(output_dir, output_file)\n",
" convert_to_nifti(dcm_set, output_file, frame_number, scan_duration_in_sec) \n",
" for dcm_dataset in dcm_dataset_split_by_frames:\n",
" scan_duration_in_sec = int(dcm_dataset[0].ActualFrameDuration / 1000)\n",
" output_nifti_filename = output_filename + \".nii\"\n",
" output_nifti_file = os.path.join(output_dir, output_nifti_filename)\n",
" convert_to_nifti(sorted_dcm_dataset, output_nifti_file) \n",
" get_header_data(sorted_dcm_dataset, output_filename) # Dump the DICOM header into text file\n",
" frame_number += 1 \n",
" \n",
" elif scan_series_type == 'GATED':\n",
" sys.exit(\"GATED scans are not supported yet\")\n",
" \n",
" else:\n",
" sys.exit(\"Please, check Scan Series Type (0054,1000)\")\n",
" sys.exit(\"Please check Scan Series Type (0054,1000)\")\n",
" \n",
"else: \n",
" sys.exit(\"Please, check Image Modality\")\n"
" sys.exit(\"Please check Image Modality\")\n"
]
},
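The resulting file can be sanity-checked with nibabel; a minimal sketch, assuming the output shown above (s1_300s_Phantom.nii) sits in the current working directory:

    import nibabel as nib

    nii = nib.load("s1_300s_Phantom.nii")

    print(nii.shape)                     # matrix size (x, y, z)
    print(nii.header.get_zooms())        # voxel size in mm, taken from the affine
    print(nii.header['qform_code'], nii.header['sform_code'])          # 1 and 2, as set above
    print(float(nii.header['cal_min']), float(nii.header['cal_max']))  # display intensity range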
{
@@ -701,7 +693,7 @@
"shape_Clinical = (111,400,400)\n",
"\n",
"# Reconstruction protocol used\n",
"recon_used = \"Clinical\" # Options: Clinical, EARL1 and EALR2\n",
"recon_used = \"Clinical\" # Options: Clinical, EARL1 and EARL2\n",
"\n",
"## Example dataset\n",
"cwd = os.getcwd()\n",
@@ -729,21 +721,22 @@
"elif recon_used == 'EARL1' or recon_used == 'EARL2':\n",
" reshaped_voi_data = np.reshape(voi_data, shape_EARL)\n",
" voxel_size = voxel_size_EARL\n",
" \n",
"else: \n",
" sys.exit('This reconstruction was not implemented yet')\n",
"\n",
"# Transpose the array to change the array order from row-major to column-major \n",
"voi_swapped_axes = reshaped_voi_data.T # Also: np.swapaxes(reshaped_voi_data, 2, 0) #\n",
"\n",
"# RAS in NIfTI: From Posterior to Anterior\n",
"flipped_voi = np.flipud(voi_swapped_axes)\n",
"\n",
"# Binary Mask of 0's and 1's\n",
"voi_swapped_axes[voi_swapped_axes <= 50] = 0\n",
"voi_swapped_axes[voi_swapped_axes > 50] = 1\n",
"\n",
"# RAS in NIfTI: From Posterior to Anterior\n",
"flipped_voi = np.flipud(voi_swapped_axes)\n",
"\n",
"# Shape of image\n",
"matrix_size = np.array(voi_swapped_axes.shape)\n",
"matrix_size = np.array(flipped_voi.shape)\n",
"\n",
"# Centre of image\n",
"center = (voxel_size * matrix_size) / 2\n",
@@ -755,21 +748,19 @@
"voi_nifti_out = nib.Nifti1Image(flipped_voi, apply_affine)\n",
"\n",
"# Set header\n",
"voi_nifti_out.set_data_dtype(np.uint8)\n",
"voi_nifti_out.set_data_dtype(np.int8)\n",
"voi_nifti_out.header['qform_code'] = 1\n",
"voi_nifti_out.header['sform_code'] = 2\n",
"#xyz_unit = 'mm'\n",
"#voi_nifti_out.header.set_xyzt_units(xyz=xyz_unit)\n",
"#voi_nifti_out.header.set_data_offset(352)\n",
"#voi_nifti_out.header['extents'] = 16384\n",
"#voi_nifti_out.header['regular'] = 'r'\n",
"#voi_nifti_out.header['intent_name'] = 0\n",
"#voi_nifti_out.header['cal_max'] = np.max(voi_swapped_axes)\n",
"#voi_nifti_out.header['cal_min'] = np.min(voi_swapped_axes)\n",
"voi_nifti_out.header.set_xyzt_units(xyz='mm') # Set the voxel size unit manually\n",
"voi_nifti_out.header.set_data_offset(352)\n",
"voi_nifti_out.header['intent_code'] = 0 # None\n",
"voi_nifti_out.header['intent_name'] = 'PET_VOI'\n",
"voi_nifti_out.header['cal_max'] = np.max(flipped_voi) # For software to apply proper color scaling for visualization\n",
"voi_nifti_out.header['cal_min'] = np.min(flipped_voi) \n",
"\n",
"# Save the NIfTI file\n",
"nib.save( voi_nifti_out, os.path.join(output_dir, voi_nifti_out_filename) )\n",
"print('Congratulations! ACCURATE VOI was saved as NIfTI file')\n",
"print('ACCURATE VOI was successfully saved as NIfTI file')\n",
"print(os.path.join(output_dir, voi_nifti_out_filename))\n"
]
},
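Since the VOI mask is written with the same centred-origin affine construction as the PET image, alignment can be verified with nibabel; a minimal sketch with hypothetical filenames (voi_mask.nii stands in for the saved ACCURATE VOI output):

    import numpy as np
    import nibabel as nib

    pet = nib.load("s1_300s_Phantom.nii")   # PET image converted above
    voi = nib.load("voi_mask.nii")          # hypothetical name of the saved VOI mask

    print(pet.shape == voi.shape)                    # True if the mask matches the PET matrix
    print(np.allclose(pet.affine, voi.affine))       # True if both share the same affine
    print(int(np.count_nonzero(voi.get_fdata())))    # number of voxels inside the VOI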
@@ -1096,7 +1087,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.1"
"version": "3.7.3"
}
},
"nbformat": 4,

File diff suppressed because one or more lines are too long

Some files were not shown because too many files have changed in this diff.