# Commit e2920aba authored by sjromuel
# (scrape artifact: GitLab commit-page header, converted to comments)
#
# d
#
# parent 33d5f069
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import tkinter as tk
import os
from tkinter import filedialog
from skimage import transform
import SimpleITK as sitk
import argparse
#import os
#import pydot
#from graphviz import Digraph
#import shutil
#from tensorflow.keras import layers, models
#from utils.dataLoader import *
from utils.other_functions import *
from nets.Unet import dice_loss
def main():
    """Score the noisy fake ground truth against the reference segmentation.

    For each of the 16 patients in ``data/npy/``, loads the segmentation
    volume and the noisy-GT volume, one-hot encodes them slice by slice,
    and prints the per-patient mean Dice loss.
    """
    file_path = "data/npy/"
    per_patient_means = []
    for patient in range(1, 17):
        prefix = file_path + "P" + str(patient).zfill(2)
        seg_vol = np.load(prefix + "_seg.gipl.npy")
        noisy_vol = np.load(prefix + "_ctfgt2.gipl.npy")
        # Add a trailing channel axis: (slices, 512, 512, 1).
        seg_vol = np.reshape(seg_vol, (seg_vol.shape[0], 512, 512, 1))
        noisy_vol = np.reshape(noisy_vol, (noisy_vol.shape[0], 512, 512, 1))
        # Iterate the volume one slice at a time.
        slice_ds = tf.data.Dataset.from_tensor_slices((noisy_vol, seg_vol))
        slice_ds = slice_ds.batch(batch_size=1)
        slice_losses = []
        for noisy_slice, seg_slice in slice_ds:
            # One-hot encode both tensors before comparing them.
            encoded_noisy = onehotencode(noisy_slice, autoencoder=True)
            encoded_seg = onehotencode(seg_slice, autoencoder=True)
            slice_losses.append(dice_loss(encoded_noisy, encoded_seg))
        per_patient_means.append(np.mean(slice_losses))
    print(per_patient_means)
# NOTE(review): everything between the triple-quoted markers below is
# commented-out legacy U-Net evaluation code, disabled by wrapping it in a
# module-level string literal; kept for reference only.
'''
##################### U-Net #####################
if 'Unet' in file_path or 'Cluster' in file_path or 'class' in file_path:
from nets.Unet import run_test_patient, show_test_patient_pred, dice_loss, Unet
print("UNet_model selected")
### read out files ###
weights = np.load(file_path+"model.npy", allow_pickle=True)
[test_patients,
val_patients,
number_patients,
img_path,
shrink_data,
newSize,
lr,
batch_size,
num_epochs,
e,
augment,
save_path,
gt_type,
filter_multiplier] = np.load(file_path+"params.npy", allow_pickle=True)
# autoencoder_model__e100_switchclass1024_nohiddenclusternet needs to comment out gt_type and val_patients
print('Training Parameters:')
print('-----------------')
print('Number of Patients: ', number_patients)
print('Number of epochs: ', num_epochs)
print('Test Patient number: ', test_patients)
print('Image Size: ', newSize)
print('Filter Multiplier: ', filter_multiplier)
print('Data Augmentation: ', augment)
print('Learning rate: ', lr)
print('Image Path: ', img_path)
print('Save Path: ', save_path)
print('GT Type:', gt_type)
### Load test patient
if gt_type == "thresh":
img_path = "../data/npy_thresh/"
else:
img_path = "../data/npy/"
full_list = os.listdir(img_path)
seg_list = os.listdir("../data/npy/")
X_img_list = []
GT_img_list = []
ytrue_img_list = []
# thresh_img_list = []
for elem in full_list:
if elem.endswith("ct.gipl.npy") and (elem.startswith('P' + str(test_patients[0]).zfill(2)) or elem.startswith('P' + str(test_patients[1]).zfill(2))):
X_img_list.append(elem)
if gt_type == "thresh":
X_img_list.append(elem)
X_img_list.append(elem)
elif elem.endswith(gt_type+".gipl.npy") and (elem.startswith('P' + str(test_patients[0]).zfill(2)) or elem.startswith('P' + str(test_patients[1]).zfill(2))):
GT_img_list.append(elem)
for elem in seg_list:
if elem.endswith("seg.gipl.npy") and (elem.startswith('P' + str(test_patients[0]).zfill(2)) or elem.startswith('P' + str(test_patients[1]).zfill(2))):
ytrue_img_list.append(elem)
if gt_type == "thresh":
ytrue_img_list.append(elem)
ytrue_img_list.append(elem)
list.sort(X_img_list)
list.sort(GT_img_list)
list.sort(ytrue_img_list)
print(X_img_list)
print(GT_img_list)
print(ytrue_img_list)
for j in range(len(X_img_list)):
X_img_npys = np.load(img_path + X_img_list[j])
GT_img_npys = np.load(img_path + GT_img_list[j])
ytrue_img_npys = np.load(img_path + ytrue_img_list[j])
X_img_npys = transform.resize(X_img_npys, (X_img_npys.shape[0], newSize[0], newSize[1]), order=0,
preserve_range=True, mode='constant', anti_aliasing=False,
anti_aliasing_sigma=None)
GT_img_npys = transform.resize(GT_img_npys, (GT_img_npys.shape[0], newSize[0], newSize[1]), order=0,
preserve_range=True, mode='constant', anti_aliasing=False,
anti_aliasing_sigma=None)
ytrue_img_npys = transform.resize(ytrue_img_npys, (ytrue_img_npys.shape[0], newSize[0], newSize[1]),
order=0,
preserve_range=True, mode='constant', anti_aliasing=False,
anti_aliasing_sigma=None)
X_test = np.reshape(X_img_npys, (X_img_npys.shape[0], X_img_npys.shape[1], X_img_npys.shape[2], 1))
GT_test = np.reshape(GT_img_npys, (GT_img_npys.shape[0], GT_img_npys.shape[1], GT_img_npys.shape[2], 1))
ytrue = np.reshape(ytrue_img_npys,
(ytrue_img_npys.shape[0], ytrue_img_npys.shape[1], ytrue_img_npys.shape[2], 1))
test_dataset = tf.data.Dataset.from_tensor_slices((X_test, ytrue))
test_dataset = test_dataset.batch(batch_size=1)
#test_patient_pred = run_test_patient(test_dataset, weights, filter_multiplier)
test_loss = []
test_loss_hdd = []
counter=0
for features in test_dataset:
image, gt = features
y_true = onehotencode(gt)
y_pred = Unet(image, weights, filter_multiplier, training=False)
# print(tf.shape(y_pred), tf.shape(y_true))
#y_true = onehotencode(tf.reshape(y_true, (1, 512, 512, 1)), autoencoder=True)
#y_pred = tf.reshape(y_pred, (1, 512, 512, 2))
fig = plt.figure()
fig.add_subplot(1, 3, 1)
plt.title("Prediction")
plt.imshow(y_pred[0, :, :, 0], cmap=plt.cm.bone)
fig.add_subplot(1, 3, 2)
plt.title("True Segmentation")
plt.imshow(y_true[0, :, :, 0], cmap=plt.cm.bone)
fig.add_subplot(1, 3, 3)
plt.title("Train-Segmentation (Fake GT)")
plt.imshow(GT_test[counter, :, :, 0], cmap=plt.cm.bone)
counter = counter+1
loss = dice_loss(y_pred, y_true)
loss = tf.make_ndarray(tf.make_tensor_proto(loss))
test_loss.append(loss)
#print(test_loss)
try:
gt_np = np.squeeze(y_true[0, :, :, 0].numpy() > 0)
gt_np = gt_np.astype(np.float_)
pred_np = np.squeeze(y_pred[0, :, :, 0].numpy() > 0)
pred_np = pred_np.astype(np.float_)
hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()
hausdorff_distance_filter.Execute(sitk.GetImageFromArray(gt_np), sitk.GetImageFromArray(pred_np))
test_loss_hdd.append(hausdorff_distance_filter.GetHausdorffDistance())
except:
pass
#plt.show()
#print(test_loss)
print("TestLoss Mean for P", test_patients[j], ": ", np.mean(test_loss))
#print(test_loss_hdd)
print("Hausdorff-Distance for P", test_patients[j],":", np.mean(test_loss_hdd))
#####
X_img_npys = np.load(img_path + X_img_list[0])
GT_img_npys = np.load(img_path + GT_img_list[0])
ytrue_img_npys = np.load("../data/npy/" + ytrue_img_list[0])
for i in range(len(X_img_list)-1):
X_img_npys = np.append(X_img_npys, np.load(img_path + X_img_list[i+1]), axis=0)
GT_img_npys = np.append(GT_img_npys, np.load(img_path + GT_img_list[i+1]), axis=0)
ytrue_img_npys = np.append(ytrue_img_npys, np.load("../data/npy/" + ytrue_img_list[i+1]), axis=0)
X_img_npys = transform.resize(X_img_npys, (X_img_npys.shape[0], newSize[0], newSize[1]), order=0,
preserve_range=True, mode='constant', anti_aliasing=False, anti_aliasing_sigma=None)
GT_img_npys = transform.resize(GT_img_npys, (GT_img_npys.shape[0], newSize[0], newSize[1]), order=0,
preserve_range=True, mode='constant', anti_aliasing=False, anti_aliasing_sigma=None)
ytrue_img_npys = transform.resize(ytrue_img_npys, (ytrue_img_npys.shape[0], newSize[0], newSize[1]), order=0,
preserve_range=True, mode='constant', anti_aliasing=False, anti_aliasing_sigma=None)
X_test = np.reshape(X_img_npys,(X_img_npys.shape[0], X_img_npys.shape[1], X_img_npys.shape[2], 1))
GT_test = np.reshape(GT_img_npys, (GT_img_npys.shape[0], GT_img_npys.shape[1], GT_img_npys.shape[2], 1))
ytrue = np.reshape(ytrue_img_npys, (ytrue_img_npys.shape[0], ytrue_img_npys.shape[1], ytrue_img_npys.shape[2], 1))
### recreate test-dataset ###
test_dataset = tf.data.Dataset.from_tensor_slices((X_test, GT_test))
test_dataset = test_dataset.batch(batch_size)
print("x_test shape: " + str(X_test.shape))
### test model and show plots ###
test_patient_pred = run_test_patient(test_dataset, weights, filter_multiplier)
print("test_patient_pred shape: " + str(test_patient_pred.shape))
# show_test_patient_pred(test_patient_pred, test_dataset)
print(GT_test.shape)
### compute loss for each slice ###
test_loss = []
test_loss_hdd = []
for i in range(test_patient_pred.shape[0]):
y_pred = tf.convert_to_tensor(test_patient_pred[i])
y_true = tf.convert_to_tensor(ytrue[i])
#print(tf.shape(y_pred), tf.shape(y_true))
y_true = onehotencode(tf.reshape(y_true, (1, 512, 512, 1)), autoencoder=True)
y_pred = tf.reshape(y_pred, (1, 512, 512, 2))
loss = dice_loss(y_pred, y_true)
loss = tf.make_ndarray(tf.make_tensor_proto(loss))
test_loss.append(loss)
gt_np = np.squeeze(y_true[0, :, :, 0].numpy() > 0)
gt_np = gt_np.astype(np.float_)
pred_np = np.squeeze(y_pred[0, :, :, 0].numpy() > 0)
pred_np = pred_np.astype(np.float_)
hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()
hausdorff_distance_filter.Execute(sitk.GetImageFromArray(gt_np), sitk.GetImageFromArray(pred_np))
test_loss_hdd.append(hausdorff_distance_filter.GetHausdorffDistance())
print(test_loss)
print("TestLoss Mean: ", np.mean(test_loss))
print(test_loss_hdd)
print("Hausdorff-Distance: ", np.mean(test_loss_hdd))
#show_loss_plot(len(test_loss), test_loss)
print("test_patient_pred shape: " + str(test_patient_pred.shape))
#show_test_patient_pred(test_patient_pred, test_dataset)
'''
def show_loss_plot(slice, loss_list):
    """Display *loss_list* as a line plot over 1-based slice numbers.

    NOTE(review): the parameter name ``slice`` shadows the builtin; it is
    kept unchanged for caller compatibility.
    """
    slice_numbers = list(range(1, slice + 1))
    plt.plot(slice_numbers, loss_list)
    plt.xlabel('Slice Number')
    plt.ylabel('Loss')
    plt.show()
# Script entry point: run the per-patient Dice evaluation.
if __name__ == "__main__":
    main()
# (scrape artifact: GitLab page footer, converted to comments)
# \ No newline at end of file
# Supports Markdown
# 0% or .
# You are about to add 0 people to the discussion. Proceed with caution.
# Finish editing this message first!
# Please register or to comment