Commit 517703b8 authored by sjromuel

d

parent d08bcdab
@@ -20,31 +20,48 @@ from nets.Unet import dice_loss
 def main():
     file_path = "data/npy/"
-    dice_Score16 = []
-    for patientnumber in range(16):
-        patient_dsc = []
-        seg_npy = np.load(file_path+"P"+str(patientnumber+1).zfill(2)+"_seg.gipl.npy")
-        noisygt_npy = np.load(file_path+"P"+str(patientnumber+1).zfill(2)+"_ctfgt2.gipl.npy")
+    gt_types = ['ctfgt2', 'thresh']
-        seg_npy = np.reshape(seg_npy, (seg_npy.shape[0], 512, 512, 1))
-        noisygt_npy = np.reshape(noisygt_npy, (noisygt_npy.shape[0], 512, 512, 1))
+    for gt_type in gt_types:
+        dice_Score16 = []
+        for patientnumber in range(16):
+            patient_dsc = []
-        test_dataset = tf.data.Dataset.from_tensor_slices((noisygt_npy, seg_npy))
-        test_dataset = test_dataset.batch(batch_size=1)
+            if gt_type == 'ctfgt2':
+                seg_npy = np.load(file_path+"P"+str(patientnumber+1).zfill(2)+"_seg.gipl.npy")
+                noisygt_npy = np.load(file_path+"P"+str(patientnumber+1).zfill(2)+"_ctfgt2.gipl.npy")
+                seg_npy = np.reshape(seg_npy, (seg_npy.shape[0], 512, 512, 1))
+                noisygt_npy = np.reshape(noisygt_npy, (noisygt_npy.shape[0], 512, 512, 1))
+                test_dataset = tf.data.Dataset.from_tensor_slices((noisygt_npy, seg_npy))
+                test_dataset = test_dataset.batch(batch_size=1)
+            elif gt_type == 'thresh':
+                file_path = "data/npy_thresh/"
+                seg_npy = np.load(file_path + "P" + str(patientnumber + 1).zfill(2) + "_seg.gipl.npy")
+                noisygt_npy = np.load(file_path + "P" + str(patientnumber + 1).zfill(2) + "_100_thresh.gipl.npy")
+                seg_npy = np.append(seg_npy, np.load(file_path + "P" + str(patientnumber + 1).zfill(2) + "_seg.gipl.npy"), axis=0)
+                noisygt_npy = np.append(noisygt_npy, np.load(file_path + "P" + str(patientnumber + 1).zfill(2) + "_250_thresh.gipl.npy"), axis=0)
+                seg_npy = np.append(seg_npy, np.load(file_path + "P" + str(patientnumber + 1).zfill(2) + "_seg.gipl.npy"), axis=0)
+                noisygt_npy = np.append(noisygt_npy, np.load(file_path + "P" + str(patientnumber + 1).zfill(2) + "_400_thresh.gipl.npy"), axis=0)
+                seg_npy = np.reshape(seg_npy, (seg_npy.shape[0], 512, 512, 1))
+                noisygt_npy = np.reshape(noisygt_npy, (noisygt_npy.shape[0], 512, 512, 1))
-        for features in test_dataset:
-            noisygt, seg = features
-            noisygt = onehotencode(noisygt, autoencoder=True)
-            seg = onehotencode(seg, autoencoder=True)
+                test_dataset = tf.data.Dataset.from_tensor_slices((noisygt_npy, seg_npy))
+                test_dataset = test_dataset.batch(batch_size=1)
-            slice_dsc = dice_loss(noisygt, seg)
-            patient_dsc.append(slice_dsc)
-        dice_Score16.append(np.mean(patient_dsc))
-    print(dice_Score16)
+            for features in test_dataset:
+                noisygt, seg = features
+                noisygt = onehotencode(noisygt, autoencoder=True)
+                seg = onehotencode(seg, autoencoder=True)
+                slice_dsc = dice_loss(noisygt, seg)
+                patient_dsc.append(slice_dsc)
+            dice_Score16.append(np.mean(patient_dsc))
+        print(gt_type, dice_Score16)
......
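For context on the evaluation loop above: each patient's noisy ground truth and reference segmentation are one-hot encoded, compared slice by slice with dice_loss, and the per-slice values are averaged per patient. Below is a minimal, self-contained sketch of that pattern using a hypothetical soft_dice helper; it is an illustration only, not the repository's nets.Unet.dice_loss or its onehotencode function.

import numpy as np
import tensorflow as tf

def soft_dice(y_true, y_pred, eps=1e-6):
    # Soft Dice coefficient over all pixels; inputs are binary/one-hot tensors.
    y_true = tf.cast(y_true, tf.float32)
    y_pred = tf.cast(y_pred, tf.float32)
    intersection = tf.reduce_sum(y_true * y_pred)
    denom = tf.reduce_sum(y_true) + tf.reduce_sum(y_pred)
    return (2.0 * intersection + eps) / (denom + eps)

# Toy volumes standing in for one patient's noisy GT and reference segmentation
# (4 slices of 512x512 with a single channel, matching the reshaped arrays above).
noisygt_npy = np.random.randint(0, 2, size=(4, 512, 512, 1)).astype(np.float32)
seg_npy = np.random.randint(0, 2, size=(4, 512, 512, 1)).astype(np.float32)

test_dataset = tf.data.Dataset.from_tensor_slices((noisygt_npy, seg_npy)).batch(batch_size=1)
patient_dsc = [float(soft_dice(seg, noisygt)) for noisygt, seg in test_dataset]
print(np.mean(patient_dsc))  # per-patient mean, as appended to dice_Score16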
@@ -125,7 +125,7 @@ def train_unet(model, inputs, gt, weights, optimizer, filter_multiplier):
 def init_weights(manual_seed=26):
     ### initializing weights ###
     #initializer = tf.initializers.glorot_uniform(seed=manual_seed)
-    initializer = tf.keras.initializers.TruncatedNormal()
+    initializer = tf.keras.initializers.TruncatedNormal(seed=26)
     shapes = [ # filter_height, filter_width, in_channels, out_channels
                # for conv2d_transpose: filter_height, filter_width, out_channels, in_channels
         [3, 3, 1, 16],
......
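The only functional change in this hunk is pinning the initializer's seed. Assuming standard TF 2.x behaviour (this snippet is illustrative and not part of the repository), two TruncatedNormal initializers built with the same seed produce identical tensors for the same shape, which makes weight initialization, and hence training runs, repeatable.

import tensorflow as tf

# Two independently constructed initializers with the same seed yield the same
# draw for a given shape, e.g. the first conv filter shape [3, 3, 1, 16].
w1 = tf.keras.initializers.TruncatedNormal(seed=26)(shape=(3, 3, 1, 16))
w2 = tf.keras.initializers.TruncatedNormal(seed=26)(shape=(3, 3, 1, 16))
print(bool(tf.reduce_all(tf.equal(w1, w2))))  # expected: True

# Without a seed, each construction draws different values.
u1 = tf.keras.initializers.TruncatedNormal()(shape=(3, 3, 1, 16))
u2 = tf.keras.initializers.TruncatedNormal()(shape=(3, 3, 1, 16))
print(bool(tf.reduce_all(tf.equal(u1, u2))))  # almost certainly False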
@@ -20,7 +20,7 @@ def main():
     root = tk.Tk()
     root.withdraw()
-    file_path = filedialog.askopenfilename(initialdir="finalResults/")
+    file_path = filedialog.askopenfilename(initialdir="finalResults/complete_thresh/")
     file_path = file_path[:-9]
     print(file_path)
@@ -146,9 +146,9 @@ def main():
         #print(test_loss)
         try:
-            gt_np = np.squeeze(y_true[0, :, :, 0].numpy() > 0)
+            gt_np = np.squeeze(y_true[0, :, :, 0].numpy() > 0.5)
             gt_np = gt_np.astype(np.float_)
-            pred_np = np.squeeze(y_pred[0, :, :, 0].numpy() > 0)
+            pred_np = np.squeeze(y_pred[0, :, :, 0].numpy() > 0.5)
             pred_np = pred_np.astype(np.float_)
             hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()
             hausdorff_distance_filter.Execute(sitk.GetImageFromArray(gt_np), sitk.GetImageFromArray(pred_np))
......
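Both threshold changes in this file binarize the arrays at 0.5 instead of 0 before handing them to SimpleITK, presumably because with soft (probability-valued) predictions "> 0" would mark almost every pixel as foreground. A small stand-alone sketch of the same pattern follows, using made-up arrays rather than the script's y_true/y_pred, and np.float64 in place of np.float_:

import numpy as np
import SimpleITK as sitk

# Hypothetical soft prediction and binary ground truth for one 512x512 slice.
gt_soft = np.zeros((512, 512), dtype=np.float32)
gt_soft[100:200, 100:200] = 1.0
pred_soft = np.full((512, 512), 0.1, dtype=np.float32)  # low probability everywhere...
pred_soft[110:210, 105:205] = 0.9                        # ...except near the true object

# Thresholding at 0.5 keeps only confident foreground; "> 0" would keep every pixel here.
gt_np = (gt_soft > 0.5).astype(np.float64)
pred_np = (pred_soft > 0.5).astype(np.float64)

hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()
hausdorff_distance_filter.Execute(sitk.GetImageFromArray(gt_np), sitk.GetImageFromArray(pred_np))
print(hausdorff_distance_filter.GetHausdorffDistance())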
@@ -21,7 +21,7 @@ def main():
     root = tk.Tk()
     root.withdraw()
-    file_path = filedialog.askopenfilename(initialdir="finalResults/complete_thresh")
+    file_path = filedialog.askopenfilename(initialdir="finalResults/complete_thresh/")
     file_path = file_path[:-9]
     print(file_path)
@@ -164,7 +164,7 @@ def main():
     ###################################################################################
-    detailed_images = True
+    detailed_images = False
     npys3d = False
     ###################################################################################
@@ -226,9 +226,9 @@ def main():
         test_loss.append(loss)
         #print(test_loss)
         try:
-            y_true_np = np.squeeze(y_true[0, :, :, 0].numpy() > 0)
+            y_true_np = np.squeeze(y_true[0, :, :, 0].numpy() > 0.5)
             y_true_np = y_true_np.astype(np.float_)
-            pred_np = np.squeeze(y_pred[0, :, :, 0].numpy() > 0)
+            pred_np = np.squeeze(y_pred[0, :, :, 0].numpy() > 0.5)
             pred_np = pred_np.astype(np.float_)
             hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()
             hausdorff_distance_filter.Execute(sitk.GetImageFromArray(y_true_np), sitk.GetImageFromArray(pred_np))
......
@@ -9,8 +9,18 @@ import matplotlib.pyplot as plt
 from scipy import ndimage
 from datetime import datetime
 mr_img = np.load("data/npy/P01_segmr.gipl.npy")
 ct_img = np.load("data/npy/P01_seg.gipl.npy")
-#p11ctfgt2 = np.load("data/npy/P11_seg.gipl.npy")
+print(np.shape(ct_img))
+print(np.shape(mr_img))
+print(np.min(mr_img))
+print(np.max(mr_img))
+'''#p11ctfgt2 = np.load("data/npy/P11_seg.gipl.npy")
 p11ctfgt2 = np.load("data/npy/P14_ctfgt2.gipl.npy")
 print(np.shape(p11ctfgt2))
@@ -18,7 +28,7 @@ for i in range(np.shape(p11ctfgt2)[0]):
     print(i)
     plt.imshow(p11ctfgt2[i], cmap=plt.cm.bone)
     plt.show()
+'''
 root = tk.Tk()
 root.withdraw()
 file_path = filedialog.askopenfilename(initialdir="data/npy/")
......