Commit 51a653c8 authored by sjromuel's avatar sjromuel
Browse files

d

parent 5301bd76
......@@ -440,7 +440,7 @@ class BaseNetwork:
self.log.write('Loading Training Data ...'+"\r")
### initializing weights ###
weights = init_weights(manual_seed=np.sum(self.test_patients)*3)
weights = init_weights(manual_seed=np.sum(self.test_patients)*2)
filter_multiplier = int(self.newSize[0] / 128)
......
......@@ -115,17 +115,29 @@ def Unet(x, weights, filter_multiplier, training=True):
# model -> e.g. UNet
def train_unet(model, inputs, gt, weights, optimizer, filter_multiplier):
    """Run a single gradient-descent training step for the given model.

    Parameters:
        model: callable like `Unet(x, weights, filter_multiplier, training)`
            returning the network prediction.
        inputs: input batch fed to the model (presumably NHWC image tensor
            — confirm against caller).
        gt: ground-truth tensor matching the prediction's shape.
        weights: list of tf.Variable weight tensors to be updated.
        optimizer: tf.keras optimizer used to apply the gradients.
        filter_multiplier: channel-width multiplier forwarded to the model.

    Returns:
        Tuple of (mean dice loss over the batch as a scalar tensor,
        the updated weights list).
    """
    with tf.GradientTape() as tape:
        # Single forward pass; the loss is reduced over spatial and
        # channel axes per sample by dice_loss.
        pred = model(inputs, weights, filter_multiplier, training=True)
        current_loss = dice_loss(pred, gt, axis=(1, 2, 3))
    grads = tape.gradient(current_loss, weights)
    optimizer.apply_gradients(zip(grads, weights))
    return tf.reduce_mean(current_loss), weights
def init_weights(manual_seed=26):
def init_weights(manual_seed):
### initializing weights ###
#initializer = tf.initializers.glorot_uniform(seed=manual_seed)
initializer = tf.keras.initializers.TruncatedNormal(seed=manual_seed)
initializer = tf.initializers.glorot_uniform(seed=manual_seed)
#initializer = tf.keras.initializers.TruncatedNormal(seed=manual_seed)
shapes = [ # filter_height, filter_width, in_channels, out_channels
# for conv2d_transpose: filter_height, filter_width, out_channels, in_channels
[3, 3, 1, 16],
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment