Commit d6b7378d authored by sjjsmuel's avatar sjjsmuel

Grad-Cam on all test-images

parent 4f8883f3
# Data dir
data/training_data/
data/test_data/
data/test_data_mini/
data/training_data_small/
data/training_data_mini/
input/training_data/
input/test_data/
input/test_data_mini/
input/training_data_small/
input/training_data_mini/
# output folders (generated checkpoints, heatmaps, logs)
out/
......
import pathlib
import PIL
import numpy as np
import cv2
from PIL import Image
from classifier.Resnet152 import Resnet152
import tensorflow as tf
# --- configuration for the Grad-CAM test run ---
img_name = 'IMG_20190404_101926.jpg'
# root folder holding one sub-folder per class with the test images
img_raw_path = 'data/test_data/'
# root folder for the generated heatmap images
out = 'out/'
#test_image = img_raw_path + img_name
# trained classifier checkpoint to visualize
model_file = 'out/checkpoints/2020.03.25.10.26.51/model.0002-0.319.hdf5'
# ResNet152V2 base weights — only used by the commented-out network
# construction further below, not by the load_model path
resnet_file = 'data/resnet152v2_weights_tf_dim_ordering_tf_kernels_notop.h5'
n_classes = 2
# input resolution fed to the network
img_width = 224
img_height = 224
orig_size = None  # set per image inside the processing loop below
channels = 3
class_index = 0  # overwritten per class folder in the loop below
# candidate target layers tried during experimentation; 'post_relu' is the
# one currently used (presumably the final conv activation — confirm)
#LAYER_NAME = 'conv4_block36_out'
#LAYER_NAME = 'conv5_block3_3_conv'
#LAYER_NAME = 'conv5_block3_out'
#LAYER_NAME = 'post_bn'
LAYER_NAME = 'post_relu'
#LAYER_NAME = 'global_average_pooling2d'
'''
img = tf.io.read_file(test_image)
img = tf.image.decode_jpeg(img, channels=channels)
img = tf.image.convert_image_dtype(img, tf.float32)
img = tf.image.resize(img, [img_width, img_height])
'''
def get_heatmap(img, class_index, eps=1e-8):
    """Compute a Grad-CAM heatmap for a single (unbatched) image.

    img: float array of shape (H, W, C), already scaled to [0, 1].
    class_index: output index whose class score is visualized.
    eps: keeps the normalization denominator strictly positive.
    Returns a uint8 heatmap of shape (H, W).

    Uses the module-level `grad_model`, which maps the network input to
    (target conv-layer activations, predictions).
    """
    with tf.GradientTape() as tape:
        conv_outputs, predictions = grad_model(np.array([img]))
        loss = predictions[:, class_index]
        print('Argmax', np.argmax(predictions[0]))
        print('Diff', np.max(predictions[0]) - np.min(predictions[0]))
        print(loss)
    grads = tape.gradient(loss, conv_outputs)
    # guided gradients: keep only positive activations with positive gradients
    castConvOutputs = tf.cast(conv_outputs > 0, "float32")
    castGrads = tf.cast(grads > 0, "float32")
    guided_grads = castConvOutputs * castGrads * grads
    # drop the batch dimension
    output = conv_outputs[0]
    guided_grads = guided_grads[0]
    # per-channel weights = spatial mean of the guided gradients,
    # then the weighted sum over channels gives the raw CAM
    weights = tf.reduce_mean(guided_grads, axis=(0, 1))
    cam = tf.reduce_sum(tf.multiply(weights, output), axis=-1)
    # BUGFIX: `img` is unbatched (H, W, C), so width is shape[1] and height
    # is shape[0]; the old code used shape[2]/shape[1] (copied from batched
    # code) and resized the CAM to (channels, height), destroying the
    # horizontal resolution of the heatmap.
    (w, h) = (img.shape[1], img.shape[0])
    heatmap = cv2.resize(cam.numpy(), (w, h))
    print(heatmap.max() - heatmap.min())
    # min-max normalize to [0, 1]; eps prevents division by zero when the
    # CAM is constant, then scale to uint8 [0, 255]
    numer = heatmap - np.min(heatmap)
    denom = (heatmap.max() - heatmap.min()) + eps
    heatmap = numer / denom
    return (heatmap * 255).astype("uint8")
'''
cam = cv2.applyColorMap(np.uint8(255*heatmap), cv2.COLORMAP_JET)
output_image = cv2.addWeighted(cv2.cvtColor(img.astype('uint8'), cv2.COLOR_RGB2BGR), 0.5, cam, 1, 0)
'''
def apply_heatmap(heatmap, image, alpha=.5, colormap = cv2.COLORMAP_JET):
    """Blend a color-mapped heatmap onto an RGB image.

    heatmap: uint8 single-channel heatmap.
    image: RGB image array (converted to BGR uint8 before blending).
    alpha: weight of the image; the heatmap gets 1 - alpha.
    Returns the blended BGR image.
    """
    colored = cv2.applyColorMap(heatmap, colormap)
    bgr_image = cv2.cvtColor(image.astype('uint8'), cv2.COLOR_RGB2BGR)
    return cv2.addWeighted(bgr_image, alpha, colored, 1 - alpha, 0)
# --- build the trained model and the Grad-CAM gradient model ---
model = tf.keras.models.load_model(model_file)
grad_model = tf.keras.models.Model([model.inputs], [model.get_layer(LAYER_NAME).output, model.output])

class_index_map = {'caries/': 0, 'no_caries/': 1}

# run the CAM function on every test image of both classes
for folder, class_index in class_index_map.items():
    path = pathlib.Path(img_raw_path + folder)
    outpath = pathlib.Path(out + folder)
    if not outpath.exists():
        outpath.mkdir()
    filenames = [item.name for item in path.glob('*') if item.name != '.DS_Store']
    print(folder, class_index)
    for img_name in filenames:
        # load the original image and remember its size for upscaling later
        test_image = img_raw_path + folder + img_name
        orig_image = tf.keras.preprocessing.image.load_img(test_image)
        orig_size = orig_image.size
        # resized, [0, 1]-normalized copy for the network
        img = orig_image.resize((img_width, img_height), Image.BILINEAR)
        img = tf.keras.preprocessing.image.img_to_array(img) / 255
        orig_image = tf.keras.preprocessing.image.img_to_array(orig_image)
        # heatmap at network resolution, scaled back to the original size
        heat_map = get_heatmap(img, class_index)
        heat_map = cv2.resize(heat_map, orig_size)
        image_with_heatmap_applied = apply_heatmap(heat_map, orig_image)
        cv2.imwrite(str(outpath) + '/' + img_name[:-4] + '.png', image_with_heatmap_applied)
        print(img_name[:-4], 'written to file.')
# import the necessary packages
import cv2
from tensorflow.keras.models import Model
import tensorflow as tf
import numpy as np
class GradCAM:
    """Grad-CAM visualizer for a Keras classification model.

    Builds a class-activation heatmap from the gradients of the score of a
    chosen class with respect to the activations of a (usually the final)
    4D convolutional layer.
    """

    def __init__(self, model, classIdx, layerName=None):
        # store the model, the class index used to measure the class
        # activation map, and the layer to be used when visualizing it
        self.model = model
        self.classIdx = classIdx
        self.layerName = layerName
        # if no layer name was given, fall back to the last 4D layer
        if self.layerName is None:
            self.layerName = self.find_target_layer()

    def find_target_layer(self):
        """Return the name of the final layer with a 4D output shape.

        Raises ValueError when the network has no 4D layer, in which case
        Grad-CAM cannot be applied.
        """
        # loop over the layers of the network in reverse order
        for layer in reversed(self.model.layers):
            if len(layer.output_shape) == 4:
                return layer.name
        raise ValueError("Could not find 4D layer. Cannot apply GradCAM.")

    def compute_heatmap(self, image, eps=1e-8):
        """Compute the Grad-CAM heatmap for `image` (batched input tensor).

        image: batched network input of shape (batch, H, W, C).
        eps: keeps the normalization denominator strictly positive.
        Returns a uint8 heatmap with the input's spatial size (H, W).

        FIX: `eps` was declared but unused — the `+ eps` term had been
        commented out in favor of an `if denom > 0` guard, which on a
        constant activation map skipped normalization and could return a
        float array instead of uint8. Using eps always normalizes and
        always returns uint8.
        """
        # gradient model mapping the input to (target layer output, predictions)
        gradModel = Model(
            inputs=[self.model.inputs],
            outputs=[self.model.get_layer(self.layerName).output,
                     self.model.output])
        # record operations for automatic differentiation
        with tf.GradientTape() as tape:
            inputs = tf.cast(image, tf.float32)
            (convOutputs, predictions) = gradModel(inputs)
            loss = predictions[:, self.classIdx]
        grads = tape.gradient(loss, convOutputs)
        # guided gradients: keep positive activations with positive gradients
        castConvOutputs = tf.cast(convOutputs > 0, "float32")
        castGrads = tf.cast(grads > 0, "float32")
        guidedGrads = castConvOutputs * castGrads * grads
        # discard the batch dimension
        convOutputs = convOutputs[0]
        guidedGrads = guidedGrads[0]
        # channel weights = spatial mean of the guided gradients, then a
        # weighted sum over channels yields the raw class activation map
        weights = tf.reduce_mean(guidedGrads, axis=(0, 1))
        cam = tf.reduce_sum(tf.multiply(weights, convOutputs), axis=-1)
        # resize the CAM to the input's spatial dimensions (batched input,
        # so width is shape[2] and height is shape[1])
        (w, h) = (image.shape[2], image.shape[1])
        heatmap = cv2.resize(cam.numpy(), (w, h))
        # min-max normalize to [0, 1] (eps avoids division by zero), then
        # scale to an unsigned 8-bit range
        numer = heatmap - np.min(heatmap)
        denom = (heatmap.max() - heatmap.min()) + eps
        heatmap = (numer / denom * 255).astype("uint8")
        return heatmap

    def overlay_heatmap(self, heatmap, image, alpha=0.5,
                        colormap=cv2.COLORMAP_JET):
        """Color-map the heatmap and alpha-blend it onto `image`.

        Returns a 2-tuple (colored_heatmap, overlaid_image).
        """
        heatmap = cv2.applyColorMap(heatmap, colormap)
        output = cv2.addWeighted(image, alpha, heatmap, 1 - alpha, 0)
        return (heatmap, output)
\ No newline at end of file
from classifier.NetworkBase import NetworkBase
from network_helpers.NetworkBase import NetworkBase
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, GlobalAveragePooling2D, Dropout, Flatten, AveragePooling2D
from tensorflow.keras.applications.resnet_v2 import ResNet152V2
......
from classifier.NetworkBase import NetworkBase
from network_helpers.NetworkBase import NetworkBase
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, GlobalAveragePooling2D, Dropout, Flatten, AveragePooling2D
from tensorflow.keras.applications.resnet_v2 import ResNet50V2
......@@ -27,7 +27,7 @@ class Resnet50(NetworkBase):
base_model = ResNet50V2(weights=weights, input_tensor=input_tensor, include_top=False)
if shouldSave:
base_model.save('data/resnet_50_base_model.h5')
base_model.save('input/resnet_50_base_model.h5')
x = GlobalAveragePooling2D()(base_model.output)
x = Dropout(0.5)(x)
......
import PIL
from pathlib import Path
import imutils
from PIL import Image
from grad_cam.gradcam import GradCAM
from network_helpers.Resnet152 import Resnet152
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import numpy as np
import cv2
# --- configuration for the batch Grad-CAM visualization run ---
img = 'IMG_20190404_101926.jpg'
img_raw_path = 'input/test_data/'
out = 'out/'
# trained checkpoint to visualize
model_file = 'out/checkpoints/2020.04.16.10.12.48/model.0005-0.498.hdf5'
resnet_file = 'input/resnet152v2_weights_tf_dim_ordering_tf_kernels_notop.h5'
# class name -> network output index, plus the reverse lookup table
class_index_map = {'caries': 0, 'no_caries': 1}
index_class_map = {index: name for name, index in class_index_map.items()}
# load the trained model
print('[INFO] loading model...')
model = load_model(model_file)

# extract the network's input/output geometry from the loaded model
_, network_input_width, network_input_height, channels = model.input.shape
network_input_size = (network_input_width, network_input_height)
n_classes = model.output.shape[1]

# iterate over all (both) class folders of the test set
for path in [path for path in Path(img_raw_path).iterdir() if path.is_dir()]:
    # define an output folder for each class
    out_path = Path(out + path.name)
    if not out_path.exists():
        out_path.mkdir()
    # find out the class index the network uses for the class name
    class_index = class_index_map[path.name]
    # FIX: log message typo 'Staring' -> 'Starting'
    print('[INFO] Starting to process folder \'{}\' with index {}'.format(path.name, class_index))
    # get all files in the folder (skipping macOS metadata files)
    filenames = [item for item in path.glob('*') if item.name != '.DS_Store']
    for img in filenames:
        # original image (BGR) for the overlay and its size
        orig = cv2.imread(str(img))
        orig_image_size = (orig.shape[1], orig.shape[0])
        # resized, batched, [0, 1]-normalized copy for the network
        image = load_img(img, target_size=network_input_size)
        image = img_to_array(image)
        image = np.expand_dims(image, axis=0)
        image = image / 255
        prediction = model.predict(image)
        i = int(np.argmax(prediction[0]))
        # decode the prediction into a human-readable label
        label = index_class_map[i]
        prob = prediction[0][i]
        label = "{}: {:.2f}%".format(label, prob * 100)
        print("[INFO] {} - {}".format(label, img.name))
        # build the Grad-CAM heatmap for the predicted class
        cam = GradCAM(model, i)
        heatmap = cam.compute_heatmap(image)
        # resize the heatmap to the original image dimensions and overlay it
        heatmap = cv2.resize(heatmap, (orig.shape[1], orig.shape[0]))
        (heatmap, output) = cam.overlay_heatmap(heatmap, orig, alpha=0.5)
        # draw the predicted label on the output image
        cv2.rectangle(output, (0, 0), (1300, 150), (0, 0, 0), -1)
        cv2.putText(output, label, (10, 120), cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 255, 255), 2)
        # write original | heatmap | overlay side by side to the class folder
        output = np.hstack([orig, heatmap, output])
        output = imutils.resize(output, height=700)
        out_file = str(out_path / f'{img.name[:-4]}.png')
        cv2.imwrite(out_file, output)
print()
......@@ -3,9 +3,9 @@ from optparse import OptionParser
from PIL import ImageFile
from keras_preprocessing.image import ImageDataGenerator
from classifier.Resnet152 import Resnet152
from classifier.Resnet50 import Resnet50
from classifier.DataLoader import DataLoader
from network_helpers.Resnet152 import Resnet152
from network_helpers.Resnet50 import Resnet50
from network_helpers.DataLoader import DataLoader
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping
......@@ -16,11 +16,11 @@ ImageFile.LOAD_TRUNCATED_IMAGES = True
parser = OptionParser()
parser.add_option("-p", "--path_train", dest="train_path", help="Path to training data.", default="./data/train_data")
parser.add_option("-p", "--path_train", dest="train_path", help="Path to training input.", default="./input/train_data")
parser.add_option("--train_size", type="int", dest="train_size", default=200)
parser.add_option("--validation_size", type="int", dest="validation_size", default=200)
parser.add_option("-t", "--path_test", dest="test_path", help="Path to test data.", default="./data/test_data")
parser.add_option("-o", "--path_output", dest="output_path", help="Path to base folder for output data.", default='./out')
parser.add_option("-t", "--path_test", dest="test_path", help="Path to test input.", default="./input/test_data")
parser.add_option("--base_network_file", dest="base_net_file", help="Optional link to local file of Resnet 152 V2 for TF without top.")
parser.add_option("--num_epochs", type="int", dest="num_epochs", help="Number of epochs.", default=100)
parser.add_option("--batch_size", type="int", dest="batch_size", help="Size of batches.", default=10)
......@@ -34,9 +34,9 @@ parser.add_option("--height", type="int", dest="height", default=224)
(options, args) = parser.parse_args()
if not options.train_path: # if folder name is not given
parser.error('Error: path to training data must be specified. Pass --path_train to command line')
parser.error('Error: path to training input must be specified. Pass --path_train to command line')
if not options.test_path: # if folder name is not given
parser.error('Error: path to test data must be specified. Pass --path_test to command line')
parser.error('Error: path to test input must be specified. Pass --path_test to command line')
def get_curr_time():
    """Return the current local time formatted as 'YYYY.MM.DD.HH.MM.SS'."""
    timestamp_format = "%Y.%m.%d.%H.%M.%S"
    return datetime.now().strftime(timestamp_format)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment