Commit 268c6a29 authored by sjjsmuel

adapt to original resnet

parent d472c6b0
@@ -13,7 +13,7 @@ class AnnotationLocationLoader:
_data = {}
def __init__(self, annotation_file='input/caries_dataset_annotation.json', images_base_folder=Path('input/test_data/'), mouth_annotations_folder =Path('input/mouth_annotations/')):
def __init__(self, annotation_file='input/caries_dataset_annotation.json', images_base_folder=Path('input/test_data/'), mouth_annotations_folder=Path('input/mouth_annotations/')):
self._annotation_file = annotation_file
if not type(images_base_folder) == PosixPath:
@@ -93,6 +93,7 @@ class AnnotationLocationLoader:
img_width = mouth_annotation['asset']['size']['width']
img_height = mouth_annotation['asset']['size']['height']
self._data[picture_filename].append(('mouth', [(img_width, img_height), top_left, bottom_right]))
if counter_number_of_annotated_but_missing_files > 0:
print('[INFO] {} mouth annotations were skipped during loading. This was done due to missing corresponding files in the assigned folder.'.format(counter_number_of_annotated_but_missing_files))
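For orientation, a minimal sketch of the per-image entry the loader appends here, assuming the corner tuples are (x, y) pixel coordinates (how top_left and bottom_right are built is not shown in this hunk); all concrete values below are made up:

# Hypothetical _data entry for one image; only the tuple layout is taken from the code above.
img_width, img_height = 1920, 1080              # e.g. from mouth_annotation['asset']['size']
top_left, bottom_right = (250, 300), (1600, 900)

data = {}
data.setdefault('20190328_084512.jpg', []).append(
    ('mouth', [(img_width, img_height), top_left, bottom_right]))
print(data)
# {'20190328_084512.jpg': [('mouth', [(1920, 1080), (250, 300), (1600, 900)])]}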
@@ -132,4 +132,4 @@ class CAMModel(Model):
# Update the metrics.
metric_tracker.update_state(y, y_pred)
# Return a dict mapping metric names to current value.
return {'loss': loss_tracker.result(), 'acc': metric_tracker.result()}
\ No newline at end of file
return {'loss': loss_tracker.result(), 'accuracy': metric_tracker.result()}
\ No newline at end of file
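For context, the hunk above is the tail end of a custom step in the subclassed Keras model; below is a minimal, self-contained sketch of the same pattern. The concrete loss and metric (binary cross-entropy / BinaryAccuracy) are assumptions; only the shape of the returned dict mirrors the code above.

import tensorflow as tf

# Standalone trackers mirroring the ones referenced in the hunk above.
loss_tracker = tf.keras.metrics.Mean(name='loss')
metric_tracker = tf.keras.metrics.BinaryAccuracy(name='accuracy')
loss_fn = tf.keras.losses.BinaryCrossentropy()

class MinimalModel(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.dense = tf.keras.layers.Dense(1, activation='sigmoid')

    def call(self, inputs):
        return self.dense(inputs)

    def test_step(self, data):
        x, y = data
        y_pred = self(x, training=False)
        # Update the metrics, then report them under the names Keras displays.
        loss_tracker.update_state(loss_fn(y, y_pred))
        metric_tracker.update_state(y, y_pred)
        # Return a dict mapping metric names to current value.
        return {'loss': loss_tracker.result(), 'accuracy': metric_tracker.result()}

Keras then reports the returned values under exactly these keys, which is what the 'acc' to 'accuracy' rename in this hunk affects.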
from helpers.AnnotationLocationLoader import AnnotationLocationLoader
from helpers.PredictionLocationLoader import PredictionLocationLoader
from pathlib import Path
def convertGroundtruths(out_path):
# create folder for gt
out_path = out_path / 'groundtruths'
if not out_path.exists():
out_path.mkdir(parents=True)
annotLoader = AnnotationLocationLoader(annotation_file='../input/caries_dataset_annotation.json', images_base_folder='../input/test_data', mouth_annotations_folder='../input/mouth_annotations')
for image in annotLoader.get_all_annotated_images():
#create file
file = image[:-3] + 'txt'
with open(out_path / file, "w") as groundtruth_file:
for annotation in annotLoader.get_annotations(image):
# format: <class_name> <left> <top> <right> <bottom>
                groundtruth_file.write("{} {} {} {} {}\n".format(
                    annotation[0],
                    annotation[1][0][0], annotation[1][0][1],
                    annotation[1][1][0], annotation[1][1][1]))
def convertDetections(out_path):
# create folder for detections
out_path = out_path / 'detections'
if not out_path.exists():
out_path.mkdir(parents=True)
predLoader = PredictionLocationLoader(prediction_file='../out/evaluation/predictions.txt', images_base_folder='../input/test_data')
for image in predLoader.get_all_annotated_images():
file = image[:-3] + 'txt'
with open(out_path / file, "w") as detection_file:
for annotation in predLoader.get_annotations(image):
# format: <class_name> <confidence> <left> <top> <right> <bottom>
                detection_file.write("{} {} {} {} {} {}\n".format(
                    annotation[0], annotation[1][2],
                    annotation[1][0][0], annotation[1][0][1],
                    annotation[1][1][0], annotation[1][1][1]))
if __name__ == '__main__':
'''
    Please run the Pascal VOC metrics implementation by Rafael Padilla on the output of this script:
https://github.com/rafaelpadilla/Object-Detection-Metrics
'''
output_path = Path('../out/convertedForObjectDetectionMetrics')
convertGroundtruths(output_path)
convertDetections(output_path)
print('[INFO] Finished conversion for ObjectDetectionMetrics.')
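As a quick reference for the files this script writes (and which the linked Object-Detection-Metrics tool consumes), here is a hedged illustration of the two per-image .txt line formats; the class name, coordinates and confidence are made-up example values:

# groundtruths/<image>.txt : <class_name> <left> <top> <right> <bottom>
example_groundtruth_line = "caries 120 80 240 190"

# detections/<image>.txt   : <class_name> <confidence> <left> <top> <right> <bottom>
example_detection_line = "caries 0.87 118 75 235 200"

# Splitting a detection line back into its components:
name, conf, left, top, right, bottom = example_detection_line.split()
print(name, float(conf), int(left), int(top), int(right), int(bottom))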
@@ -38,8 +38,8 @@ def rotate(x, label, size):
img = x['img']
mask = x['mouth']
mask = tf.expand_dims(mask, -1)
# rotate either 0 or 180 degrees (0 times or 2 times 90 degrees)
random_value = tf.multiply(tf.random.uniform(shape=[], minval=0, maxval=2, dtype=tf.int32),2)
    # rotate by 0, 90, 180 or 270 degrees (0, 1, 2 or 3 times 90 degrees)
random_value = tf.random.uniform(shape=[], minval=0, maxval=4, dtype=tf.int32)
img = tf.image.rot90(img, random_value)
mask = tf.image.rot90(mask, random_value)
mask = tf.squeeze(mask)
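The change above widens the augmentation from {0°, 180°} to all four right-angle rotations. A minimal standalone sketch of the same idea, applying one shared k to image and mask so they stay aligned (the 500x500 shapes are illustrative only):

import tensorflow as tf

def random_rot90(img, mask):
    # Draw k in {0, 1, 2, 3} -> rotate by 0, 90, 180 or 270 degrees.
    k = tf.random.uniform(shape=[], minval=0, maxval=4, dtype=tf.int32)
    # Use the same k for both tensors so the mask keeps matching the image.
    return tf.image.rot90(img, k), tf.image.rot90(mask, k)

img = tf.zeros([500, 500, 3])   # illustrative shapes only
mask = tf.zeros([500, 500, 1])
img_rot, mask_rot = random_rot90(img, mask)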
import json
from pathlib import Path
import ast
class PredictionLocationLoader:
_prediction_file = None
_img_path = None
_annotated_images = set()
_available_annotations = set()
_available_images = None
_data = {}
def __init__(self, prediction_file='out/predictions.txt', images_base_folder='input/test_data/'):
self._prediction_file = prediction_file
self._img_path = images_base_folder
        # get the names of the images which are available as files
self._available_images = self._get_names_from_available_images()
self._load_predictions()
def _get_names_from_available_images(self):
names_from_available_images = []
for path in [path for path in Path(self._img_path).iterdir() if path.is_dir()]:
names_from_available_images.extend([filename.name for filename in path.iterdir() if filename.is_file() and not filename.name.startswith('.')])
return names_from_available_images
def _load_predictions(self):
with open(self._prediction_file) as file:
for line in file.readlines():
filename, predictions = line.split(';')
filename = filename.strip()
predictions = predictions.strip()
predictions = ast.literal_eval(predictions)
self._data[filename] = predictions
# prepare meta-structures
for pred in predictions:
                    prediction_type = pred[0].upper()
                    self._available_annotations.add(prediction_type)
self._annotated_images.add(filename)
self._annotated_images = list(self._annotated_images)
self._available_annotations = list(self._available_annotations)
def get_all_types_of_annotations(self):
"""
        :return: list of all the types of annotations which appeared at least once in the prediction file
"""
return self._available_annotations
def get_all_annotated_images(self):
"""
        :return: list of the names of all images which have at least one annotation
"""
return self._annotated_images
def is_annotated(self, image_name):
"""
        Checks whether an annotation exists for the given filename
        :param image_name: complete name of the file including the filetype as a string
        :return: boolean whether there is an annotation for the image
"""
return image_name in self._annotated_images
def get_annotations(self, image_name, filter=None):
"""
Returns a list of annotations for the given image_name
e.g. [ ('caries', [(x1,y1), (x2,y2)]), _more_entries_ ]
        :param filter: a list of strings naming the types of annotations to retrieve
"""
if self.is_annotated(image_name):
            if filter and len(filter) > 0:
filter = [category.lower() for category in filter]
return [annotation for annotation in self._data[image_name] if annotation[0] in filter]
return self._data[image_name]
else:
return []
\ No newline at end of file
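A hedged sketch of the predictions.txt line format _load_predictions expects: one 'filename;python-literal' pair per line. The trailing confidence inside each entry is inferred from how convertDetections indexes annotation[1][2]; all concrete values below are made up.

import ast

line = "20190328_084512.jpg;[('caries', [(118, 75), (235, 200), 0.87])]"  # hypothetical

filename, predictions = line.split(';')
predictions = ast.literal_eval(predictions.strip())
print(filename.strip())   # 20190328_084512.jpg
print(predictions[0][0])  # class name, e.g. 'caries'
print(predictions[0][1])  # [(x1, y1), (x2, y2), confidence]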
@@ -32,18 +32,12 @@ class Resnet50(NetworkBase):
if shouldSave:
base_model.save('input/resnet_50_base_model.h5')
for layer in base_model.layers:
layer.trainable = False
x = GlobalAveragePooling2D()(base_model.output)
x = Dropout(0.4)(x)
x = Dense(128)(x)
x = Dropout(0.2)(x)
x = Dense(self.NUM_CLASSES)(x)
out = Dense(self.NUM_CLASSES, activation='softmax', name='prediction')(x)
model = CAMModel(inputs=[input_tensor], outputs=[out, base_model.layers[-1].output])
for layer in model.layers[86:]:
layer.trainable = True
return model
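For context on the head this hunk reworks, here is a minimal sketch of a GAP-plus-softmax head on a Keras ResNet50 base, which is one plausible reading of the change given the commit title. A plain tf.keras.Model stands in for the project's CAMModel, and the input size, ImageNet weights and NUM_CLASSES value are assumptions rather than facts taken from this diff.

import tensorflow as tf
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Input

NUM_CLASSES = 2                                # assumed
input_tensor = Input(shape=(500, 500, 3))      # assumed input size
base_model = ResNet50(weights='imagenet', include_top=False, input_tensor=input_tensor)

# Freeze the backbone first, as in the hunk above.
for layer in base_model.layers:
    layer.trainable = False

# Global average pooling straight into a softmax classifier.
x = GlobalAveragePooling2D()(base_model.output)
out = Dense(NUM_CLASSES, activation='softmax', name='prediction')(x)

# Two outputs: class scores plus the last backbone feature map (usable for CAMs).
model = tf.keras.Model(inputs=[input_tensor],
                       outputs=[out, base_model.layers[-1].output])

# Then unfreeze the deeper layers for fine-tuning, mirroring layers[86:] above.
for layer in model.layers[86:]:
    layer.trainable = True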
from helpers.AnnotationLocationLoader import AnnotationLocationLoader
from helpers.PredictionLocationLoader import PredictionLocationLoader
print('Annotations')
annot_loader = AnnotationLocationLoader(annotation_file='../input/caries_dataset_annotation.json', images_base_folder='../input/training_data/', mouth_annotations_folder='../input/mouth_annotations/')
print('Annotations -- Mouth')
print(annot_loader.get_annotations('20190328_084512.jpg', ['mouth'])[0][1])
print()
print('Find images with missing annotation')
annot_imgs = annot_loader.get_all_annotated_images()
for img in annot_imgs:
annots = annot_loader.get_annotations(img, ['mouth'])
print(annots)
if len(annots) == 0:
print(img)
print()
print('Annotations -- Caries')
print(annot_loader.get_all_types_of_annotations())
for image in annot_imgs[:10]:
#print(image, annot_loader.get_annotations(image, ['caries']))
print(image, annot_loader.get_annotations(image))
print()
print('Predictions')
pred_loader = PredictionLocationLoader(prediction_file='../out/predictions.txt', images_base_folder='../input/training_data/')
print(pred_loader.get_all_types_of_annotations())
for image in pred_loader.get_all_annotated_images()[:10]:
print(pred_loader.get_annotations(image, ['caries']))
@@ -19,7 +19,7 @@ for element in class_index_map:
train_dir = pathlib.Path('input/training_data_mini')
annotation_path = './input/mouth_annotations/'
annotation_path = '../input/mouth_annotations/'
batch_size = 5
img_width = 500
img_height = 500