-
Notifications
You must be signed in to change notification settings - Fork 7
/
Copy pathevaluate.py
121 lines (95 loc) · 4 KB
/
evaluate.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
"""Evaluation script for the DeepLab-ResNet network on the validation subset
of PASCAL VOC dataset.
This script evaluates the model on 1449 validation images.
"""
from __future__ import print_function
import argparse
from datetime import datetime
import os
import sys
import time
import tensorflow as tf
import numpy as np
from tqdm import tqdm
from deeplab_resnet import DeepLabResNetModel, ImageReader, dense_crf, inv_preprocess, prepare_label
# Default configuration; every value below can be overridden on the CLI.
DATA_DIRECTORY = '/home/VOCdevkit'
DATA_LIST_PATH = './dataset/val.txt'
NUM_STEPS = 1449 # Number of images in the validation set.
RESTORE_FROM = './deeplab_resnet.ckpt'


def get_arguments():
    """Parse the command-line options of the evaluation script.

    Returns:
        An ``argparse.Namespace`` with attributes ``data_dir``,
        ``data_list``, ``num_steps`` and ``restore_from``.
    """
    parser = argparse.ArgumentParser(description="DeepLabLFOV Network")
    # (flag, type, default, help) for each supported option.
    option_specs = (
        ("--data-dir", str, DATA_DIRECTORY,
         "Path to the directory containing the PASCAL VOC dataset."),
        ("--data-list", str, DATA_LIST_PATH,
         "Path to the file listing the images in the dataset."),
        ("--num-steps", int, NUM_STEPS,
         "Number of images in the validation set."),
        ("--restore-from", str, RESTORE_FROM,
         "Where restore model parameters from."),
    )
    for flag, flag_type, default, help_text in option_specs:
        parser.add_argument(flag, type=flag_type, default=default,
                            help=help_text)
    return parser.parse_args()
def load(saver, sess, ckpt_path):
    """Restore previously trained weights into the running session.

    Args:
        saver: TensorFlow saver object.
        sess: TensorFlow session.
        ckpt_path: path to checkpoint file with parameters.
    """
    # The saver owns the restore logic; report success afterwards so the
    # user can see which checkpoint was actually loaded.
    saver.restore(sess, ckpt_path)
    print("Restored model parameters from {}".format(ckpt_path))
def main():
    """Create the model and run evaluation over the validation set.

    Builds the TF1 graph (input queue -> DeepLab-ResNet -> bilinear
    upsampling -> CRF post-processing -> streaming mIoU), restores the
    checkpoint given by ``--restore-from`` and prints the final mean IoU.
    """
    args = get_arguments()

    # Create queue coordinator (TF1 input-queue machinery).
    coord = tf.train.Coordinator()

    # Load reader.
    with tf.name_scope("create_inputs"):
        reader = ImageReader(
            args.data_dir,
            args.data_list,
            None, # No defined input size.
            False, # No random scale.
            coord)
        image, label = reader.image, reader.label
    # Add one batch dimension: evaluation runs one image at a time.
    image_batch, label_batch = tf.expand_dims(image, dim=0), tf.expand_dims(label, dim=0)

    # Create network (is_training=False freezes batch-norm statistics).
    net = DeepLabResNetModel({'data': image_batch}, is_training=False)

    # Which variables to load: everything in the graph.
    restore_var = tf.global_variables()

    # Predictions: upsample the raw logits back to the input resolution.
    raw_output = net.layers['fc1_voc12']
    raw_output = tf.image.resize_bilinear(raw_output, tf.shape(image_batch)[1:3,])

    # CRF post-processing on the softmax probabilities.
    # NOTE(review): inv_preprocess presumably undoes mean subtraction so
    # dense_crf receives uint8 RGB images — confirm against the library,
    # some versions take an extra num_images argument.
    inv_image = tf.py_func(inv_preprocess, [image_batch], tf.uint8)
    raw_output = tf.py_func(dense_crf, [tf.nn.softmax(raw_output), inv_image], tf.float32)
    raw_output = tf.argmax(raw_output, dimension=3)
    pred = tf.expand_dims(raw_output, dim=3) # Create a 4-d tensor.

    # mIoU: flatten predictions and labels, masking out ignored pixels.
    pred = tf.reshape(pred, [-1,])
    gt = tf.reshape(label_batch, [-1,])
    weights = tf.cast(tf.less_equal(gt, 20), tf.int32) # Ignore the void label '255'.
    mIoU, update_op = tf.contrib.metrics.streaming_mean_iou(pred, gt, num_classes=21, weights=weights)

    # Set up a TF session and initialise variables.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # Grow GPU memory on demand.
    sess = tf.Session(config=config)
    init = tf.global_variables_initializer()

    sess.run(init)
    # Local variables hold the streaming-mIoU confusion-matrix counters.
    sess.run(tf.local_variables_initializer())

    # Load weights.
    loader = tf.train.Saver(var_list=restore_var)
    if args.restore_from is not None:
        load(loader, sess, args.restore_from)

    # Start queue threads that feed the input pipeline.
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)

    # Iterate over the validation images, accumulating the mIoU statistics.
    for step in tqdm(range(args.num_steps)):
        preds, _ = sess.run([pred, update_op])
    print('Mean IoU: {:.3f}'.format(mIoU.eval(session=sess)))
    # Shut down the input queue threads cleanly.
    coord.request_stop()
    coord.join(threads)


if __name__ == '__main__':
    main()