sketchy_input.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf


def build_input(dataset, data_path, batch_size, mode):
"""Build Sketchy image and labels.
Args:
dataset: sketchy or Others.
data_path: Filename for data.
batch_size: Input batch size.
mode: Either 'train' or 'eval'.
Returns:
images: Batchs of images. [batch_size, image_size, image_size, 3]
labels: Batchs of labels. [batch_size, num_classes]
Raises:
ValueError: when the specified dataset is not supported.
"""
    image_size = 256
    if dataset == 'sketchy':
        num_classes = 125
        image_resize = 224
    else:
        raise ValueError('Unsupported dataset %s' % dataset)
    depth = 3

    data_files = tf.gfile.Glob(data_path)
    file_queue = tf.train.string_input_producer(data_files, shuffle=True)
    # Read examples from files in the filename queue.
    reader = tf.TFRecordReader()
    key, value = reader.read(file_queue)
    # Convert each example to a dense label and a processed image.
    features = tf.parse_single_example(value, features={
        'image_label': tf.FixedLenFeature([], tf.int64),
        'image_raw': tf.FixedLenFeature([], tf.string)
    })
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image = tf.reshape(image, [image_size, image_size, depth])
    label = tf.cast(features['image_label'], tf.int32)
    label = tf.reshape(label, [1])
    image = tf.cast(image, tf.float32)
    if mode == 'train':
        # Pad, randomly crop back down to the network input size, flip, and
        # standardize each image as training-time augmentation.
        image = tf.image.resize_image_with_crop_or_pad(
            image, image_size + 32, image_size + 32)
        image = tf.random_crop(image, [image_resize, image_resize, depth])
        image = tf.image.random_flip_left_right(image)
        image = tf.image.per_image_standardization(image)
        example_queue = tf.RandomShuffleQueue(
            capacity=16 * batch_size,
            min_after_dequeue=8 * batch_size,
            dtypes=[tf.float32, tf.int32],
            shapes=[[image_resize, image_resize, depth], [1]])
        num_threads = 16
    else:
        # Deterministic center crop and standardization for evaluation.
        image = tf.image.resize_image_with_crop_or_pad(
            image, image_resize, image_resize)
        image = tf.image.per_image_standardization(image)
        example_queue = tf.FIFOQueue(
            3 * batch_size,
            dtypes=[tf.float32, tf.int32],
            shapes=[[image_resize, image_resize, depth], [1]])
        num_threads = 1
    assert len(image.get_shape()) == 3
    assert label.get_shape().as_list() == [1]
    assert len(example_queue.shapes[1]) == 1

    example_queue_op = example_queue.enqueue([image, label])
    tf.train.add_queue_runner(tf.train.queue_runner.QueueRunner(
        example_queue, [example_queue_op] * num_threads))
    # Read 'batch_size' labels + images from the example queue.
    images, labels = example_queue.dequeue_many(batch_size)
    labels = tf.reshape(labels, [batch_size, 1])
    indices = tf.reshape(tf.range(0, batch_size, 1), [batch_size, 1])
    # Scatter 1.0 at each (row, label) pair to build one-hot label vectors.
    labels = tf.sparse_to_dense(
        tf.concat(values=[indices, labels], axis=1),
        [batch_size, num_classes], 1.0, 0.0)

    assert len(images.get_shape()) == 4
    assert images.get_shape()[0] == batch_size
    assert images.get_shape()[-1] == 3
    assert len(labels.get_shape()) == 2
    assert labels.get_shape()[0] == batch_size
    assert labels.get_shape()[1] == num_classes

    # Display the training images in the visualizer.
    tf.summary.image('images', images)
    return images, labels
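

# The block below is a minimal usage sketch, not part of the original module.
# It assumes a TF 1.x runtime and TFRecord shards at the hypothetical glob
# '/tmp/sketchy/train-*.tfrecords'; batch_size=32 is likewise an arbitrary
# choice. Because build_input is built on input queues, the queue runners
# must be started before any batch can be fetched.
if __name__ == '__main__':
    images, labels = build_input(
        'sketchy', '/tmp/sketchy/train-*.tfrecords', batch_size=32,
        mode='train')
    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            # Fetch one batch; shapes should be (32, 224, 224, 3) and (32, 125).
            image_batch, label_batch = sess.run([images, labels])
            print(image_batch.shape, label_batch.shape)
        finally:
            coord.request_stop()
            coord.join(threads)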