inference.py
import numpy as np
import tensorflow as tf

from pyAIUtils.aiutils.tftools import layers

class Inference():
    def __init__(
            self,
            data,
            hidden_units,
            residual,
            activation,
            keep_prob,
            use_batchnorm,
            is_training):
        # Args:
        # - data : num_samples x dimensions tensorflow variable
        # - hidden_units : A list containing the number of units in each hidden layer
        # - residual : A list of booleans; if residual[i] is True, a skip
        #   connection is added around hidden layer i
        # - activation : Name of the activation function as a string, e.g. 'relu'
        # - keep_prob : Keep probability to be used for dropout
        # - use_batchnorm : If True, batch normalization is applied to each hidden layer
        # - is_training : Mode distinguishing inference for training vs test
        assert not (use_batchnorm and keep_prob < 1.), \
            'use either batchnorm or dropout, not both'

        self.activation = activation
        self.keep_prob = keep_prob
        self.use_batchnorm = use_batchnorm
        self.is_training = is_training

        num_hidden_layers = len(hidden_units)
        self.hidden_layers = [None] * num_hidden_layers

        # First hidden layer reads the input data directly.
        with tf.variable_scope('hidden_layer_0'):
            self.hidden_layers[0] = self.create_hidden_layer(
                data,
                hidden_units[0],
                residual[0])

        # Remaining hidden layers are stacked on the previous layer's output.
        for i in range(1, num_hidden_layers):
            with tf.variable_scope('hidden_layer_' + str(i)):
                self.hidden_layers[i] = self.create_hidden_layer(
                    self.hidden_layers[i - 1],
                    hidden_units[i],
                    residual[i])

        # Linear output layer producing 2 logits, followed by a softmax.
        with tf.variable_scope('output_layer'):
            self.output_layer = layers.full(
                self.hidden_layers[-1],
                2,
                'fc',
                func=None)

        self.prob = tf.nn.softmax(self.output_layer)

    def create_hidden_layer(self, input, num_hidden_units, residual):
        # Fully connected layer, optionally followed by batch normalization,
        # a residual (skip) connection, the activation, and dropout.
        hidden_layer = layers.full(
            input,
            num_hidden_units,
            'fc_layer',
            func=None)
        if self.use_batchnorm:
            hidden_layer = layers.batch_norm(
                hidden_layer,
                self.is_training)
        if residual:
            # Skip connection; requires num_hidden_units to match the input dimension.
            hidden_layer += input
        hidden_layer = self.apply_activation(hidden_layer, self.activation)
        hidden_layer = layers.dropout(hidden_layer, self.is_training, self.keep_prob)
        return hidden_layer

    def apply_activation(self, x, activation):
        if activation == 'relu':
            return tf.nn.relu(x)
        elif activation == 'sigmoid':
            return tf.nn.sigmoid(x)
        elif activation == 'tanh':
            return tf.nn.tanh(x)
        elif activation == 'elu':
            return tf.nn.elu(x)
        elif activation == 'swish':
            return x * tf.nn.sigmoid(x)
        else:
            assert False, \
                "activation should be one of 'relu', 'sigmoid', 'tanh', 'elu', 'swish'"

    def get_logits(self):
        return self.output_layer

    def get_prob(self):
        return self.prob

    def get_hidden_layers(self):
        return self.hidden_layers
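

# Minimal usage sketch (an illustration, not part of the original file). It
# assumes a TF1-style graph, a hypothetical 10-dimensional input placeholder,
# and that layers.full / layers.batch_norm / layers.dropout from pyAIUtils
# behave as they are used above.
if __name__ == '__main__':
    data = tf.placeholder(tf.float32, shape=[None, 10], name='data')
    is_training = tf.placeholder(tf.bool, name='is_training')

    model = Inference(
        data,
        hidden_units=[10, 10],    # two hidden layers of 10 units each
        residual=[True, True],    # skip connections; dimensions match (10 -> 10)
        activation='relu',
        keep_prob=1.0,            # dropout disabled, since batchnorm is enabled
        use_batchnorm=True,
        is_training=is_training)

    logits = model.get_logits()   # num_samples x 2 unnormalized scores
    prob = model.get_prob()       # softmax probabilities over the 2 classes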