        maxval=np.sqrt(6.0*initialization_factor/(num_features+2.0*num_cells))
    )
)
b_a = tf.Variable(tf.zeros([num_cells]))
W_o = tf.Variable(
    tf.random_uniform(
        [num_cells, num_classes],
        minval=-np.sqrt(6.0*initialization_factor/(num_cells+num_classes)),
        maxval=np.sqrt(6.0*initialization_factor/(num_cells+num_classes))
    )
)
b_o = tf.Variable(tf.zeros([num_classes]))
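For reference, the bounds above follow the Glorot/Xavier uniform initialization, with each weight drawn from a range of +/- sqrt(6*initialization_factor/(fan_in+fan_out)). As an illustration only, a hypothetical helper (not part of the original code; it assumes the same np and tf imports used above) could factor out the repeated expressions:

def glorot_uniform(shape, fan_in, fan_out, factor=1.0):
    # Glorot/Xavier uniform bound: sqrt(6*factor/(fan_in+fan_out))
    bound = np.sqrt(6.0*factor/(fan_in+fan_out))
    return tf.Variable(tf.random_uniform(shape, minval=-bound, maxval=bound))

# Example: equivalent to the W_o definition above
W_o = glorot_uniform([num_cells, num_classes], num_cells, num_classes, initialization_factor)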
# Internal states
#
n = tf.zeros([batch_size, num_cells])
d = tf.zeros([batch_size, num_cells])
h = tf.zeros([batch_size, num_cells])
a_max = tf.fill([batch_size, num_cells], -1E38)    # Start off with lowest number possible
# Define model
#
h += activation(tf.expand_dims(s, 0))
for i in range(max_steps):
    x_step = x[:,i,:]
    xh_join = tf.concat(1, [x_step, h])    # Combine the features and hidden state into one tensor
    u = tf.matmul(x_step, W_u)+b_u
    g = tf.matmul(xh_join, W_g)+b_g
    a = tf.matmul(xh_join, W_a)+b_a
    z = tf.mul(u, tf.nn.tanh(g))
    a_newmax = tf.maximum(a_max, a)
After Change
    u = tf.matmul(x_step, W_u)+b_u
    g = tf.matmul(xh_join, W_g)+b_g
    a = tf.matmul(xh_join, W_a)    # The bias b_a is no longer added here
    z = tf.mul(u, tf.nn.tanh(g))
    a_newmax = tf.maximum(a_max, a)
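For context, a_newmax is the usual running-maximum trick for evaluating a softmax-style weighted average without overflow: the numerator n and denominator d are rescaled by exp(a_max - a_newmax) before the current term is added. The rest of the loop body is not shown in this excerpt; a minimal sketch of such an update, reusing the names above and assuming `activation` is the function already used earlier for the initial hidden state, would look like:

    n = tf.mul(n, tf.exp(a_max-a_newmax)) + tf.mul(z, tf.exp(a-a_newmax))    # rescaled running numerator
    d = tf.mul(d, tf.exp(a_max-a_newmax)) + tf.exp(a-a_newmax)               # rescaled running denominator
    h = activation(tf.div(n, d))                                             # new hidden state
    a_max = a_newmax                                                         # carry the running maximum forward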