
Commit

TF 1.0 !
pannous committed Feb 16, 2017
1 parent 9b39c67 commit e1506aa
Showing 18 changed files with 38 additions and 38 deletions.
Empty file modified densenet_layer.py (mode 100755 → 100644)
2 changes: 1 addition & 1 deletion layer
Submodule layer updated 2 files
+80 −0 Clockwork_RNN.py
+152 −153 net.py
Empty file modified lstm-tflearn.py (mode 100755 → 100644)
14 changes: 7 additions & 7 deletions lstm_ctc_to_chars.py
100755 → 100644
@@ -38,7 +38,7 @@
# inputs = tf.transpose(inputs, [0, 2, 1]) # inputs must be a `Tensor` of shape: `[batch_size, max_time, ...]`
inputs = tf.transpose(inputs, [2, 0, 1]) # [max_time, batch_size, features] to split:
# Split data because rnn cell needs a list of inputs for the RNN inner loop
- inputs = tf.split(0, max_input_length, inputs) # n_steps * (batch_size, features)
+ inputs = tf.split(axis=0, num_or_size_splits=max_input_length, value=inputs) # n_steps * (batch_size, features)

num_hidden = 100 # features
cell = tf.nn.rnn_cell.LSTMCell(num_hidden, state_is_tuple=True)
@@ -89,7 +89,7 @@
bias = tf.Variable(uniform_bias, name="bias_dense_%d" % i)
y_ = outputY = tf.matmul(output, weights, name="dense_%d" % i) + bias

- cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_, y[:, i, :]), name="cost") # prediction, target
+ cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_, labels=y[:, i, :]), name="cost") # prediction, target
costs.append(cost)
logits.append(y_)

@@ -108,7 +108,7 @@

####Optimizing
logits = y_
- logits3d = tf.pack(logits)
+ logits3d = tf.stack(logits)
seqLengths = [20] * batch_size
cost = tf.reduce_mean(ctc.ctc_loss(logits3d, targetY, seqLengths))
# CTCLoss op expects the reserved blank label to be the largest value! REALLY?
@@ -132,22 +132,22 @@
try:
saver = tf.train.Saver(tf.global_variables())
except:
- saver = tf.train.Saver(tf.all_variables())
+ saver = tf.train.Saver(tf.global_variables())
snapshot = "lstm_mfcc"
checkpoint = tf.train.latest_checkpoint(checkpoint_dir="checkpoints")
if checkpoint:
print("LOADING checkpoint " + checkpoint + "")
try: saver.restore(session, checkpoint)
except: print("incompatible checkpoint")
try: session.run([tf.global_variables_initializer()])
- except: session.run([tf.initialize_all_variables()]) # tf <12
+ except: session.run([tf.global_variables_initializer()]) # tf <12

# train
step = 0 # show first
try: summaries = tf.summary.merge_all()
- except: summaries = tf.merge_all_summaries() # tf<12
+ except: summaries = tf.summary.merge_all() # tf<12
try: summary_writer = tf.summary.FileWriter("logs", session.graph) #
- except: summary_writer = tf.train.SummaryWriter("logs", session.graph) # tf<12
+ except: summary_writer = tf.summary.FileWriter("logs", session.graph) # tf<12
while step < steps:
batch_xs, batch_ys = next(batch)

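Note on the two call-signature changes applied in this file: under TensorFlow 1.x, tf.split takes keyword arguments (axis/num_or_size_splits/value) and softmax_cross_entropy_with_logits requires named logits=/labels=. A minimal, self-contained sketch, assuming TensorFlow 1.x; the placeholder shapes (20 time steps, batch of 8, 13 features, 32 classes) are illustrative, not taken from this repository.

import tensorflow as tf

inputs = tf.placeholder(tf.float32, shape=[20, 8, 13])          # [max_time, batch_size, features]
# TF 1.0 keyword form; the pre-1.0 call was tf.split(0, 20, inputs)
pieces = tf.split(axis=0, num_or_size_splits=20, value=inputs)  # 20 tensors of shape (1, 8, 13)

y_ = tf.placeholder(tf.float32, shape=[8, 32])  # predictions (logits)
y = tf.placeholder(tf.float32, shape=[8, 32])   # one-hot targets
# TF 1.0 rejects the old positional order (prediction, target); the arguments must be named
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_, labels=y))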
14 changes: 7 additions & 7 deletions lstm_mfcc_ctc_to_words.py
100755 → 100644
@@ -37,7 +37,7 @@
# inputs = tf.transpose(inputs, [0, 2, 1]) # inputs must be a `Tensor` of shape: `[batch_size, max_time, ...]`
inputs = tf.transpose(inputs, [2, 0, 1]) # [max_time, batch_size, features] to split:
# Split data because rnn cell needs a list of inputs for the RNN inner loop
- inputs = tf.split(0, max_input_length, inputs) # n_steps * (batch_size, features)
+ inputs = tf.split(axis=0, num_or_size_splits=max_input_length, value=inputs) # n_steps * (batch_size, features)

num_hidden = 100 #features
cell = tf.nn.rnn_cell.LSTMCell(num_hidden, state_is_tuple=True)
@@ -75,7 +75,7 @@
bias = tf.Variable(tf.random_uniform([classes], minval=-1. / width, maxval=1. / width), name="bias_dense_%d"%i)
y_ = outputY = tf.matmul(output, weights, name="dense_%d" % i) + bias

- cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_,y[:,i,:]), name="cost") # prediction, target
+ cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_, labels=y[:,i,:]), name="cost") # prediction, target
costs.append(cost)
logits.append(y_)

@@ -94,7 +94,7 @@

####Optimizing
logits=y_
- logits3d = tf.pack(logits)
+ logits3d = tf.stack(logits)
seqLengths=[20]*batch_size
cost = tf.reduce_mean(ctc.ctc_loss(logits3d, targetY, seqLengths))
# CTCLoss op expects the reserved blank label to be the largest value! REALLY?
@@ -116,23 +116,23 @@
steps = 9999999
session=tf.Session()
try:saver = tf.train.Saver(tf.global_variables())
- except:saver = tf.train.Saver(tf.all_variables())
+ except:saver = tf.train.Saver(tf.global_variables())
snapshot = "lstm_mfcc"
checkpoint = tf.train.latest_checkpoint(checkpoint_dir="checkpoints")
if checkpoint:
print("LOADING " + checkpoint + " !!!")
try:saver.restore(session, checkpoint)
except: print("incompatible checkpoint")
try: session.run([tf.global_variables_initializer()])
- except: session.run([tf.initialize_all_variables()])# tf <12
+ except: session.run([tf.global_variables_initializer()])# tf <12


#train
step = 0 # show first
try:summaries = tf.summary.merge_all()
- except:summaries = tf.merge_all_summaries() # tf<12
+ except:summaries = tf.summary.merge_all() # tf<12
try:summary_writer = tf.summary.FileWriter("logs", session.graph) #
- except:summary_writer = tf.train.SummaryWriter("logs", session.graph) # tf<12
+ except:summary_writer = tf.summary.FileWriter("logs", session.graph) # tf<12
while step < steps:
batch_xs, batch_ys = next(batch)

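The tf.pack used above was renamed to tf.stack in TensorFlow 1.0 with unchanged semantics; a minimal sketch, assuming TensorFlow 1.x, with an illustrative list of per-step logits.

import tensorflow as tf

step_logits = [tf.zeros([8, 32]) for _ in range(20)]  # n_steps * (batch_size, classes)
logits3d = tf.stack(step_logits)                       # TF 1.0 name for the old tf.pack
print(logits3d.get_shape())                            # (20, 8, 32)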
16 changes: 8 additions & 8 deletions lstm_mfcc_to_chars.py
100755 → 100644
@@ -34,7 +34,7 @@
# inputs = tf.transpose(inputs, [0, 2, 1]) # inputs must be a `Tensor` of shape: `[batch_size, max_time, ...]`
inputs = tf.transpose(inputs, [2, 0, 1]) # [max_time, batch_size, features] to split:
# Split data because rnn cell needs a list of inputs for the RNN inner loop
- inputs = tf.split(0, max_length, inputs) # n_steps * (batch_size, features)
+ inputs = tf.split(axis=0, num_or_size_splits=max_length, value=inputs) # n_steps * (batch_size, features)

num_hidden = 100 #features
cell = tf.nn.rnn_cell.LSTMCell(num_hidden, state_is_tuple=True)
@@ -65,32 +65,32 @@
# optimize
# if use_word: y=target=tf.placeholder(tf.float32, shape=(batch_size,(None,32))) # -> seq2seq!
y=target=tf.placeholder(tf.float32, shape=(batch_size,classes))
- cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_,y),name="cost") # prediction, target
- tf.scalar_summary('cost', cost)
+ cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_,labels=y),name="cost") # prediction, target
+ tf.summary.scalar('cost', cost)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
prediction = y_
# Evaluate model
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(target, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
- tf.scalar_summary('accuracy', accuracy)
+ tf.summary.scalar('accuracy', accuracy)

steps = 9999999
session=tf.Session()
- saver = tf.train.Saver(tf.all_variables())
+ saver = tf.train.Saver(tf.global_variables())
snapshot = "lstm_mfcc"
checkpoint = tf.train.latest_checkpoint(checkpoint_dir="checkpoints")
if checkpoint:
print("LOADING " + checkpoint + " !!!")
try:saver.restore(session, checkpoint)
except: print("incompatible checkpoint")
try: session.run([tf.global_variables_initializer()])
- except: session.run([tf.initialize_all_variables()])
+ except: session.run([tf.global_variables_initializer()])


#train
step = 0 # show first
- summaries = tf.merge_all_summaries()
- summary_writer = tf.train.SummaryWriter("logs", session.graph) #
+ summaries = tf.summary.merge_all()
+ summary_writer = tf.summary.FileWriter("logs", session.graph) #
while step < steps:
batch_xs, batch_ys = next(batch)
# tf.train.shuffle_batch_join(example_list, batch_size, capacity=min_queue_size + batch_size * 16, min_queue_size)
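The summary calls rewritten in this file follow the TF 1.0 renames (tf.scalar_summary -> tf.summary.scalar, tf.merge_all_summaries -> tf.summary.merge_all, tf.train.SummaryWriter -> tf.summary.FileWriter). A self-contained sketch, assuming TensorFlow 1.x; the constant cost and the "logs" directory are illustrative.

import tensorflow as tf

cost = tf.constant(0.5)
tf.summary.scalar('cost', cost)
summaries = tf.summary.merge_all()
with tf.Session() as session:
    summary_writer = tf.summary.FileWriter("logs", session.graph)
    session.run(tf.global_variables_initializer())
    summary_writer.add_summary(session.run(summaries), global_step=0)
    summary_writer.close()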
14 changes: 7 additions & 7 deletions lstm_to_chars.py
100755 → 100644
@@ -77,7 +77,7 @@
# Reshape to 2-D tensor (nTimeSteps*Size, nfeatures)
inputXrs = tf.reshape(inputX, [-1, nFeatures])
# Split to get a list of 'n_steps' tensors of shape (_size, n_hidden)
- inputList = tf.split(0, maxTimeSteps, inputXrs)
+ inputList = tf.split(axis=0, num_or_size_splits=maxTimeSteps, value=inputXrs)
targetIxs = tf.placeholder(tf.int64)
targetVals = tf.placeholder(tf.int32)
targetShape = tf.placeholder(tf.int64)
@@ -104,13 +104,13 @@
print("building fbH1rs ")
fbH1rs = [tf.reshape(t, [Size, 2, nHidden]) for t in fbH1]
print("building outH1 ")
- outH1 = [tf.reduce_sum(tf.mul(t, weightsOutH1), reduction_indices=1) + biasesOutH1 for t in fbH1rs]
+ outH1 = [tf.reduce_sum(tf.multiply(t, weightsOutH1), axis=1) + biasesOutH1 for t in fbH1rs]
print("building logits ")
logits = [tf.matmul(t, weightsClasses) + biasesClasses for t in outH1]
print("len(outH1) %d"% len(outH1))
####Optimizing
print("building loss")
- logits3d = tf.pack(logits)
+ logits3d = tf.stack(logits)
loss = tf.reduce_mean(ctc.ctc_loss(logits3d, targetY, seqLengths))
out = tf.identity(loss, 'ctc_loss_mean')
optimizer = tf.train.MomentumOptimizer(learningRate, momentum).minimize(loss)
@@ -128,13 +128,13 @@
####Run session
with tf.Session(graph=graph) as session:
try: merged = tf.summary.merge_all()
- except: merged = tf.merge_all_summaries()
+ except: merged = tf.summary.merge_all()
try:writer = tf.summary.FileWriter("/tmp/basic_new", session.graph)
- except: writer = tf.train.SummaryWriter("/tmp/basic_new", session.graph)
+ except: writer = tf.summary.FileWriter("/tmp/basic_new", session.graph)
try:saver = tf.train.Saver() # defaults to saving all variables
except:
print("tf.train.Saver() broken in tensorflow 0.12")
- saver = tf.train.Saver(tf.all_variables())# WTF stupid API breaking
+ saver = tf.train.Saver(tf.global_variables())# WTF stupid API breaking
ckpt = tf.train.get_checkpoint_state('./checkpoints')

start = 0
@@ -150,7 +150,7 @@
else:
print('Initializing')
try: session.run(tf.global_variables_initializer())
- except:session.run(tf.initialize_all_variables())
+ except:session.run(tf.global_variables_initializer())
for epoch in range(nEpochs):
print('Epoch', epoch + 1, '...')
errors = np.zeros(len(edData))
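For the elementwise-multiply and reduction renames above (tf.mul -> tf.multiply, reduction_indices= -> axis=), a minimal sketch assuming TensorFlow 1.x; the [4, 2, 100] shape only mirrors the per-timestep forward/backward layout and is illustrative.

import tensorflow as tf

t = tf.ones([4, 2, 100])          # (batch, fw/bw, hidden), illustrative values
weightsOutH1 = tf.ones([2, 100])
# TF 1.0: tf.multiply and axis= replace tf.mul and reduction_indices=
outH1 = tf.reduce_sum(tf.multiply(t, weightsOutH1), axis=1)
print(outH1.get_shape())          # (4, 100)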
Empty file modified mfcc_feature_classifier.py (mode 100755 → 100644)
Empty file modified number_classifier_tflearn.py (mode 100755 → 100644)
Empty file modified number_gan_layer.py (mode 100755 → 100644)
Empty file modified record.py (mode 100755 → 100644)
Empty file modified speaker_classifier_tflearn.py (mode 100755 → 100644)
2 changes: 1 addition & 1 deletion spectro_gan.py
100755 → 100644
@@ -71,7 +71,7 @@ def check_accuracy():
draw=0

# Train
- tf.initialize_all_variables().run()
+ tf.global_variables_initializer().run()
steps=30000
e=0
for i in range(steps):
2 changes: 1 addition & 1 deletion speech2text-seq2seq.py
@@ -92,7 +92,7 @@ def res_block(tensor, size, rate, dim=num_dim):
# CTC loss
loss = logit.sg_ctc(target=y, seq_len=seq_len)
tf.train.AdamOptimizer(learning_rate).minimize(loss)
- saver = tf.train.Saver(tf.all_variables())
+ saver = tf.train.Saver(tf.global_variables())

# train
tf.sg_train(log_interval=30, lr=0.0001, loss=loss, ep_size=1000, max_ep=200, early_stop=False)
Empty file modified speech2text-tflearn.py (mode 100755 → 100644)
8 changes: 4 additions & 4 deletions speech_data.py
100755 → 100644
@@ -401,8 +401,8 @@ def many_hot_to_word(word):
def dense_to_one_hot(batch, batch_size, num_labels):
sparse_labels = tf.reshape(batch, [batch_size, 1])
indices = tf.reshape(tf.range(0, batch_size, 1), [batch_size, 1])
- concatenated = tf.concat(1, [indices, sparse_labels])
- concat = tf.concat(0, [[batch_size], [num_labels]])
+ concatenated = tf.concat(axis=1, values=[indices, sparse_labels])
+ concat = tf.concat(axis=0, values=[[batch_size], [num_labels]])
output_shape = tf.reshape(concat, [2])
sparse_to_dense = tf.sparse_to_dense(concatenated, output_shape, 1.0, 0.0)
return tf.reshape(sparse_to_dense, [batch_size, num_labels])
@@ -411,8 +411,8 @@ def dense_to_one_hot(batch, batch_size, num_labels):
def dense_to_one_hot(batch, batch_size, num_labels):
sparse_labels = tf.reshape(batch, [batch_size, 1])
indices = tf.reshape(tf.range(0, batch_size, 1), [batch_size, 1])
- concatenated = tf.concat(1, [indices, sparse_labels])
- concat = tf.concat(0, [[batch_size], [num_labels]])
+ concatenated = tf.concat(axis=1, values=[indices, sparse_labels])
+ concat = tf.concat(axis=0, values=[[batch_size], [num_labels]])
output_shape = tf.reshape(concat, [2])
sparse_to_dense = tf.sparse_to_dense(concatenated, output_shape, 1.0, 0.0)
return tf.reshape(sparse_to_dense, [batch_size, num_labels])
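The tf.concat calls in dense_to_one_hot switch to the TF 1.0 argument order (values before axis, here written with keywords). A runnable sketch, assuming TensorFlow 1.x; batch_size, num_labels and the label values are illustrative.

import tensorflow as tf

batch_size, num_labels = 4, 10
batch = tf.constant([3, 1, 0, 7])                                  # dense integer labels
sparse_labels = tf.reshape(batch, [batch_size, 1])
indices = tf.reshape(tf.range(0, batch_size, 1), [batch_size, 1])
# TF 1.0 keyword form; the pre-1.0 call was tf.concat(1, [indices, sparse_labels])
concatenated = tf.concat(axis=1, values=[indices, sparse_labels])
with tf.Session() as session:
    print(session.run(concatenated))  # [[0 3] [1 1] [2 0] [3 7]]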
2 changes: 1 addition & 1 deletion speech_encoder.py
@@ -94,7 +94,7 @@ def eval(feed):


def train_spectrogram_encoder():
- tf.initialize_all_variables().run()
+ tf.global_variables_initializer().run()
print("Pretrain")
for i in range(6000-1):
batch_xs, batch_ys = speech.train.next_batch(100)
2 changes: 1 addition & 1 deletion wave_GANerate.py
100755 → 100644
@@ -79,7 +79,7 @@ def play_pcm(data):


# Train
- tf.initialize_all_variables().run()
+ tf.global_variables_initializer().run()
steps=3000#000
batch=speech_data.wave_batch_generator(target=speech_data.Target.digits)
negative=[0]*batch_size # input was fake
