Skip to content
This repository was archived by the owner on Jan 1, 2021. It is now read-only.

Commit b95dcdf

Browse files
authored
Merge pull request #37 from gurumov/master
minor fixes in the examples
2 parents 6a05182 + a4ddf2c commit b95dcdf

6 files changed

Lines changed: 17 additions & 36 deletions

File tree

assignments/exercises/e01.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
out = tf.cond(tf.greater(x, y), lambda: tf.add(x, y), lambda: tf.subtract(x, y))
1818

1919
###############################################################################
20-
# 1b: Create two 0-d tensors x and y randomly selected from -1 and 1.
20+
# 1b: Create two 0-d tensors x and y randomly selected from the range [-1, 1).
2121
# Return x + y if x < y, x - y if x > y, 0 otherwise.
2222
# Hint: Look up tf.case().
2323
###############################################################################

assignments/exercises/e01_sol.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
out = tf.cond(tf.greater(x, y), lambda: tf.add(x, y), lambda: tf.subtract(x, y))
1717

1818
###############################################################################
19-
# 1b: Create two 0-d tensors x and y randomly selected from -1 and 1.
19+
# 1b: Create two 0-d tensors x and y randomly selected from the range [-1, 1).
2020
# Return x + y if x < y, x - y if x > y, 0 otherwise.
2121
# Hint: Look up tf.case().
2222
###############################################################################

examples/02_simple_tf.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@
2828
# [4 6]]
2929

3030
tf.zeros(shape, dtype=tf.float32, name=None)
31-
creates a tensor of shape and all elements will be zeros (when ran in session)
31+
#creates a tensor of shape and all elements will be zeros (when ran in session)
3232

3333
x = tf.zeros([2, 3], tf.int32)
3434
y = tf.zeros_like(x, optimize=True)

examples/03_linear_regression_starter.py

Lines changed: 5 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -26,39 +26,35 @@
2626

2727
# Step 2: create placeholders for input X (number of fire) and label Y (number of theft)
2828
# Both have the type float32
29-
X = tf.placeholders(tf.float32, name='X')
30-
Y = tf.placeholders(tf.float32, name='Y')
29+
3130

3231
# Step 3: create weight and bias, initialized to 0
3332
# name your variables w and b
34-
w = tf.Variable(0.0, 'weights')
35-
b = tf.Variable(0.0, 'biases')
3633

3734

3835
# Step 4: predict Y (number of theft) from the number of fire
3936
# name your variable Y_predicted
40-
Y_predicted = tf.matmul(X, w) + b
37+
4138

4239
# Step 5: use the square error as the loss function
4340
# name your variable loss
44-
loss = (Y - Y_predicted) ** 2
41+
4542

4643
# Step 6: using gradient descent with learning rate of 0.01 to minimize loss
47-
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)
4844

4945
# Phase 2: Train our model
5046
with tf.Session() as sess:
5147
# Step 7: initialize the necessary variables, in this case, w and b
5248
# TO - DO
53-
sess.run(tf.global_variables_initializer())
49+
5450

5551
# Step 8: train the model
5652
for i in range(50): # run 100 epochs
5753
total_loss = 0
5854
for x, y in data:
5955
# Session runs optimizer to minimize loss and fetch the value of loss. Name the received value as l
6056
# TO DO: write sess.run()
61-
_, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})
57+
6258
total_loss += l
6359
print("Epoch {0}: {1}".format(i, total_loss/n_samples))
6460

examples/04_word2vec_no_frills.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
""" The mo frills implementation of word2vec skip-gram model using NCE loss.
1+
""" The no frills implementation of word2vec skip-gram model using NCE loss.
22
Author: Chip Huyen
33
Prepared for the class CS 20SI: "TensorFlow for Deep Learning Research"
44
cs20si.stanford.edu

examples/04_word2vec_starter.py

Lines changed: 8 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -32,68 +32,53 @@ def word2vec(batch_gen):
3232
# center_words have to be int to work on embedding lookup
3333

3434
# TO DO
35-
with tf.name_scope('data'):
36-
center_words = tf.placeholder(tf.int32, [BATCH_SIZE], name='center_words')
37-
target_words = tf.placeholder(tf.int32, [BATCH_SIZE, 1], name='target_words')
35+
3836

3937
# Step 2: define weights. In word2vec, it's actually the weights that we care about
4038
# vocab size x embed size
4139
# initialized to random uniform -1 to 1
4240

4341
# TO DO
44-
with tf.name_scope('embedding_matrix'):
45-
embed_matrix = tf.Variable(tf.random_uniform([VOCAB_SIZE, EMBED_SIZE], -1.0, 1.0), name='embed_matrix')
42+
4643

4744
# Step 3: define the inference
4845
# get the embed of input words using tf.nn.embedding_lookup
4946
# embed = tf.nn.embedding_lookup(embed_matrix, center_words, name='embed')
5047

5148
# TO DO
52-
with tf.name_scope('loss'):
53-
embed = tf.nn.embedding_lookup(embed_matrix, center_words, name='embed')
49+
5450

5551
# Step 4: construct variables for NCE loss
5652
# tf.nn.nce_loss(weights, biases, labels, inputs, num_sampled, num_classes, ...)
5753
# nce_weight (vocab size x embed size), initialized to truncated_normal stddev=1.0 / (EMBED_SIZE ** 0.5)
5854
# bias: vocab size, initialized to 0
5955

6056
# TO DO
61-
nce_weights = tf.Variable(tf.truncated_normal([VOCAB_SIZE, EMBED_SIZE],
62-
stddev=1.0 / (EMBED_SIZE ** 0.5)),
63-
name='nce_weights')
64-
nce_biases = tf.Variable(tf.zeros(VOCAB_SIZE), name='nce_biases')
57+
6558

6659
# define loss function to be NCE loss function
6760
# tf.nn.nce_loss(weights, biases, labels, inputs, num_sampled, num_classes, ...)
6861
# need to get the mean across the batch
6962
# note: you should use embedding of center words for inputs, not center words themselves
7063

7164
# TO DO
72-
nce_loss = tf.nn.nce_loss(weights=nce_weights,
73-
biases=nce_biases,
74-
labels=target_words,
75-
inputs=embed,
76-
num_sampled=NUM_SAMPLED,
77-
num_classes=VOCAB_SIZE,
78-
name='loss')
79-
loss = tf.reduce_mean(nce_loss)
65+
8066

8167
# Step 5: define optimizer
8268

8369
# TO DO
84-
optimizer = tf.GradientDescentOptimizer(LEARNING_RATE).minimize(loss)
85-
70+
71+
8672

8773
with tf.Session() as sess:
8874
# TO DO: initialize variables
89-
sess.run(tf.global_variable_initializer())
75+
9076

9177
total_loss = 0.0 # we use this to calculate the average loss in the last SKIP_STEP steps
9278
writer = tf.summary.FileWriter('./graphs/no_frills/', sess.graph)
9379
for index in range(NUM_TRAIN_STEPS):
9480
centers, targets = next(batch_gen)
9581
# TO DO: create feed_dict, run optimizer, fetch loss_batch
96-
_, loss_batch = sess.run([optimizer, loss], feed_dict={center_words: centers, target_words: targets})
9782

9883
total_loss += loss_batch
9984
if (index + 1) % SKIP_STEP == 0:

0 commit comments

Comments
 (0)