Skip to content
This repository was archived by the owner on Jan 1, 2021. It is now read-only.

Commit 48ac942

Browse files
committed
code for lecture 2
1 parent ca70cdf commit 48ac942

File tree

5 files changed

+212
-4
lines changed

5 files changed

+212
-4
lines changed

2017/README.md

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,4 @@ Embedding visualization with TensorBoard<br>
2828
Usage of summary ops<br>
2929
Exercises to be familiar with other special TensorFlow ops<br>
3030
Demonstration of the danger of lazy loading <br>
31-
Convolutional GRU (CRGU) (by Lukasz Kaiser)
32-
33-
34-
31+
Convolutional GRU (CGRU) (by Lukasz Kaiser)

examples/02_lazy_loading.py

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
""" Example of lazy vs normal loading

Created by Chip Huyen (huyenn@stanford.edu)
CS20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
Lecture 02
"""
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf

# ----------------------------------------------------------------------
# NORMAL LOADING
# The add op is created once, before the session, so the printed graph
# definition contains exactly one Add node.
# ----------------------------------------------------------------------

x = tf.Variable(10, name='x')
y = tf.Variable(20, name='y')
z = tf.add(x, y)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('graphs/normal_loading', sess.graph)
    for _ in range(10):
        sess.run(z)
    print(tf.get_default_graph().as_graph_def())
    writer.close()

# ----------------------------------------------------------------------
# LAZY LOADING
# tf.add is called inside the loop, so every iteration appends a fresh
# Add node to the graph -- the printed graph contains 10 Add nodes.
# ----------------------------------------------------------------------

x = tf.Variable(10, name='x')
y = tf.Variable(20, name='y')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('graphs/lazy_loading', sess.graph)
    for _ in range(10):
        sess.run(tf.add(x, y))
    print(tf.get_default_graph().as_graph_def())
    writer.close()

examples/02_placeholder.py

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
""" Placeholder and feed_dict example

Created by Chip Huyen (huyenn@stanford.edu)
CS20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
Lecture 02
"""
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf

# Example 1: feed_dict with placeholder

# a is a placeholder for a vector of 3 elements, type tf.float32
a = tf.placeholder(tf.float32, shape=[3])
b = tf.constant([5, 5, 5], tf.float32)

# a placeholder can be used like any other tensor
c = a + b  # short for tf.add(a, b)

writer = tf.summary.FileWriter('graphs/placeholders', tf.get_default_graph())
with tf.Session() as sess:
    # evaluate c while feeding a the value [1, 2, 3]
    print(sess.run(c, {a: [1, 2, 3]}))  # [6. 7. 8.]
writer.close()

# Example 2: feed_dict with variables
a = tf.add(2, 5)
b = tf.multiply(a, 3)

with tf.Session() as sess:
    print(sess.run(b))  # >> 21
    # evaluate b while feeding a the value 15 (overrides the add op's output)
    print(sess.run(b, feed_dict={a: 15}))  # >> 45

examples/02_simple_tf.py

Lines changed: 62 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
""" Simple TensorFlow's ops

Created by Chip Huyen (huyenn@stanford.edu)
CS20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
"""
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import numpy as np
import tensorflow as tf

# Example 1: Simple ways to create log file writer
a = tf.constant(2, name='a')
b = tf.constant(3, name='b')
x = tf.add(a, b, name='add')
writer = tf.summary.FileWriter('./graphs/simple', tf.get_default_graph())
with tf.Session() as sess:
    # writer = tf.summary.FileWriter('./graphs', sess.graph)
    print(sess.run(x))
writer.close()  # close the writer when you're done using it

# Example 2: The wonderful wizard of div
# TF ships several division ops with different rounding/casting behavior.
a = tf.constant([2, 2], name='a')
b = tf.constant([[0, 1], [2, 3]], name='b')

with tf.Session() as sess:
    print(sess.run(tf.div(b, a)))
    print(sess.run(tf.divide(b, a)))
    print(sess.run(tf.truediv(b, a)))
    print(sess.run(tf.floordiv(b, a)))
    # print(sess.run(tf.realdiv(b, a)))
    print(sess.run(tf.truncatediv(b, a)))
    print(sess.run(tf.floor_div(b, a)))

# Example 3: multiplying tensors
a = tf.constant([10, 20], name='a')
b = tf.constant([2, 3], name='b')

with tf.Session() as sess:
    print(sess.run(tf.multiply(a, b)))      # element-wise
    print(sess.run(tf.tensordot(a, b, 1)))  # dot product

# Example 4: Python native type
t_0 = 19
x = tf.zeros_like(t_0)  # ==> 0
y = tf.ones_like(t_0)   # ==> 1

t_1 = ['apple', 'peach', 'banana']
x = tf.zeros_like(t_1)  # ==> ['' '' '']
# y = tf.ones_like(t_1) # ==> TypeError: Expected string, got 1 of type 'int' instead.

t_2 = [[True, False, False],
       [False, False, True],
       [False, True, False]]
x = tf.zeros_like(t_2)  # ==> 3x3 tensor, all elements are False
y = tf.ones_like(t_2)   # ==> 3x3 tensor, all elements are True

print(tf.int32.as_numpy_dtype())

# Example 5: printing your graph's definition
my_const = tf.constant([1.0, 2.0], name='my_const')
print(tf.get_default_graph().as_graph_def())

examples/02_variables.py

Lines changed: 71 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,71 @@
1+
""" Variable examples

Created by Chip Huyen (huyenn@stanford.edu)
CS20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
Lecture 02
"""
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import numpy as np
import tensorflow as tf

# Example 1: creating variables
s = tf.Variable(2, name='scalar')
m = tf.Variable([[0, 1], [2, 3]], name='matrix')
W = tf.Variable(tf.zeros([784, 10]), name='big_matrix')
V = tf.Variable(tf.truncated_normal([784, 10]), name='normal_matrix')

# tf.get_variable registers each name in the variable store; reusing a
# name without an explicit reuse scope raises ValueError.
s = tf.get_variable('scalar', initializer=tf.constant(2))
m = tf.get_variable('matrix', initializer=tf.constant([[0, 1], [2, 3]]))
W = tf.get_variable('big_matrix', shape=(784, 10), initializer=tf.zeros_initializer())
V = tf.get_variable('normal_matrix', shape=(784, 10), initializer=tf.truncated_normal_initializer())

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(V.eval())

# Example 2: assigning values to variables
W = tf.Variable(10)
# creating the assign op alone does nothing -- it must be run in a session
W.assign(100)
with tf.Session() as sess:
    sess.run(W.initializer)
    print(sess.run(W))  # >> 10

W = tf.Variable(10)
assign_op = W.assign(100)
with tf.Session() as sess:
    # running the assign op both initializes W and sets it to 100
    sess.run(assign_op)
    print(W.eval())  # >> 100

# create a variable whose original value is 2
# BUG FIX: the store name must differ from the 'scalar' registered above,
# otherwise tf.get_variable raises "ValueError: Variable scalar already exists".
a = tf.get_variable('scalar_times_two', initializer=tf.constant(2))
a_times_two = a.assign(a * 2)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(a_times_two)  # >> 4
    sess.run(a_times_two)  # >> 8
    sess.run(a_times_two)  # >> 16

W = tf.Variable(10)
with tf.Session() as sess:
    sess.run(W.initializer)
    print(sess.run(W.assign_add(10)))  # >> 20
    print(sess.run(W.assign_sub(2)))   # >> 18

# Example 3: Each session has its own copy of variable
W = tf.Variable(10)
sess1 = tf.Session()
sess2 = tf.Session()
sess1.run(W.initializer)
sess2.run(W.initializer)
print(sess1.run(W.assign_add(10)))   # >> 20
print(sess2.run(W.assign_sub(2)))    # >> 8
print(sess1.run(W.assign_add(100)))  # >> 120
print(sess2.run(W.assign_sub(50)))   # >> -42
sess1.close()
sess2.close()

# Example 4: create a variable with the initial value depending on another variable
W = tf.Variable(tf.truncated_normal([700, 10]))
U = tf.Variable(W * 2)

0 commit comments

Comments
 (0)