99from __future__ import print_function
1010
1111import os
12+ os .environ ['TF_CPP_MIN_LOG_LEVEL' ]= '2'
1213import time
1314
1415import numpy as np
# Working image dimensions; content and style images are resized to these.
IMAGE_WIDTH = 333
# Fraction of white noise blended into the content image when seeding
# the generated image.
NOISE_RATIO = 0.6

# Relative weights of the two terms in the total loss.
CONTENT_WEIGHT = 0.01
STYLE_WEIGHT = 1

# Layers used for style features. You can change this.
STYLE_LAYERS = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
# Per-layer style-loss weights; deeper layers are weighted more heavily.
W = [0.5, 1.0, 1.5, 3.0, 4.0]
def _create_content_loss(p, f):
    """Return the content loss between the content-image features and the
    generated-image features.

    Args:
        p: feature representation of the content image at the content layer
           (evaluated, so it exposes ``.size``).
        f: feature representation of the generated image at the same layer.

    Returns:
        the content loss
    """
    # Sum of squared differences, scaled by 1/(4 * number of elements)
    # as specified in the assignment handout (not the paper's 0.5).
    diff = f - p
    return tf.reduce_sum(diff * diff) / (p.size * 4.0)
def _gram_matrix(F, N, M):
    """ Create and return the gram matrix for tensor F
    Hint: you'll first have to reshape F
    """
    # Flatten the feature map to (M, N): M spatial positions by N channels.
    # The Gram matrix is then the N x N matrix of channel inner products.
    flat = tf.reshape(F, (M, N))
    return tf.matmul(flat, flat, transpose_a=True)
def _single_style_loss(a, g):
    """ Calculate the style loss at a certain layer

    Args:
        a: feature representation of the style image at that layer
           (evaluated, so ``.shape`` is concrete).
        g: feature representation of the generated image at that layer.

    Notes:
        1. uses the same coefficient for style loss as in the paper
        2. a and g are feature representations, not gram matrices
    """
    n_filters = a.shape[3]               # number of filters (channels)
    map_size = a.shape[1] * a.shape[2]   # height * width of the feature map
    gram_style = _gram_matrix(a, n_filters, map_size)
    gram_generated = _gram_matrix(g, n_filters, map_size)
    # Paper's per-layer coefficient: 1 / (2 * N * M)^2.
    scale = (2 * n_filters * map_size) ** 2
    return tf.reduce_sum((gram_generated - gram_style) ** 2 / scale)
8796def _create_style_loss (A , model ):
8897 """ Return the total style loss
@@ -92,7 +101,7 @@ def _create_style_loss(A, model):
92101
93102 ###############################
94103 ## TO DO: return total style loss
95- pass
104+ return sum ([ W [ i ] * E [ i ] for i in range ( n_layers )])
96105 ###############################
97106
98107def _create_losses (model , input_image , content_image , style_image ):
@@ -110,7 +119,7 @@ def _create_losses(model, input_image, content_image, style_image):
110119 ##########################################
111120 ## TO DO: create total loss.
112121 ## Hint: don't forget the content loss and style loss weights
113-
122+ total_loss = CONTENT_WEIGHT * content_loss + STYLE_WEIGHT * style_loss
114123 ##########################################
115124
116125 return content_loss , style_loss , total_loss
def _create_summary(model):
    """ Create summary ops necessary
    Hint: don't forget to merge them
    """
    with tf.name_scope('summaries'):
        # One scalar and one histogram summary per tracked loss.
        for name in ('content', 'style', 'total'):
            tf.summary.scalar(name + ' loss', model[name + '_loss'])
        for name in ('content', 'style', 'total'):
            tf.summary.histogram('histogram ' + name + ' loss',
                                 model[name + '_loss'])
        return tf.summary.merge_all()
123139
124140def train (model , generated_image , initial_image ):
125141 """ Train your model.
@@ -132,6 +148,9 @@ def train(model, generated_image, initial_image):
132148 ## TO DO:
133149 ## 1. initialize your variables
134150 ## 2. create writer to write your graph
151+ saver = tf .train .Saver ()
152+ sess .run (tf .global_variables_initializer ())
153+ writer = tf .summary .FileWriter ('graphs' , sess .graph )
135154 ###############################
136155 sess .run (generated_image .assign (initial_image ))
137156 ckpt = tf .train .get_checkpoint_state (os .path .dirname ('checkpoints/checkpoint' ))
@@ -150,6 +169,8 @@ def train(model, generated_image, initial_image):
150169 if (index + 1 ) % skip_step == 0 :
151170 ###############################
152171 ## TO DO: obtain generated image and loss
172+ gen_image , total_loss , summary = sess .run ([generated_image , model ['total_loss' ],
173+ model ['summary_op' ]])
153174
154175 ###############################
155176 gen_image = gen_image + MEAN_PIXELS
@@ -172,9 +193,11 @@ def main():
172193 input_image = tf .Variable (np .zeros ([1 , IMAGE_HEIGHT , IMAGE_WIDTH , 3 ]), dtype = tf .float32 )
173194
174195 utils .download (VGG_DOWNLOAD_LINK , VGG_MODEL , EXPECTED_BYTES )
196+ utils .make_dir ('checkpoints' )
197+ utils .make_dir ('outputs' )
175198 model = vgg_model .load_vgg (VGG_MODEL , input_image )
176199 model ['global_step' ] = tf .Variable (0 , dtype = tf .int32 , trainable = False , name = 'global_step' )
177-
200+
178201 content_image = utils .get_resized_image (CONTENT_IMAGE , IMAGE_HEIGHT , IMAGE_WIDTH )
179202 content_image = content_image - MEAN_PIXELS
180203 style_image = utils .get_resized_image (STYLE_IMAGE , IMAGE_HEIGHT , IMAGE_WIDTH )
@@ -184,7 +207,8 @@ def main():
184207 input_image , content_image , style_image )
185208 ###############################
186209 ## TO DO: create optimizer
187- ## model['optimizer'] = ...
210+ model ['optimizer' ] = tf .train .AdamOptimizer (LR ).minimize (model ['total_loss' ],
211+ global_step = model ['global_step' ])
188212 ###############################
189213 model ['summary_op' ] = _create_summary (model )
190214
0 commit comments