@@ -92,7 +92,9 @@ def add_placeholders(self):
         (Don't change the variable names)
         """
         ### YOUR CODE HERE
-        raise NotImplementedError
+        self.input_placeholder = tf.placeholder(tf.int32, shape=(None, self.config.window_size))
+        self.labels_placeholder = tf.placeholder(tf.float32, shape=(None, self.config.label_size))
+        self.dropout_placeholder = tf.placeholder(tf.float32)
         ### END YOUR CODE

     def create_feed_dict(self, input_batch, dropout, label_batch=None):
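Note: the placeholder/feed pattern used above is standard TF 1.x graph-mode usage. A minimal self-contained sketch with toy shapes standing in for self.config (names below are illustrative, not from the assignment code):

    import numpy as np
    import tensorflow as tf

    inputs = tf.placeholder(tf.int32, shape=(None, 3))       # like input_placeholder, window_size=3
    doubled = inputs * 2                                     # any op consuming the placeholder
    with tf.Session() as sess:
        batch = np.arange(6, dtype=np.int32).reshape(2, 3)   # batch_size=2
        print(sess.run(doubled, feed_dict={inputs: batch}))  # every id doubled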
@@ -117,7 +119,11 @@ def create_feed_dict(self, input_batch, dropout, label_batch=None):
         feed_dict: The feed dictionary mapping from placeholders to values.
         """
         ### YOUR CODE HERE
-        raise NotImplementedError
+        assert input_batch is not None
+        if label_batch is None:
+            feed_dict = {self.input_placeholder: input_batch, self.dropout_placeholder: dropout}
+        else:
+            feed_dict = {self.input_placeholder: input_batch, self.labels_placeholder: label_batch, self.dropout_placeholder: dropout}
         ### END YOUR CODE
         return feed_dict

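Making label_batch optional lets the same method serve both training and prediction: at prediction time the labels placeholder is simply left out of the dict. The same pattern in plain Python, with hypothetical names, for illustration:

    def build_feed(input_ph, dropout_ph, labels_ph, inputs, dropout, labels=None):
        feed = {input_ph: inputs, dropout_ph: dropout}
        if labels is not None:
            feed[labels_ph] = labels  # omitted at prediction time
        return feed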
@@ -148,7 +154,8 @@ def add_embedding(self):
         # The embedding lookup is currently only implemented for the CPU
         with tf.device('/cpu:0'):
             ### YOUR CODE HERE
-            raise NotImplementedError
+            embeddings = tf.Variable(tf.random_uniform([len(self.wv), self.config.embed_size], -1.0, 1.0))
+            window = tf.reshape(tf.nn.embedding_lookup(embeddings, self.input_placeholder), [-1, self.config.window_size * self.config.embed_size])
             ### END YOUR CODE
         return window

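The reshape flattens each window of looked-up vectors into a single row of width window_size * embed_size. A quick shape check with toy sizes (vocab=10, embed_size=4, window_size=3; illustrative only):

    import tensorflow as tf

    ids = tf.placeholder(tf.int32, shape=(None, 3))
    emb = tf.Variable(tf.random_uniform([10, 4], -1.0, 1.0))
    looked_up = tf.nn.embedding_lookup(emb, ids)  # shape (batch, 3, 4)
    window = tf.reshape(looked_up, [-1, 3 * 4])   # shape (batch, 12)
    print(window.get_shape())                     # (?, 12)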
@@ -180,7 +187,22 @@ def add_model(self, window):
         output: tf.Tensor of shape (batch_size, label_size)
         """
         ### YOUR CODE HERE
-        raise NotImplementedError
+        xavier_initializer = xavier_weight_init()
+
+        with tf.variable_scope("Layer"):
+            W = tf.get_variable("W", initializer=xavier_initializer([self.config.window_size * self.config.embed_size, self.config.hidden_size]))
+            b1 = tf.get_variable("b1", [self.config.hidden_size])
+            h = tf.tanh(tf.matmul(window, W) + b1)
+
+        with tf.variable_scope("Softmax"):
+            U = tf.get_variable("U", initializer=xavier_initializer([self.config.hidden_size, self.config.label_size]))
+            b2 = tf.get_variable("b2", [self.config.label_size])
+            output_bf_dropout = tf.matmul(h, U) + b2
+
+        regularization_loss = self.config.l2 * (tf.nn.l2_loss(W) + tf.nn.l2_loss(U))
+        tf.add_to_collection("total_loss", regularization_loss)
+
+        output = tf.nn.dropout(output_bf_dropout, self.dropout_placeholder)
         ### END YOUR CODE
         return output

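This builds h = tanh(xW + b1) followed by logits hU + b2, stashing the L2 penalty on W and U in a collection for add_loss_op to pick up later. Note that dropout is applied to the logits here rather than to the hidden layer h, which is an unusual placement. A NumPy sketch of the forward pass with toy shapes (window=12, hidden=6, labels=5; illustrative, initializer details omitted):

    import numpy as np

    def forward(x, W, b1, U, b2):
        h = np.tanh(x @ W + b1)  # hidden layer, shape (batch, 6)
        return h @ U + b2        # logits, shape (batch, 5)

    x = np.random.randn(2, 12)
    out = forward(x, np.random.randn(12, 6), np.zeros(6), np.random.randn(6, 5), np.zeros(5))
    print(out.shape)  # (2, 5)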
@@ -195,7 +217,8 @@ def add_loss_op(self, y):
         loss: A 0-d tensor (scalar)
         """
         ### YOUR CODE HERE
-        raise NotImplementedError
+        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=self.labels_placeholder))
+        loss += tf.get_collection("total_loss")[-1]
         ### END YOUR CODE
         return loss

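reduce_mean averages the per-example cross-entropy over the batch, and the [-1] index retrieves the regularization term most recently added in add_model (tf.add_n over the whole collection would sum every term if the graph were built more than once). The cross-entropy itself, spelled out in NumPy with toy values, for illustration:

    import numpy as np

    logits = np.array([[2.0, 0.5], [0.1, 1.0]])
    labels = np.array([[1.0, 0.0], [0.0, 1.0]])                     # one-hot rows
    p = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)  # softmax
    loss = -(labels * np.log(p)).sum(axis=1).mean()                 # mean cross-entropy
    print(loss)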
@@ -219,7 +242,7 @@ def add_training_op(self, loss):
         train_op: The Op for training.
         """
         ### YOUR CODE HERE
-        raise NotImplementedError
+        train_op = tf.train.AdamOptimizer(learning_rate=self.config.lr).minimize(loss)
         ### END YOUR CODE
         return train_op

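minimize() both computes the gradients and applies the Adam update in a single op. A self-contained sketch on a toy quadratic (TF 1.x graph API; the learning rate here is illustrative, not self.config.lr):

    import tensorflow as tf

    w = tf.Variable(5.0)
    loss = tf.square(w - 2.0)
    train_op = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(200):
            sess.run(train_op)
        print(sess.run(w))  # approaches 2.0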