q1_classifier.py
import time
import numpy as np
import tensorflow as tf
from q1_softmax import softmax
from q1_softmax import cross_entropy_loss
from model import Model
from utils.general_utils import get_minibatches
class Config(object):
"""Holds model hyperparams and data information.
    The config class is used to store various hyperparameters and dataset
    information. Model objects are passed a Config() object at
    instantiation.
"""
n_samples = 1024
n_features = 100
n_classes = 5
batch_size = 64
n_epochs = 50
lr = 1e-4
class SoftmaxModel(Model):
"""Implements a Softmax classifier with cross-entropy loss."""
def add_placeholders(self):
"""Generates placeholder variables to represent the input tensors.
These placeholders are used as inputs by the rest of the model building
and will be fed data during training.
Adds following nodes to the computational graph
input_placeholder: Input placeholder tensor of shape
(batch_size, n_features), type tf.float32
labels_placeholder: Labels placeholder tensor of shape
(batch_size, n_classes), type tf.int32
Add these placeholders to self as the instance variables
self.input_placeholder
self.labels_placeholder
"""
### YOUR CODE HERE
        input_placeholder = tf.placeholder(
            tf.float32, [self.config.batch_size, self.config.n_features],
            name='input')
        # Per the contract above (and cross_entropy_loss in q1_softmax.py),
        # the one-hot labels are tf.int32, not tf.float32.
        labels_placeholder = tf.placeholder(
            tf.int32, [self.config.batch_size, self.config.n_classes],
            name='labels')
        self.input_placeholder = input_placeholder
        self.labels_placeholder = labels_placeholder
### END YOUR CODE
def create_feed_dict(self, inputs_batch, labels_batch=None):
"""Creates the feed_dict for training the given step.
A feed_dict takes the form of:
feed_dict = {
<placeholder>: <tensor of values to be passed for placeholder>,
....
}
        If labels_batch is None, then no labels are added to feed_dict.
Hint: The keys for the feed_dict should be the placeholder
tensors created in add_placeholders.
Args:
inputs_batch: A batch of input data.
labels_batch: A batch of label data.
Returns:
feed_dict: The feed dictionary mapping from placeholders to values.
"""
### YOUR CODE HERE
        feed_dict = {self.input_placeholder: inputs_batch}
        # Per the docstring above, omit the labels entry when none are given.
        if labels_batch is not None:
            feed_dict[self.labels_placeholder] = labels_batch
### END YOUR CODE
return feed_dict
def add_prediction_op(self):
"""Adds the core transformation for this model which transforms a batch of input
data into a batch of predictions. In this case, the transformation is a linear layer plus a
softmax transformation:
y = softmax(Wx + b)
Hint: Make sure to create tf.Variables as needed.
Hint: For this simple use-case, it's sufficient to initialize both weights W
and biases b with zeros.
Args:
input_data: A tensor of shape (batch_size, n_features).
Returns:
pred: A tensor of shape (batch_size, n_classes)
"""
### YOUR CODE HERE
        with tf.variable_scope("transformation"):
            # Per the hint above, zero initialization is sufficient for a
            # single linear layer (the loss is still convex in W and b).
            b = tf.Variable(tf.zeros([self.config.n_classes]))
            W = tf.Variable(tf.zeros([self.config.n_features,
                                      self.config.n_classes]))
            z = tf.matmul(self.input_placeholder, W) + b
            pred = softmax(z)
### END YOUR CODE
return pred
def add_loss_op(self, pred):
"""Adds cross_entropy_loss ops to the computational graph.
Hint: Use the cross_entropy_loss function we defined. This should be a very
short function.
Args:
pred: A tensor of shape (batch_size, n_classes)
Returns:
loss: A 0-d tensor (scalar)
"""
### YOUR CODE HERE
loss = cross_entropy_loss(self.labels_placeholder, pred)
### END YOUR CODE
return loss
def add_training_op(self, loss):
"""Sets up the training Ops.
Creates an optimizer and applies the gradients to all trainable variables.
The Op returned by this function is what must be passed to the
`sess.run()` call to cause the model to train. See
https://www.tensorflow.org/versions/r0.7/api_docs/python/train.html#Optimizer
for more information.
Hint: Use tf.train.GradientDescentOptimizer to get an optimizer object.
Calling optimizer.minimize() will return a train_op object.
Args:
loss: Loss tensor, from cross_entropy_loss.
Returns:
train_op: The Op for training.
"""
### YOUR CODE HERE
train_op = tf.train.GradientDescentOptimizer(self.config.lr).minimize(loss)
### END YOUR CODE
return train_op
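    def __init__(self, config):
        """Initializes the model by storing the config and building the graph.
        (Mirrors ParserModel.__init__ in q2_parser_model.py; assumes that
        Model.build() in model.py chains add_placeholders, add_prediction_op,
        add_loss_op and add_training_op and exposes self.loss and
        self.train_op, as the ParserModel code below relies on.)
        """
        self.config = config
        self.build()


# A minimal smoke test for SoftmaxModel -- a sketch under the assumptions
# noted in __init__ above, and assuming get_minibatches yields aligned
# (inputs, labels) batches as it is used elsewhere in this assignment. It
# fits random inputs against a constant one-hot label and checks that
# gradient descent drives the loss down.
def test_softmax_model():
    config = Config()
    np.random.seed(224)
    inputs = np.random.rand(config.n_samples, config.n_features)
    labels = np.zeros((config.n_samples, config.n_classes), dtype=np.int32)
    labels[:, 0] = 1  # every example belongs to class 0

    with tf.Graph().as_default():
        model = SoftmaxModel(config)
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            sess.run(init)
            losses = []
            for epoch in range(config.n_epochs):
                start = time.time()
                epoch_loss = 0.0
                for inputs_batch, labels_batch in get_minibatches(
                        [inputs, labels], config.batch_size):
                    feed = model.create_feed_dict(inputs_batch, labels_batch)
                    _, loss = sess.run([model.train_op, model.loss],
                                       feed_dict=feed)
                    epoch_loss += loss
                losses.append(epoch_loss)
                print("Epoch {:} loss {:.2f} ({:.3f} sec)".format(
                    epoch, epoch_loss, time.time() - start))
    assert losses[-1] < losses[0], "Loss did not decrease during training"
    print("Basic (non-exhaustive) SoftmaxModel test passed")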
q1_softmax.py
import numpy as np
import tensorflow as tf
from utils.general_utils import test_all_close
def softmax(x):
"""
Compute the softmax function in tensorflow.
You might find the tensorflow functions tf.exp, tf.reduce_max,
tf.reduce_sum, tf.expand_dims useful. (Many solutions are possible, so you may
not need to use all of these functions). Recall also that many common
tensorflow operations are sugared (e.g. x * y does a tensor multiplication
if x and y are both tensors). Make sure to implement the numerical stability
fixes as in the previous homework!
Args:
x: tf.Tensor with shape (n_samples, n_features). Note feature vectors are
represented by row-vectors. (For simplicity, no need to handle 1-d
input as in the previous homework)
Returns:
        out: tf.Tensor with shape (n_samples, n_features). You need to construct this
tensor in this problem.
"""
### YOUR CODE HERE
        # Subtract the row-wise max before exponentiating so exp never
        # overflows (the usual softmax numerical-stability fix).
        x_max = tf.reduce_max(x, 1, keep_dims=True)
        x_exp = tf.exp(x - x_max)
        sum_exp = tf.reduce_sum(x_exp, 1, keep_dims=True)
        out = x_exp / sum_exp
### END YOUR CODE
return out
def cross_entropy_loss(y, yhat):
"""
Compute the cross entropy loss in tensorflow.
The loss should be summed over the current minibatch.
y is a one-hot tensor of shape (n_samples, n_classes) and yhat is a tensor
of shape (n_samples, n_classes). y should be of dtype tf.int32, and yhat should
be of dtype tf.float32.
The functions tf.to_float, tf.reduce_sum, and tf.log might prove useful. (Many
solutions are possible, so you may not need to use all of these functions).
Note: You are NOT allowed to use the tensorflow built-in cross-entropy
functions.
Args:
y: tf.Tensor with shape (n_samples, n_classes). One-hot encoded.
        yhat: tf.Tensor with shape (n_samples, n_classes). Each row encodes a
            probability distribution and should sum to 1.
    Returns:
        out: tf.Tensor with shape (), i.e. a 0-d scalar. You need to construct
            this tensor in the problem.
"""
### YOUR CODE HERE
log_yhat = tf.log(yhat)
product = tf.multiply(tf.to_float(y), log_yhat) # multiply element-wise
out = tf.negative(tf.reduce_sum(product))
### END YOUR CODE
return out
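# A quick, hand-checkable sanity test for the two ops above -- a sketch using
# the TF 1.x Session API that the rest of this code assumes. Softmax of equal
# logits is uniform; the cross entropy of a one-hot label against a uniform
# 2-class prediction is log(2); and the large-logit row exercises the
# max-subtraction stability fix.
def test_softmax_basic():
    with tf.Graph().as_default(), tf.Session() as sess:
        logits = tf.constant([[1.0, 1.0], [1001.0, 1002.0]], dtype=tf.float32)
        out = sess.run(softmax(logits))
        assert np.allclose(out[0], [0.5, 0.5])
        assert not np.any(np.isnan(out))  # no overflow despite huge logits

        y = tf.constant([[0, 1]], dtype=tf.int32)
        yhat = tf.constant([[0.5, 0.5]], dtype=tf.float32)
        loss = sess.run(cross_entropy_loss(y, yhat))
        assert np.isclose(loss, np.log(2))
    print("Basic (non-exhaustive) softmax tests pass")


if __name__ == "__main__":
    test_softmax_basic()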
q2_parser_transition.py
class PartialParse(object):
def __init__(self, sentence):
"""Initializes this partial parse.
Your code should initialize the following fields:
self.stack: The current stack represented as a list with the top of the stack as the
last element of the list.
self.buffer: The current buffer represented as a list with the first item on the
buffer as the first item of the list
self.dependencies: The list of dependencies produced so far. Represented as a list of
tuples where each tuple is of the form (head, dependent).
Order for this list doesn't matter.
The root token should be represented with the string "ROOT"
Args:
sentence: The sentence to be parsed as a list of words.
Your code should not modify the sentence.
"""
# The sentence being parsed is kept for bookkeeping purposes. Do not use it in your code.
self.sentence = sentence
### YOUR CODE HERE
        self.stack = ["ROOT"]
        self.buffer = sentence[:]  # copy, so the caller's sentence is not modified
        self.dependencies = []
### END YOUR CODE
def parse_step(self, transition):
"""Performs a single parse step by applying the given transition to this partial parse
Args:
transition: A string that equals "S", "LA", or "RA" representing the shift, left-arc,
and right-arc transitions.
"""
### YOUR CODE HERE
        if transition == "S":
            # Shift: move the first buffer item onto the stack.
            self.stack.append(self.buffer.pop(0))
        elif transition == "LA":
            # Left-arc: the second-to-top word becomes a dependent of the top.
            dependent = self.stack.pop(-2)
            self.dependencies.append((self.stack[-1], dependent))
        elif transition == "RA":
            # Right-arc: the top word becomes a dependent of the second-to-top.
            dependent = self.stack.pop()
            self.dependencies.append((self.stack[-1], dependent))
        else:
            raise ValueError("unknown transition: " + str(transition))
### END YOUR CODE
def parse(self, transitions):
"""Applies the provided transitions to this PartialParse
Args:
transitions: The list of transitions in the order they should be applied
Return:
dependencies: The list of dependencies produced when parsing the sentence. Represented
as a list of tuples where each tuple is of the form (head, dependent)
"""
for transition in transitions:
self.parse_step(transition)
return self.dependencies
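# A worked example for PartialParse -- a sketch. Parsing
# ["parse", "this", "sentence"] with the transitions S, S, S, LA, RA, RA
# evolves as follows (new arc shown on the right):
#   [ROOT]                          buffer: [parse, this, sentence]
#   [ROOT, parse]                   S
#   [ROOT, parse, this]             S
#   [ROOT, parse, this, sentence]   S
#   [ROOT, parse, sentence]         LA adds (sentence, this)
#   [ROOT, parse]                   RA adds (parse, sentence)
#   [ROOT]                          RA adds (ROOT, parse)
def example_parse():
    deps = PartialParse(["parse", "this", "sentence"]).parse(
        ["S", "S", "S", "LA", "RA", "RA"])
    assert sorted(deps) == [("ROOT", "parse"), ("parse", "sentence"),
                            ("sentence", "this")]
    print("example_parse passed")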
def minibatch_parse(sentences, model, batch_size):
"""Parses a list of sentences in minibatches using a model.
Args:
sentences: A list of sentences to be parsed (each sentence is a list of words)
model: The model that makes parsing decisions. It is assumed to have a function
model.predict(partial_parses) that takes in a list of PartialParses as input and
returns a list of transitions predicted for each parse. That is, after calling
transitions = model.predict(partial_parses)
transitions[i] will be the next transition to apply to partial_parses[i].
batch_size: The number of PartialParses to include in each minibatch
Returns:
dependencies: A list where each element is the dependencies list for a parsed sentence.
Ordering should be the same as in sentences (i.e., dependencies[i] should
contain the parse for sentences[i]).
"""
### YOUR CODE HERE
    partial_parses = [PartialParse(sentence) for sentence in sentences]
    # A shallow copy: unfinished_parses can be trimmed without disturbing
    # partial_parses, but both lists reference the same PartialParse objects,
    # so every parse step is visible through partial_parses as well.
    unfinished_parses = partial_parses[:]
    while unfinished_parses:
        minibatch = unfinished_parses[:batch_size]
        while minibatch:
            transitions = model.predict(minibatch)
            for parse, transition in zip(minibatch, transitions):
                parse.parse_step(transition)
            # Drop parses that are done (stack reduced to [ROOT], empty buffer).
            minibatch = [parse for parse in minibatch
                         if len(parse.stack) > 1 or len(parse.buffer) > 0]
        # The first batch_size parses are now complete.
        unfinished_parses = unfinished_parses[batch_size:]
    dependencies = [parse.dependencies for parse in partial_parses]
### END YOUR CODE
return dependencies
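# A small check for minibatch_parse -- a sketch. _ShiftThenRightArcModel is a
# hypothetical stand-in for a trained model (illustration only): it shifts
# while a parse still has buffered words, then right-arcs everything, so each
# sentence reduces to a chain of head -> dependent arcs hanging off ROOT.
class _ShiftThenRightArcModel(object):
    def predict(self, partial_parses):
        return ["S" if pp.buffer else "RA" for pp in partial_parses]


def test_minibatch_parse():
    sentences = [["right", "arcs", "only"],
                 ["right", "arcs", "only", "again"]]
    deps = minibatch_parse(sentences, _ShiftThenRightArcModel(), batch_size=2)
    # The two sentences finish after different numbers of steps, which
    # exercises the removal of completed parses from the minibatch.
    assert ("ROOT", "right") in deps[0] and ("arcs", "only") in deps[0]
    assert ("only", "again") in deps[1]
    print("test_minibatch_parse passed")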
q2_initialization.py
import numpy as np
import tensorflow as tf
def xavier_weight_init():
"""Returns function that creates random tensor.
The specified function will take in a shape (tuple or 1-d array) and
returns a random tensor of the specified shape drawn from the
Xavier initialization distribution.
Hint: You might find tf.random_uniform useful.
"""
def _xavier_initializer(shape, **kwargs):
"""Defines an initializer for the Xavier distribution.
        Specifically, the output should be sampled uniformly from [-epsilon, epsilon] where
            epsilon = sqrt(6 / <sum of the sizes of shape's dimensions>)
        e.g., if shape = (2, 3), epsilon = sqrt(6 / (2 + 3))
This function will be used as a variable initializer.
Args:
            shape: Tuple or 1-d array that specifies the dimensions of the requested tensor.
Returns:
out: tf.Tensor of specified shape sampled from the Xavier distribution.
"""
### YOUR CODE HERE
        # Sample uniformly from [-epsilon, epsilon] with
        # epsilon = sqrt(6 / sum(shape)), as specified above. (Drawing from a
        # normal distribution, as an earlier draft did, is not the Xavier
        # initialization asked for here.)
        epsilon = np.sqrt(6.0 / np.sum(shape))
        out = tf.random_uniform(shape, minval=-epsilon, maxval=epsilon)
### END YOUR CODE
return out
# Returns defined initializer function.
return _xavier_initializer
def test_initialization_basic():
"""Some simple tests for the initialization.
"""
print "Running basic tests..."
xavier_initializer = xavier_weight_init()
shape = (1,)
xavier_mat = xavier_initializer(shape)
assert xavier_mat.get_shape() == shape
shape = (1, 2, 3)
xavier_mat = xavier_initializer(shape)
assert xavier_mat.get_shape() == shape
print "Basic (non-exhaustive) Xavier initialization tests pass"
if __name__ == "__main__":
test_initialization_basic()
q2_parser_model.py
import os
import time
import tensorflow as tf
import cPickle
from model import Model
from q2_initialization import xavier_weight_init
from utils.general_utils import Progbar
from utils.parser_utils import minibatches, load_and_preprocess_data
class Config(object):
"""Holds model hyperparams and data information.
    The config class is used to store various hyperparameters and dataset
    information. Model objects are passed a Config() object at
    instantiation.
"""
n_features = 36
n_classes = 3
dropout = 0.5
embed_size = 50
hidden_size = 200
batch_size = 2048
n_epochs = 10
lr = 0.001
class ParserModel(Model):
"""
Implements a feedforward neural network with an embedding layer and single hidden layer.
This network will predict which transition should be applied to a given partial parse
configuration.
"""
def add_placeholders(self):
"""Generates placeholder variables to represent the input tensors
These placeholders are used as inputs by the rest of the model building and will be fed
data during training. Note that when "None" is in a placeholder's shape, it's flexible
(so we can use different batch sizes without rebuilding the model).
Adds following nodes to the computational graph
input_placeholder: Input placeholder tensor of shape (None, n_features), type tf.int32
labels_placeholder: Labels placeholder tensor of shape (None, n_classes), type tf.float32
dropout_placeholder: Dropout value placeholder (scalar), type tf.float32
Add these placeholders to self as the instance variables
self.input_placeholder
self.labels_placeholder
self.dropout_placeholder
(Don't change the variable names)
"""
### YOUR CODE HERE
        self.input_placeholder = tf.placeholder(tf.int32, shape=(None, self.config.n_features))
        self.labels_placeholder = tf.placeholder(tf.float32, shape=(None, self.config.n_classes))
        self.dropout_placeholder = tf.placeholder(tf.float32)
### END YOUR CODE
def create_feed_dict(self, inputs_batch, labels_batch=None, dropout=1):
"""Creates the feed_dict for the dependency parser.
A feed_dict takes the form of:
feed_dict = {
<placeholder>: <tensor of values to be passed for placeholder>,
....
}
Hint: The keys for the feed_dict should be a subset of the placeholder
tensors created in add_placeholders.
Hint: When an argument is None, don't add it to the feed_dict.
Args:
inputs_batch: A batch of input data.
labels_batch: A batch of label data.
dropout: The dropout rate.
Returns:
feed_dict: The feed dictionary mapping from placeholders to values.
"""
### YOUR CODE HERE
        feed_dict = {self.input_placeholder: inputs_batch,
                     self.dropout_placeholder: dropout}
        # Per the hint above, only add the labels when they are provided.
        if labels_batch is not None:
            feed_dict[self.labels_placeholder] = labels_batch
### END YOUR CODE
return feed_dict
def add_embedding(self):
"""Adds an embedding layer that maps from input tokens (integers) to vectors and then
concatenates those vectors:
- Creates an embedding tensor and initializes it with self.pretrained_embeddings.
- Uses the input_placeholder to index into the embeddings tensor, resulting in a
tensor of shape (None, n_features, embedding_size).
- Concatenates the embeddings by reshaping the embeddings tensor to shape
(None, n_features * embedding_size).
Hint: You might find tf.nn.embedding_lookup useful.
Hint: You can use tf.reshape to concatenate the vectors. See following link to understand
what -1 in a shape means.
https://www.tensorflow.org/api_docs/python/array_ops/shapes_and_shaping#reshape.
Returns:
embeddings: tf.Tensor of shape (None, n_features*embed_size)
"""
### YOUR CODE HERE
        # Trainable embedding matrix, initialized from the pretrained vectors.
        emb = tf.Variable(self.pretrained_embeddings)
        # (None, n_features) int32 ids -> (None, n_features, embed_size).
        embedding = tf.nn.embedding_lookup(emb, self.input_placeholder)
        # Concatenate the per-token vectors into one row per example.
        embeddings = tf.reshape(
            embedding, (-1, self.config.n_features * self.config.embed_size))
### END YOUR CODE
return embeddings
def add_prediction_op(self):
"""Adds the 1-hidden-layer NN:
h = Relu(xW + b1)
h_drop = Dropout(h, dropout_rate)
pred = h_dropU + b2
Note that we are not applying a softmax to pred. The softmax will instead be done in
the add_loss_op function, which improves efficiency because we can use
tf.nn.softmax_cross_entropy_with_logits
Use the initializer from q2_initialization.py to initialize W and U (you can initialize b1
and b2 with zeros)
Hint: Here are the dimensions of the various variables you will need to create
W: (n_features*embed_size, hidden_size)
b1: (hidden_size,)
U: (hidden_size, n_classes)
b2: (n_classes)
Hint: Note that tf.nn.dropout takes the keep probability (1 - p_drop) as an argument.
The keep probability should be set to the value of self.dropout_placeholder
Returns:
pred: tf.Tensor of shape (batch_size, n_classes)
"""
x = self.add_embedding()
### YOUR CODE HERE
        xavier = xavier_weight_init()
        W = tf.Variable(xavier((self.config.n_features * self.config.embed_size,
                                self.config.hidden_size)))
        U = tf.Variable(xavier((self.config.hidden_size, self.config.n_classes)))
        b1 = tf.Variable(tf.zeros([self.config.hidden_size]))
        b2 = tf.Variable(tf.zeros([self.config.n_classes]))
        # h = ReLU(xW + b1); the dropout placeholder holds the keep probability.
        h = tf.nn.relu(tf.matmul(x, W) + b1)
        h_drop = tf.nn.dropout(h, self.dropout_placeholder)
        pred = tf.matmul(h_drop, U) + b2
### END YOUR CODE
return pred
def add_loss_op(self, pred):
"""Adds Ops for the loss function to the computational graph.
In this case we are using cross entropy loss.
The loss should be averaged over all examples in the current minibatch.
Hint: You can use tf.nn.softmax_cross_entropy_with_logits to simplify your
implementation. You might find tf.reduce_mean useful.
Args:
pred: A tensor of shape (batch_size, n_classes) containing the output of the neural
network before the softmax layer.
Returns:
loss: A 0-d tensor (scalar)
"""
### YOUR CODE HERE
        # Per-example cross entropy on the unnormalized logits, then the mean
        # over the minibatch.
        losses = tf.nn.softmax_cross_entropy_with_logits(
            logits=pred, labels=self.labels_placeholder)
        loss = tf.reduce_mean(losses)
### END YOUR CODE
return loss
def add_training_op(self, loss):
"""Sets up the training Ops.
Creates an optimizer and applies the gradients to all trainable variables.
The Op returned by this function is what must be passed to the
`sess.run()` call to cause the model to train. See
https://www.tensorflow.org/versions/r0.7/api_docs/python/train.html#Optimizer
for more information.
Use tf.train.AdamOptimizer for this model.
Calling optimizer.minimize() will return a train_op object.
Args:
loss: Loss tensor, from cross_entropy_loss.
Returns:
train_op: The Op for training.
"""
### YOUR CODE HERE
        # Initialize the Adam optimizer with learning rate self.config.lr.
optimizer = tf.train.AdamOptimizer(self.config.lr)
train_op = optimizer.minimize(loss)
### END YOUR CODE
return train_op
def train_on_batch(self, sess, inputs_batch, labels_batch):
feed = self.create_feed_dict(inputs_batch, labels_batch=labels_batch,
dropout=self.config.dropout)
_, loss = sess.run([self.train_op, self.loss], feed_dict=feed)
return loss
def run_epoch(self, sess, parser, train_examples, dev_set):
prog = Progbar(target=1 + len(train_examples) / self.config.batch_size)
for i, (train_x, train_y) in enumerate(minibatches(train_examples, self.config.batch_size)):
loss = self.train_on_batch(sess, train_x, train_y)
prog.update(i + 1, [("train loss", loss)])
print "Evaluating on dev set",
dev_UAS, _ = parser.parse(dev_set)
print "- dev UAS: {:.2f}".format(dev_UAS * 100.0)
return dev_UAS
def fit(self, sess, saver, parser, train_examples, dev_set):
best_dev_UAS = 0
for epoch in range(self.config.n_epochs):
print "Epoch {:} out of {:}".format(epoch + 1, self.config.n_epochs)
dev_UAS = self.run_epoch(sess, parser, train_examples, dev_set)
if dev_UAS > best_dev_UAS:
best_dev_UAS = dev_UAS
if saver:
print "New best dev UAS! Saving model in ./data/weights/parser.weights"
saver.save(sess, './data/weights/parser.weights')
print
def __init__(self, config, pretrained_embeddings):
self.pretrained_embeddings = pretrained_embeddings
self.config = config
self.build()
def main(debug=False):
print 80 * "="
print "INITIALIZING"
print 80 * "="
config = Config()
parser, embeddings, train_examples, dev_set, test_set = load_and_preprocess_data(debug)
if not os.path.exists('./data/weights/'):
os.makedirs('./data/weights/')
with tf.Graph().as_default():
print "Building model...",
start = time.time()
model = ParserModel(config, embeddings)
parser.model = model
print "took {:.2f} seconds\n".format(time.time() - start)
init = tf.global_variables_initializer()
# If you are using an old version of TensorFlow, you may have to use
# this initializer instead.
# init = tf.initialize_all_variables()
saver = None if debug else tf.train.Saver()
with tf.Session() as session:
parser.session = session
session.run(init)
print 80 * "="
print "TRAINING"
print 80 * "="
model.fit(session, saver, parser, train_examples, dev_set)
if not debug:
print 80 * "="
print "TESTING"
print 80 * "="
print "Restoring the best model weights found on the dev set"
saver.restore(session, './data/weights/parser.weights')
print "Final evaluation on test set",
UAS, dependencies = parser.parse(test_set)
print "- test UAS: {:.2f}".format(UAS * 100.0)
print "Writing predictions"
                with open('q2_test.predicted.pkl', 'wb') as f:  # binary mode for cPickle
cPickle.dump(dependencies, f, -1)
print "Done!"
if __name__ == '__main__':
main()
Run results: