https://github.com/FudanNLP/nlp-beginner
1. BOW+logistic
Note: only while writing task2 did I realize I had misunderstood SGD — instead of stepping on one example at a time (in place of the full-batch average), I implemented it as repeatedly sampling a random batch of rows and averaging their gradients per step, i.e. mini-batch gradient descent.
1. task1.py
import pandas as pd
import representation

# Build a bag-of-words representation over the training set.
frame = pd.read_table('train.tsv')
model = representation.BOW(frame, 5)  # 5 = type_num, presumably the number of label classes — TODO confirm
model.match()
# Debug helpers:
# print(frame.shape[0])   # number of rows (samples)
# print(model.w)
# print(model.vector[0])  # e.g. [1. 1. 4. ... 0. 0. 0.]
2. representation.py
import numpy as np
import pandas as pd
from nltk.tokenize import word_tokenize
import copy
from sklearn.model_selection import train_test_split
def sigmoid(x_data, w):
return 1 / (1 + np.exp(-1 * np.dot(x_data.T, w)))
def logistic(x_data, y_data, w, word_num, w_num):
    """Train binary logistic-regression weights with mini-batch gradient descent.

    Args:
        x_data: iterable of per-sample feature vectors, each of length
            ``word_num`` (one entry per sample; batches are stacked to shape
            (word_num, batch) before the matrix products).
        y_data: iterable of per-sample 0/1 labels; each batch is reshaped
            to (batch, 1).
        w: initial weight vector of shape (word_num, 1).
        word_num: feature (vocabulary) dimension.
        w_num: classifier index, used only in the progress printout.

    Returns:
        The trained weight vector, shape (word_num, 1).
    """
    iter_num = 100       # number of gradient steps
    batch_size = 2000    # mini-batch size per step
    learning_rate = 1    # step size
    eps = 1e-12          # keeps log() away from 0 when predictions saturate

    # DataFrame makes it easy to draw a random batch of (x, y) rows together.
    data = pd.DataFrame({'x': list(x_data), 'y': list(y_data)})
    # sample(n=...) raises ValueError if asked for more rows than exist,
    # so cap the batch at the dataset size.
    n = min(batch_size, len(data))

    for i in range(iter_num):
        batch = data.sample(n=n)
        bx = np.array(list(batch['x'])).reshape(n, word_num).T  # (word_num, n)
        by = np.array(list(batch['y'])).reshape(n, 1)           # (n, 1)

        # Compute predictions once per step (the original recomputed
        # sigmoid four times); clip only for the log() in the cost so the
        # printed cross-entropy never becomes -inf/nan on saturated outputs.
        pred = sigmoid(bx, w)
        p = np.clip(pred, eps, 1 - eps)
        cost_before = -np.sum(by * np.log(p) + (1 - by) * np.log(1 - p)) / n

        # Gradient of the mean cross-entropy w.r.t. w, then one descent step.
        delta = np.dot(bx, pred - by) / n
        w = w - learning_rate * delta

        p = np.clip(sigmoid(bx, w), eps, 1 - eps)
        cost_after = -np.sum(by * np.log(p) + (1 - by) * np.log(1 - p)) / n
        print('%d-%d :%.4f ~ %.4f' % (w_num, i, cost_before, cost_after))
    return w
class BOW(object):
def __init__(self, train_data, type_num)