# -*- coding:utf-8 -*-
import tensorflow as tf
import numpy as np
import math
import collections
import pickle as pkl
from pprint import pprint
#from pymongo import MongoClient
import re
import jieba
import os.path as path
import os
class word2vec():
def __init__(self,
vocab_list=None,
embedding_size=200,
win_len=3, # 单边窗口长
num_sampled=1000,
learning_rate=1.0,
logdir='/tmp/simple_word2vec',
model_path= None
):
# 获得模型的基本参数
self.batch_size = None # 一批中数据个数, 目前是根据情况来的
if model_path!=None:
self.load_model(model_path)
else:
# model parameters
assert type(vocab_list)==list
self.vocab_list = vocab_list
self.vocab_size = vocab_list.__len__()
self.embedding_size = embedding_size
self.win_len = win_len
self.num_sampled = num_sampled
self.learning_rate = learning_rate
self.logdir = logdir
self.word2id = {} # word => id 的映射
for i in range(self.vocab_size):
self.word2id[self.vocab_list[i]] = i
# train times
self.train_words_num = 0 # 训练的单词对数
self.train_sents_num = 0 # 训练的句子数
self.train_times_num = 0 # 训练的次数(一次可以有多个句子)
# train loss records
self.train_loss_records = collections.deque(maxlen=10) # 保存最近10次的误差
self.train_loss_k10 = 0
self.build_graph()
self.init_op()
if model_path!=None:
tf_model_path = os.path.join(model_path,'tf_vars')
self.saver.restore(self.sess,tf_model_path)
def init_op(self):
self.sess = tf.Session(graph=self.graph)
self.sess.run(self.init)
self.summary_writer = tf.summary.FileWriter(self.logdir, self.sess.graph)
def build_graph(self):
self.graph = tf.Graph()
with self.graph.as_default():
self.train_inputs = tf.placeholder(tf.int32, shape=[self.batch_size])
self.train_labels = tf.placeholder(tf.int32, shape=[self.batch_size, 1])
self.embedding_dict = tf.Variable(
tf.random_uniform([self.vocab_size,self.embedding_size],-1.0,1.0)
)
self.nce_weight = tf.Variable(tf.truncated_normal([self.vocab_size, self.embedding_size],
# (scrape artifact, not code) blog title: 自然语言处理在tensorflow中的构建流程
# (scrape artifact, not code) CSDN metadata: 最新推荐文章于 2022-06-22 21:55:57 发布
# NOTE(review): the source is truncated above, mid-way through build_graph.