# Logistic regression implemented with TensorFlow.
# Concrete code implementation below.
import ast
import math

import numpy as np
import tensorflow as tf
class data_reader:
    """Read whitespace-separated numeric records from a text file.

    Each non-blank line is one sample; by convention the last field of a
    row is the label, which ``get_x_y_list`` splits off.
    """

    # Class-level default kept for backward compatibility. Instances now
    # store their own copy (see __init__), so multiple readers no longer
    # clobber each other's filename.
    file_name = str()

    def __init__(self, file_name):
        # Bug fix: the original wrote ``data_reader.file_name = file_name``,
        # mutating the shared class attribute — the most recently created
        # instance silently redirected every other instance's file.
        self.file_name = file_name

    def get_data_list(self, num_lost):
        """Parse the file into a list of numeric rows.

        Fields that are not valid Python literals (missing or corrupt
        values) are replaced by ``num_lost``. Blank lines are skipped so
        they cannot produce empty rows that would crash ``get_x_y_list``.

        :param num_lost: filler value substituted for unparseable fields.
        :return: list of rows, each row a list of numbers.
        """
        db = list()
        # ``with`` guarantees the handle is closed even on error; plain
        # 'r' suffices — the original 'r+' write access was never used.
        with open(self.file_name, 'r') as file_:
            for line in file_:
                tokens = line.split()
                if not tokens:  # skip blank lines
                    continue
                row = list()
                for token in tokens:
                    try:
                        # Security fix: ast.literal_eval only accepts
                        # literals, so malicious file content cannot
                        # execute code the way the original eval() could.
                        # It is also evaluated once, not twice per token.
                        row.append(ast.literal_eval(token))
                    except (ValueError, SyntaxError):
                        row.append(num_lost)
                db.append(row)
        return db

    def get_x_y_list(self, db):
        """Split rows into feature vectors and labels.

        The last element of each row is the label; the rest are the
        features. Unlike the original (which ``del``-eted the label from
        each row), the caller's rows are not mutated.

        :param db: list of rows as returned by ``get_data_list``.
        :return: ``(x_list, y_list)`` tuple, or ``None`` if ``db`` is
            not a list.
        """
        if not isinstance(db, list):
            return None
        x_list = [row[:-1] for row in db]
        y_list = [row[-1] for row in db]
        return x_list, y_list
if __name__ == '__main__':
    # Load the dataset: each line of test.dat is one sample whose last
    # column is the binary label; missing fields are filled with 1.0.
    reader = data_reader('test.dat')
    x_list, y_list = reader.get_x_y_list(reader.get_data_list(num_lost=1.0))

    x_data = tf.constant(np.array(x_list), dtype=tf.float32)
    # np.mat is deprecated; reshape the labels to a column vector instead.
    y_data = tf.constant(np.array(y_list).reshape(-1, 1), dtype=tf.float32)

    # Model parameters: one weight per feature plus a scalar bias,
    # initialized to zero (sigmoid starts at 0.5 for every sample).
    Weights = tf.Variable(tf.zeros([len(x_list[0]), 1]))
    biase = tf.Variable(tf.zeros([1, 1]))

    # Work on logits and let TensorFlow compute the cross-entropy in a
    # numerically stable way. The original hand-written
    #   -mean(y*log(p) + (1-y)*log(1-p))
    # yields NaN as soon as the sigmoid saturates to exactly 0.0 or 1.0;
    # sigmoid_cross_entropy_with_logits computes the same quantity safely.
    logits = tf.matmul(x_data, Weights) + biase
    y = tf.sigmoid(logits)  # predicted probability, kept for inspection
    loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=y_data, logits=logits))

    optimizer = tf.train.GradientDescentOptimizer(0.1)
    train = optimizer.minimize(loss)

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        # 1001 gradient-descent steps, printing the parameters and loss
        # after every step (same trace as the original script).
        for step in range(1001):
            sess.run(train)
            print(step, '\n', sess.run(Weights), sess.run(biase), sess.run(loss))