1. 代码实现:
import numpy as np;
import matplotlib.pyplot as plt;
import tensorflow as tf;
DATA_FILE1 = "./data1.txt";
class LogicRegression:
    """Binary logistic regression on 2-D points.

    Loads (x1, x2, label) rows from DATA_FILE1, fits weights with plain
    gradient descent using the TensorFlow 1.x graph API, and plots the data
    together with the learned linear decision boundary.
    """

    def __init__(self):
        """Load the comma-separated data file: columns 0-1 are the features,
        column 2 is the 0/1 label."""
        data = np.loadtxt(DATA_FILE1, delimiter=",")
        self.x = data[:, 0:2].astype(np.float32)
        self.y = data[:, 2].astype(np.float32)
        # Row indices of positive / negative examples, kept for plotting.
        self.pos = np.where(self.y == 1)
        self.neg = np.where(self.y == 0)
        # Remember to reshape to explicit 2-D: (n, 2) features and (n, 1)
        # labels so tf.matmul and the loss broadcast correctly.
        self.x = np.reshape(self.x, newshape=(len(self.y), 2))
        self.y = np.reshape(self.y, newshape=(len(self.y), 1))

    def sigmoid(self, z):
        """Logistic function 1 / (1 + e^-z); elementwise on arrays."""
        return 1 / (1 + np.exp(-z))

    def train(self):
        """Fit w (2x1) and b by minimizing mean binary cross-entropy with
        gradient descent (learning rate 0.002, 1e6 steps, full-batch).

        Stores the fitted parameters in self.w and self.b (flattened
        NumPy arrays) for later plotting.
        """
        x = tf.placeholder(tf.float32)
        y = tf.placeholder(tf.float32)
        # Zero initialization is safe here: the logistic-regression loss is
        # convex, so there is no symmetry-breaking concern as in multi-layer
        # networks (which is why random init is unnecessary for w).
        w = tf.Variable(tf.zeros([2, 1]))
        b = tf.Variable(np.random.rand())
        z = tf.matmul(x, w) + b
        # Binary cross-entropy: -[y*log(h) + (1-y)*log(1-h)].
        h = -(y * tf.log(tf.sigmoid(z)) + (1 - y) * tf.log(1 - tf.sigmoid(z)))
        loss = tf.reduce_mean(h)
        optimizer = tf.train.GradientDescentOptimizer(0.002).minimize(loss)
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            sess.run(init)
            # The feed dict is loop-invariant; build it once.
            feed = {x: self.x, y: self.y}
            for i in range(1000000):
                sess.run(optimizer, feed_dict=feed)
                if i % 50000 == 0:
                    print(sess.run(w).flatten(), sess.run(b).flatten())
                    print("loss:", sess.run(loss, feed))
            # Keep the fitted parameters for show().
            self.w = sess.run(w).flatten()
            self.b = sess.run(b).flatten()

    def show(self):
        """Scatter-plot the two classes and draw the decision boundary
        w0*x + w1*y + b = 0 (requires train() to have run first)."""
        plt.figure()
        plt.scatter(self.x[self.pos, 0], self.x[self.pos, 1], c='g', marker='o')
        plt.scatter(self.x[self.neg, 0], self.x[self.neg, 1], c='r', marker='x')
        bx = np.linspace(0, 100, 1000)
        # Solve w0*x + w1*y + b = 0 for y, vectorized instead of a Python
        # append loop over the 1000 sample points.
        by = (bx * -self.w[0] - self.b) / self.w[1]
        plt.plot(bx, by)
        plt.show()
if __name__ == "__main__":
LR = LogicRegression();
LR.train();
LR.show();
2. 模型介绍: