tensorflow & pytorch对比
以建立一个普通神经网络结构为例。
参考文献:
莫烦 (Morvan Zhou) TensorFlow 入门教程.
莫烦 (Morvan Zhou) PyTorch 入门教程.
tensorflow
首先导入tensorflow和numpy:
import tensorflow as tf
import numpy as np
建立一个神经网络
def add_layer(inputs, in_size, out_size, activation_function=None):
    """Add one fully-connected layer to the TF1 graph.

    Args:
        inputs: 2-D tensor of shape [batch, in_size].
        in_size: number of input features.
        out_size: number of output units.
        activation_function: optional callable applied to the linear
            output; None means a purely linear layer.

    Returns:
        2-D tensor of shape [batch, out_size].
    """
    # Random-normal initial weights; small positive bias (0.1) keeps
    # ReLU units active at the start of training.
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    # Compute the affine transform once instead of duplicating it
    # in both branches as the original did.
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    # Compare with None using identity ("is"), not equality ("==").
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
建立数据集
# 100 training inputs evenly spaced in [-2, 2], as a (100, 1) column vector.
x_data = np.linspace(-2,2,100, dtype=np.float32)[:, np.newaxis]
# Small Gaussian noise so the target is not an exact quadratic.
noise = np.random.normal(0, 0.01, x_data.shape).astype(np.float32)
# Target: y = x^2 - 0.2 + noise.
y_data = np.square(x_data) - 0.2 + noise
# Graph inputs; None in the shape allows any batch size at feed time.
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
#------------ build the model ----------------------
# Hidden layer: 1 -> 20 units with ReLU; output layer: 20 -> 1, linear.
l1 = add_layer(xs, 1, 20, activation_function=tf.nn.relu)
prediction = add_layer(l1, 20, 1, activation_function=None)
计算误差
# Mean over the batch of each example's sum of squared errors.
# `axis` replaces the deprecated `reduction_indices` argument of
# tf.reduce_sum (same semantics).
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                    axis=[1]))
传播误差
# One plain gradient-descent update (learning rate 0.1) minimising the loss.
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
训练网络
# Initialise all variables before running the graph
# (tf.global_variables_initializer replaces the older
# tf.initialize_all_variables).
init = tf.global_variables_initializer()
# Use the session as a context manager so it is always closed and its
# resources released, even if training raises — the original leaked it.
with tf.Session() as sess:
    sess.run(init)
    for i in range(1000):
        # One full-batch training step.
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
        if i % 50 == 0:
            # Report the current training loss to watch convergence.
            print(i, sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
训练结果
pytorch
首先导入pytorch:
import torch
import torch.nn.functional as F
# 100 inputs evenly spaced in [-1, 1], reshaped to a (100, 1) column.
x = torch.linspace(-1, 1, 100).unsqueeze(1)
# Noisy quadratic target: y = x^2 minus uniform noise in [0, 0.2).
y = x ** 2 - 0.2 * torch.rand(x.size())
建立神经网络
class Net(torch.nn.Module):
    """Two-layer feed-forward regression network.

    Architecture: Linear(n_feature -> n_hidden) -> ReLU ->
    Linear(n_hidden -> n_output), mirroring the TensorFlow model.
    """

    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()  # register submodules with nn.Module
        # Hidden layer: linear transform of the input features.
        self.hidden = torch.nn.Linear(n_feature, n_hidden)
        # Output layer: linear read-out, no activation (regression).
        self.predict = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        """Map input of shape (batch, n_feature) to (batch, n_output)."""
        # torch.relu is used instead of F.relu: the original referenced
        # an `F` (torch.nn.functional) alias that is never imported in
        # this file, which would raise NameError at call time.
        x = torch.relu(self.hidden(x))
        x = self.predict(x)
        return x
# 1 input feature -> 20 hidden units -> 1 output, same sizes as the TF net.
net = Net(n_feature=1, n_hidden=20, n_output=1)
训练网络
# Plain SGD over all of the network's parameters, learning rate 0.1.
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
# Mean-squared-error loss for this regression task.
loss_func = torch.nn.MSELoss()
for t in range(1000):
    prediction = net(x)              # forward pass on the training data
    loss = loss_func(prediction, y)  # error between prediction and target
    if t%50 == 0:
        # Report the current training loss to watch convergence.
        print(t,loss.detach().numpy())
    optimizer.zero_grad()  # clear gradients left from the previous step
    loss.backward()        # backpropagate to compute fresh gradients
    optimizer.step()       # apply one gradient-descent update
训练结果