Linear Regression
import torch
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from torch import nn, optim
from torch.autograd import Variable  # deprecated since PyTorch 0.4; kept here, but plain tensors behave the same
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"  # work around the duplicate-OpenMP-runtime error on some setups
# 100 random inputs in [0, 1) and targets from the line y = 0.1x + 0.2 plus Gaussian noise
x_data = np.random.rand(100)
noise = np.random.normal(0, 0.01, x_data.shape)
y_data = x_data * 0.1 + 0.2 + noise
x_data, noise, y_data
plt.scatter(x_data, y_data)
plt.show()
# reshape to (N, 1) so each sample is a one-feature row, as nn.Linear expects
x_data = x_data.reshape(-1, 1)
y_data = y_data.reshape(-1, 1)
x_data.shape, y_data.shape
x_data = torch.FloatTensor(x_data)
y_data = torch.FloatTensor(y_data)
inputs = Variable(x_data)   # Variable is a no-op wrapper in modern PyTorch
target = Variable(y_data)
class LinearRegression(nn.Module):
    def __init__(self):
        super(LinearRegression, self).__init__()
        self.fc = nn.Linear(1, 1)  # one weight and one bias: y = wx + b

    def forward(self, x):
        out = self.fc(x)
        return out
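Since the model is just a single layer, the same network can also be written without a custom class. A minimal sketch using nn.Sequential (an equivalent alternative for comparison; the name seq_model is made up):

# equivalent single-layer model expressed with nn.Sequential
seq_model = nn.Sequential(nn.Linear(1, 1))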
model = LinearRegression()
mse_loss = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)
# print the randomly initialized weight and bias
for name, parameters in model.named_parameters():
    print("name: {}, param: {}".format(name, parameters))
for i in range(1001):
    out = model(inputs)
    loss = mse_loss(out, target)
    optimizer.zero_grad()  # clear gradients accumulated from the previous step
    loss.backward()        # backpropagate
    optimizer.step()       # update parameters
    if i % 200 == 0:
        print(i, loss.item())
y_pred = model(inputs)
plt.scatter(x_data, y_data)
plt.plot(x_data, y_pred.data.numpy(), "r", lw=3)
plt.show()
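Because the data were generated from y = 0.1x + 0.2, the fitted weight and bias should land near those values. A quick check (a sketch added here, not part of the original code):

# compare the learned parameters with the generating line y = 0.1x + 0.2
w = model.fc.weight.item()
b = model.fc.bias.item()
print("learned weight: {:.4f} (true 0.1)".format(w))
print("learned bias:   {:.4f} (true 0.2)".format(b))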
Nonlinear Regression
import torch
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from torch import nn, optim
from torch.autograd import Variable
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# 200 evenly spaced inputs in [-2, 2] and targets from y = x^2 plus Gaussian noise
x_data = np.linspace(-2, 2, 200)[:, np.newaxis]
noise = np.random.normal(0, 0.2, x_data.shape)
y_data = np.square(x_data) + noise
x_data.shape, y_data.shape
plt.scatter(x_data, y_data)
plt.show()
x_data = torch.FloatTensor(x_data)
y_data = torch.FloatTensor(y_data)
inputs = Variable(x_data)
target = Variable(y_data)
class NonlinearRegression(nn.Module):
    def __init__(self):
        super(NonlinearRegression, self).__init__()
        self.fc1 = nn.Linear(1, 10)  # 1 input feature -> 10 hidden units
        self.tanh = nn.Tanh()        # nonlinearity between the two layers
        self.fc2 = nn.Linear(10, 1)  # 10 hidden units -> 1 output

    def forward(self, x):
        x = self.fc1(x)
        x = self.tanh(x)
        x = self.fc2(x)
        return x
model = NonlinearRegression()
mse_loss = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)
for name, parameters in model.named_parameters():
    print("name: {}, param: {}".format(name, parameters))
for i in range(2001):
    out = model(inputs)
    loss = mse_loss(out, target)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if i % 200 == 0:
        print(i, loss.item())
y_pred = model(inputs)
plt.scatter(x_data, y_data)
plt.plot(x_data, y_pred.data.numpy(), "r", lw=3)
plt.show()
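For inference on points the model has not seen, gradients are not needed, so torch.no_grad() skips building the autograd graph. A minimal sketch (the test inputs below are made up for illustration):

# evaluate the trained network on a few fresh inputs, without tracking gradients
x_test = torch.FloatTensor([[-1.5], [0.0], [1.5]])
with torch.no_grad():
    print(model(x_test))  # should be close to x^2: about 2.25, 0.0, 2.25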