# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
# Tensors
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataloader import default_collate

# torch.Tensor defaults to FloatTensor
a = torch.Tensor([[2, 3], [4, 8], [7, 9]])
print('a is: {}'.format(a))
print('a size is {}'.format(a.size()))
# A LongTensor (64-bit integers)
b = torch.LongTensor([[2, 3], [4, 8], [7, 9]])
print('b is: {}'.format(b))
# An all-zero tensor, or one filled with samples from a standard normal distribution
c = torch.zeros((3, 2))
print('zero tensor: {}'.format(c))
d = torch.randn((3, 2))
print('normal random is: {}'.format(d))
a[0, 1] = 100
print('changed a is: {}'.format(a))
# Converting between Tensor and numpy.ndarray:
numpy_b = b.numpy()
print('convert to numpy is \n {}'.format(numpy_b))
e = np.array([[2, 3], [4, 5]])
torch_e = torch.from_numpy(e)
print('from numpy to torch.Tensor is {}'.format(torch_e))
f_torch_e = torch_e.float()
print('change data type to float tensor: {}'.format(f_torch_e))
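# Note: torch.from_numpy shares memory with the source ndarray, so mutating
# one side is visible on the other (.float() above makes a copy instead):
e[0, 0] = 100
print('after modifying e, torch_e is {}'.format(torch_e))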
# GPU acceleration
if torch.cuda.is_available():
    a_cuda = a.cuda()
    print(a_cuda)
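    # A cuda tensor must be moved back to the CPU before, e.g., calling
    # .numpy() on it; a minimal sketch of the round trip:
    a_cpu = a_cuda.cpu()
    print(a_cpu)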
# Variable
# A Variable wraps a Tensor and provides automatic differentiation. It has
# three important attributes: data (the wrapped tensor), grad (the gradient),
# and grad_fn (the function that created it, used in the backward pass).
#create Variable
x = Variable(torch.Tensor([1]), requires_grad=True)
w = Variable(torch.Tensor([2]), requires_grad=True)
b = Variable(torch.Tensor([3]), requires_grad=True)
# Differentiating a scalar output
# Build a computational graph.
y = w * x + b
#compute gradients
y.backward()
#Print out the gradients.
print(x.grad)
print(w.grad)
print(b.grad)
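# Expected values: for y = w * x + b with x = 1, w = 2, b = 3,
# dy/dx = w = 2, dy/dw = x = 1, dy/db = 1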
# When constructing a Variable, pass requires_grad=True (the default is
# False); if you want gradients computed for it, this argument is required.
# Differentiating a vector output
x = torch.randn(3)
x = Variable(x, requires_grad=True)
y = x * 2
print(y)
y.backward(torch.FloatTensor([1, 0.1, 0.01]))
print(x.grad)
# Here y.backward() cannot be called without arguments: y is not a scalar, so
# a gradient tensor must be passed explicitly. The values above weight each
# component's gradient by 1, 0.1, and 0.01 respectively.
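# Since y = 2 * x, each dy_i/dx_i = 2; weighted by [1, 0.1, 0.01] this gives
# x.grad = [2, 0.2, 0.02]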
# Dataset
class myDataset(Dataset):
    def __init__(self, csv_file, txt_file, root_dir, other_file):
        self.csv_data = pd.read_csv(csv_file)
        with open(txt_file, 'r') as f:
            data_list = f.readlines()
        self.txt_data = data_list
        self.root_dir = root_dir

    def __len__(self):
        return len(self.csv_data)

    def __getitem__(self, idx):
        # .iloc indexes rows by position; plain [] would index columns
        data = (self.csv_data.iloc[idx], self.txt_data[idx])
        return data
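# With __len__ and __getitem__ defined, an instance supports len(dataset) and
# dataset[idx]; a usage sketch for the instance created just below:
# print(len(dataset))
# print(dataset[0])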
# Use torch.utils.data.DataLoader to define a new iterator; collate_fn, which
# assembles individual samples into a batch, is somewhat like TensorFlow's map.
# Note: DataLoader expects a Dataset *instance*, not the class itself.
dataset = myDataset('data.csv', 'data.txt', './', None)  # hypothetical paths
dataiter = DataLoader(dataset, batch_size=32, shuffle=True,
                      collate_fn=default_collate)
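# Iterating over the loader yields shuffled batches of up to 32 samples:
# for i_batch, batch in enumerate(dataiter):
#     csv_rows, txt_lines = batch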
# Ready-made image-reading dataset class from torchvision
from torchvision.datasets import ImageFolder
from torchvision.datasets.folder import default_loader
dset = ImageFolder(root='root_path', transform=None, loader=default_loader)
# nn.Module
# Neural network template
class net_name(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size):
        super(net_name, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size)
        # other network layers

    def forward(self, x):
        x = self.conv1(x)
        return x
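# A minimal sketch of using the template (the shapes here are assumptions):
net = net_name(in_channels=3, out_channels=16, kernel_size=3)
dummy = Variable(torch.randn(1, 3, 32, 32))  # one 3-channel 32x32 input
out = net(dummy)
print(out.size())  # torch.Size([1, 16, 30, 30])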
# Loss
criterion = nn.CrossEntropyLoss()
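# A hypothetical example: random scores for 4 samples over 10 classes, and a
# ground-truth class index for each sample
output = Variable(torch.randn(4, 10), requires_grad=True)
target = Variable(torch.LongTensor([1, 0, 4, 9]))
loss = criterion(output, target)
print(loss)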
# torch.optim
# First-order optimization algorithms, i.e., gradient descent and its variants.
# Second-order algorithms use second derivatives to minimize or maximize the
# loss function, mainly Newton-type methods; they are computationally
# expensive and therefore not widely used.
# torch.optim is a package implementing various optimization algorithms; the
# parameters passed to it must be Variables.
# Step 1: build the optimizer
# (`model` is assumed here to be an nn.Module instance, e.g. the template above)
model = net_name(in_channels=3, out_channels=16, kernel_size=3)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
# Stochastic gradient descent with learning rate 0.01 and momentum 0.9
# Step 2: clear accumulated gradients
optimizer.zero_grad()
# Step 3: backpropagate the loss
loss.backward()
# Step 4: update the parameters
optimizer.step()
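# A minimal sketch of a training loop combining the four steps (num_epochs
# and the (input, label) structure of each batch are assumptions):
# for epoch in range(num_epochs):
#     for data, label in dataiter:
#         out = model(data)
#         loss = criterion(out, label)
#         optimizer.zero_grad()
#         loss.backward()
#         optimizer.step()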
# Saving and loading models
torch.save(model, './model.pth')  # whole model (architecture + weights)
torch.save(model.state_dict(), './model_state.pth')  # weights only
load_model = torch.load('./model.pth')
model.load_state_dict(torch.load('./model_state.pth'))
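# Note: load_state_dict requires the model object to be constructed first, so
# the weights-only file must be paired with the code defining the network.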
# Reference:
# 深度学习入门之PyTorch (an introductory deep-learning book using PyTorch)