# === GRU (sequence prediction with Keras) ===
from tensorflow import keras

# Stacked GRU regressor for sequence prediction.
# Assumes n_neurons, n_steps, n_inputs, n_layers, n_outputs and the data arrays
# (x_train: n*seq*dim, y_train: n*dim, ...) are defined earlier -- TODO confirm.
model = keras.Sequential()
# First GRU layer. return_sequences must be True whenever another recurrent
# layer follows, so that layer receives the full (steps, features) sequence.
model.add(keras.layers.GRU(n_neurons, activation='relu',
                           input_shape=(n_steps, n_inputs),
                           return_sequences=n_layers > 1))
# Extra stacked GRU layers. All but the last keep return_sequences=True.
# (The original passed return_sequences=False on every extra layer, which
# breaks for n_layers > 2; input_shape is only needed on the first layer.)
for i in range(n_layers - 1):
    model.add(keras.layers.GRU(n_neurons, activation='relu',
                               return_sequences=i < n_layers - 2))
model.add(keras.layers.Dense(n_outputs))  # linear output head
model.summary()  # print the layer-by-layer architecture

model.compile(
    loss="mean_squared_error",  # regression objective
    # Adam adapts per-parameter step sizes but keeps this base rate fixed;
    # it does NOT decay the learning rate unless a schedule is supplied.
    optimizer=keras.optimizers.Adam(learning_rate=1e-3),
    # 'accuracy' is meaningless for a regression loss; track MAE instead.
    metrics=['mae'],
)
model.fit(
    x_train, y_train,                 # inputs: n*seq*dim, targets: n*dim
    validation_data=(x_valid, y_valid),
    batch_size=50,                    # mini-batch size
    epochs=20,                        # passes over the training data (use ~100 for a full run)
    verbose=1,                        # show a progress bar
)
y_pred = model.predict(x_test)
# === LSTM (sequence prediction with PyTorch) ===
# 1. Model definition
class LstmRNN(nn.Module):
    """LSTM regressor mapping a sequence of feature vectors to per-step outputs.

    Input shape  : (seq_len, batch, input_size)  -- torch default, batch_first=False.
    Output shape : (seq_len, batch, output_size)

    NOTE(review): the pasted original had lost its indentation and was
    syntactically invalid; the structure is restored here, logic unchanged.
    """

    def __init__(self, input_size, hidden_size=1, output_size=1, num_layers=1):
        super().__init__()
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers)
        # Linear head applied to every time step's hidden state.
        self.forwardCalculation = nn.Linear(hidden_size, output_size)

    def forward(self, _x):
        x, _ = self.lstm(_x)    # x: (seq_len, batch, hidden); final hidden/cell states discarded
        s, b, h = x.shape
        x = x.view(s * b, h)    # flatten time and batch so Linear sees a 2-D input
        x = self.forwardCalculation(x)
        x = x.view(s, b, -1)    # restore (seq_len, batch, output_size)
        return x
# Instantiate the model: 16 hidden units, single LSTM layer.
# Assumes INPUT_FEATURES_NUM / OUTPUT_FEATURES_NUM are defined earlier -- TODO confirm.
lstm_model = LstmRNN(INPUT_FEATURES_NUM, 16, output_size=OUTPUT_FEATURES_NUM, num_layers=1) # 16 hidden units
# 2. Training
def train(max_epochs, model, train_x_tensor, train_y_tensor, optimizer, loss_function):
    """Full-batch training loop; trains `model` in place.

    Runs `max_epochs` iterations of forward / loss / backward / step over the
    whole dataset and prints the loss once per epoch.
    """
    for epoch in range(max_epochs):
        # Clear stale gradients BEFORE backward. (The original cleared them
        # after step(), which works only if gradients are zero on entry.)
        optimizer.zero_grad()
        output = model(train_x_tensor)
        loss = loss_function(output, train_y_tensor)
        loss.backward()
        optimizer.step()
        print('Epoch: [{}/{}], Loss:{:.5f}'.format(epoch + 1, max_epochs, loss.item()))
# torch.save(lstm_model.state_dict(), 'model_params.pkl') # save model parameters to files
train(500,                                  # epochs
      lstm_model,
      torch.from_numpy(train_x_tensor),     # convert numpy arrays to torch tensors
      # NOTE(review): float64 numpy arrays become DoubleTensors here, which
      # mismatches the model's float32 weights -- may need .float(); confirm.
      torch.from_numpy(train_y_tensor),
      torch.optim.Adam(lstm_model.parameters(), lr=1e-2),
      nn.MSELoss())
# lstm_model.load_state_dict(torch.load('model_params.pkl')) # load model parameters from files

# 3. Inference
lstm_model = lstm_model.eval()              # switch to evaluation mode
with torch.no_grad():                       # no gradient tracking needed for prediction
    pre_y = lstm_model(torch.from_numpy(test_x_tensor))
# === FPN (image segmentation) ===
# 0. Dataset wrapping
import torch
from torchvision import transforms as T
class MriDataset(torch.utils.data.Dataset):
    """Dataset of (MRI image, binary mask) pairs read lazily from disk.

    `df` must provide 'image_filename' and 'mask_filename' columns; files are
    only read when an item is requested. An optional albumentations-style
    `transform` augments image and mask together.
    """

    def __init__(self, df, transform=None, mean=0.5, std=0.25):
        super(MriDataset, self).__init__()
        self.df = df
        self.transform = transform  # albumentations Compose, or None
        self.mean = mean            # NOTE(review): mean/std are stored but never used here
        self.std = std

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx, raw=False):
        # Lazy loading: image data is read from disk only on access.
        row = self.df.iloc[idx]
        img = cv2.imread(row['image_filename'], cv2.IMREAD_UNCHANGED)
        mask = cv2.imread(row['mask_filename'], cv2.IMREAD_GRAYSCALE)
        if raw:
            return img, mask
        if self.transform:
            augmented = self.transform(image=img, mask=mask)
            # BUG FIX: the original assigned the augmented pair to `image, mask`
            # but then converted the pre-augmentation `img`, silently dropping
            # every image augmentation. Keep the augmented image instead.
            img, mask = augmented['image'], augmented['mask']
        img = T.functional.to_tensor(img)
        mask = mask // 255          # black/white mask -> {0, 1} matrix
        mask = torch.Tensor(mask)
        return img, mask
import albumentations as A
# Augmentation pipeline: albumentations applies each op to image and mask together.
transform = A.Compose([
    A.ChannelDropout(p=0.3),
    A.RandomBrightnessContrast(p=0.3),
    A.ColorJitter(p=0.3),
])
train_dataset = MriDataset(train_df, transform)
valid_dataset = MriDataset(valid_df)  # no augmentation for validation/test data
test_dataset = MriDataset(test_df)
batch_size = 1  # batch_size = 16 -- samples per training batch (1 chosen for this run)
from torch.utils.data import DataLoader
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=1)
# 1. Model design
def loss(output, target, alpha=0.01):
    """Binary-segmentation loss: BCE plus an `alpha`-weighted (1 - Dice) term.

    output : sigmoid probabilities, shape (batch, H, W)
    target : ground-truth 0/1 mask, same shape

    NOTE(review): the pasted original had lost the nested function's
    indentation; structure restored, logic unchanged.
    """
    def dice_pytorch(predictions: torch.Tensor, labels: torch.Tensor, e: float = 1e-7):
        """Per-sample Dice coefficient of 0.5-thresholded predictions; `e` avoids 0/0."""
        predictions = torch.where(predictions > 0.5, 1, 0)
        labels = labels.byte()
        intersection = (predictions & labels).float().sum((1, 2))
        return ((2 * intersection) + e) / (predictions.float().sum((1, 2)) + labels.float().sum((1, 2)) + e)

    bce = torch.nn.functional.binary_cross_entropy(output, target)
    # NOTE(review): thresholding makes the Dice term non-differentiable, so it
    # contributes no gradient -- consider soft Dice on raw probabilities.
    soft_dice = 1 - dice_pytorch(output, target).mean()
    return bce + alpha * soft_dice
import segmentation_models_pytorch as smp
# FPN (Feature Pyramid Network) for binary segmentation: output has the same
# spatial size as the input; sigmoid yields per-pixel probabilities in [0, 1].
model = smp.FPN(
    encoder_name="efficientnet-b7",  # backbone CNN
    encoder_weights="imagenet",      # start from ImageNet-pretrained weights
    in_channels=3,                   # RGB input
    classes=1,                       # single-channel mask
    activation='sigmoid',
)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
# 2. Model training
from tqdm import tqdm
def train(epochs, model, train_loader, valid_loader, optimizer, loss_fn, lr_scheduler):
    """Train a segmentation model with a validation pass after every epoch.

    Uses the module-level `device`. Average train/validation losses are printed
    each epoch and the validation loss is fed to `lr_scheduler` (expected to be
    ReduceLROnPlateau). Leaves the model in eval mode on return.

    NOTE(review): the pasted original had lost all loop indentation; the
    structure is restored here. The local loss variable is renamed so it no
    longer shadows the module-level `loss` function.
    """
    for epoch in range(1, epochs + 1):
        running_loss = 0
        model.train()
        for i, data in enumerate(tqdm(train_loader)):
            img, mask = data
            img, mask = img.to(device), mask.to(device)
            optimizer.zero_grad()                 # clear gradients from the previous step
            predictions = model(img)
            predictions = predictions.squeeze(1)  # (B, 1, H, W) -> (B, H, W) to match mask
            batch_loss = loss_fn(predictions, mask)
            running_loss += batch_loss.item() * img.size(0)  # sum of per-sample losses
            batch_loss.backward()
            optimizer.step()
        model.eval()                              # switch to evaluation mode for validation
        with torch.no_grad():
            running_valid_loss = 0
            for i, data in enumerate(valid_loader):
                img, mask = data
                img, mask = img.to(device), mask.to(device)
                predictions = model(img)
                predictions = predictions.squeeze(1)
                batch_loss = loss_fn(predictions, mask)
                running_valid_loss += batch_loss.item() * img.size(0)
        train_loss = running_loss / len(train_loader.dataset)
        val_loss = running_valid_loss / len(valid_loader.dataset)
        print(f'Epoch: {epoch}/{epochs} | Training loss: {train_loss} | Validation loss: {val_loss} ')
        lr_scheduler.step(val_loss)               # ReduceLROnPlateau monitors validation loss
    model.eval()
optimizer=torch.optim.Adam(model.parameters(), lr=0.001)
train(1,  # 60 -- epochs: 1 for a quick smoke run, 60 for a full run
      model,
      train_loader,
      valid_loader,
      optimizer,
      loss,
      # ReduceLROnPlateau: multiply LR by 0.2 after 2 epochs without val-loss improvement
      torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, patience=2,factor=0.2))
#model.load_state_dict(torch.load('weights.pt'))  # load previously saved weights
# 3. Using the model
# Quick inference sanity check on the first test batch only.
with torch.no_grad():                        # inference: no gradient tracking needed
    for i, data in enumerate(test_loader):
        img, mask = data
        img, mask = img.to(device), mask.to(device)
        predictions = model(img)             # per-pixel probabilities
        predictions = predictions.squeeze(1) # (B, 1, H, W) -> (B, H, W)
        print(predictions)
        print(mask)
        break                                # inspect a single batch and stop