基于MLPmixer和图卷积神经网络的WIFI定位

最近正在进行WIFI跟踪方面的项目,记录一下最终的结果。数据集采用Widar2.0。数据集大小大概解压以后两个多g。由于时间非常有限,并且电脑内存不够,因此没有使用最新的Widar3.0数据集。数据集直接网上搜索Widar就可以搜到。想看一看图神经网络在WIFI定位上的能力,但是直接使用图神经网络效果很差,由于CSI数据是由类似图像那种子载波和时隙组成的复数矩阵,因此不能不考虑数据的空间特性,因此首先采用MLPmixer提取空间特征。

数据处理:

引入头文件

# --- Imports (deduplicated; `torch` was imported three times) -------------
# Standard library
import os
from functools import partial

# Numerics / vision
import cv2
import numpy as np
import scipy.io
from scipy.interpolate import interp1d
from PIL import Image

# PyTorch core
import torch
import torch.nn as nn
# Fixed: was `import torch.functional as F` — that module lacks most nn ops;
# the conventional alias is torch.nn.functional.
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms

# Graph libraries
import torch_geometric
from torch_cluster import knn_graph
from torch_geometric.data import Data
from torch_geometric.nn import GCNConv, GraphMultisetTransformer, GraphUNet, GATConv

# Misc third-party
from einops.layers.torch import Rearrange, Reduce
import optuna

print(torch_geometric.__version__)

print(torch.cuda.is_available())
# All tensors and models are moved to this device throughout the script.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

数据集构建

这里记录几个坑爹问题:首先Widar数据集的采样是不均匀的,几乎每个mat文件里面的数据时隙长度都不一样,并且轨迹标签也都不一样长。因此我们需要采用插值将其补齐,或者使用神经微分方程这种适用于不等距非均匀采样的方法,由于第二种方法我不会,因此采用插值。其次widar2.0数据集的时隙维度远远大于子载波个数往往是 1000以上个时隙对应90个子载波,所以没有直接使用卷积神经网络,而是采用将OFDM帧划分为patch之后使用MLPmixer提取特征。最后widar数据集其实是非常小的数据集,总共只有24条数据,三个场景,我是将corridor当成测试集合,office和classroom作为训练集合,这样总共训练数据只有18条。这样经常会过拟合,所以我们采用图神经网络的方法,将WIFI的跟踪问题转化为图上的半监督节点回归问题。这也是一种小样本的方法,将原始小样本问题转化为图神经网络上的半监督问题。但是难点在于数据本身不存在图结构,因此我们采用KNN构图。还有就是每个场景里面都有static文件和config文件,代表了这个场景静态也就是不存在人的时候的CSI数据和一些实验设置。我的处理方式是将这两个文件内容和每一个跟踪文件的数据直接拼接起来当成一个多模态特征这样使用。

# ---------------------------------------------------------------------------
# Dataset construction.
#
# The original script repeated the same loading/resampling pipeline three
# times (classroom, office, corridor).  It is factored into `_load_scene`
# plus two small helpers; the module-level outputs (`raw_data`,
# `graph_data`, `datalist`, `test_data`) are unchanged.
# ---------------------------------------------------------------------------
IMG_SIZE = 256         # CSI matrices are resampled to IMG_SIZE x IMG_SIZE
TARGET_SIZE = 131072   # timestamp vectors interpolated to this length
LABEL_LEN = 100        # every trajectory label resampled to 100 points


def _resize_complex(csi):
    """Resize a complex CSI matrix to (2, 256, 256): real + imaginary channels."""
    real = cv2.resize(np.real(csi), (IMG_SIZE, IMG_SIZE))
    imag = cv2.resize(np.imag(csi), (IMG_SIZE, IMG_SIZE))
    return np.stack((real, imag), axis=0)


def _interp_to(values, new_len):
    """Cubic-interpolate a 1-D array onto `new_len` evenly spaced samples.

    Widar2.0 files have non-uniform, per-file lengths, so everything is
    resampled to a common length before batching.
    """
    values = np.asarray(values).reshape(-1)
    x = np.arange(len(values))
    interpolator = interp1d(x, values, kind='cubic')
    return interpolator(np.linspace(0, len(values) - 1, new_len))


def _load_scene(scene_path):
    """Load one Widar2.0 scene directory into a list of samples.

    Each sample is ``[img, time_img, config_vec, label]`` where
    * img      : (4, 256, 256) — tracking CSI stacked with static CSI,
    * time_img : (4, 256, 256) — interpolated timestamps reshaped to image,
    * config_vec : flattened device_config.mat arrays (11 values),
    * label    : (2, 100) resampled ground-truth trajectory.
    """
    config_path = scene_path + 'device_config.mat'
    static_path = scene_path + 'S01.mat'

    # Flatten every ndarray in the config .mat into one feature vector.
    config_mat = scipy.io.loadmat(config_path)
    config_vec = np.hstack([v.reshape(-1) for v in config_mat.values()
                            if isinstance(v, np.ndarray)])

    # Static (empty-room) CSI and its timestamps, shared by all samples.
    static = scipy.io.loadmat(static_path)
    static_img = _resize_complex(static['csi_data'])
    static_time = _interp_to(static['time_stamp'], TARGET_SIZE)

    samples = []
    # sorted() makes sample order deterministic across filesystems.
    for fname in sorted(os.listdir(scene_path)):
        path = scene_path + fname
        if path == config_path or path == static_path:
            continue
        mat = scipy.io.loadmat(path)
        # Tracking CSI + static CSI -> 4-channel image.
        img = np.concatenate((_resize_complex(mat['csi_data']), static_img), axis=0)
        # Tracking time + static time: 2 * 131072 = 4 * 256 * 256 values.
        t = _interp_to(mat['time_stamp'], TARGET_SIZE)
        time_img = np.stack((t, static_time), axis=0).reshape(4, IMG_SIZE, IMG_SIZE)
        # Resample the (2, T) trajectory to (2, 100).
        gt = mat['ground_truth']
        label = interp1d(np.arange(gt.shape[1]), gt, axis=1, kind='cubic')(
            np.linspace(0, gt.shape[1] - 1, LABEL_LEN))
        sample = [img, time_img, config_vec, label]
        samples.append(sample)
        for part in sample:
            print(part.shape, end="")
        print()
    return samples


path1 = './Widar2.0Project/data/classroom/'
path2 = './Widar2.0Project/data/office/'
testpath = './Widar2.0Project/data/corridor/'

raw_data = _load_scene(path1)      # classroom (training)
graph_data = _load_scene(path2)    # office (training)
print(len(raw_data))
print(len(graph_data))
datalist = raw_data + graph_data
print(len(datalist))

test_data = _load_scene(testpath)  # corridor (held-out test scene)
print(len(test_data))

# Custom dataset class wrapping the preprocessed Widar samples.
class CustomDataset(Dataset):
    """Wraps the preprocessed sample list for use with a DataLoader.

    Each element of ``data_list`` is a 4-item list:
    [csi image, time image, config vector, trajectory label].
    ``__getitem__`` returns the first three items as the feature group and
    the fourth as the regression target.
    """

    def __init__(self, data_list, transform=None):
        """
        :param data_list: list of [img, time_img, config_vec, label] samples
        :param transform: optional transform (accepted for API compatibility;
                          currently unused)
        """
        self.data_list = data_list
        self.transform = transform

    def __len__(self):
        """Return the number of samples."""
        return len(self.data_list)

    def __getitem__(self, idx):
        """Return ``(features, label)`` for sample ``idx``.

        Fix: the original contained a no-op self-assignment
        (``raw[1] = raw[1]``); it has been removed.
        """
        raw = self.data_list[idx]
        features = raw[0:3]   # [csi image, time image, config vector]
        label = raw[3]
        return features, label

# Training dataset: classroom + office samples (read globally by the model's
# forward pass for graph-node augmentation).
dataset = CustomDataset(datalist)

模型定义

class PreNormResidual(nn.Module):
    """Pre-norm residual wrapper: computes ``x + fn(LayerNorm(x))``."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = nn.LayerNorm(dim)

    def forward(self, x):
        # Normalise first, apply the wrapped transform, then add the skip.
        normed = self.norm(x)
        transformed = self.fn(normed)
        return x + transformed

def FeedForward(dim, expansion_factor = 4, dropout = 0., dense = nn.Linear):
    """Two-layer MLP: expand to ``dim * expansion_factor``, GELU, project back.

    ``dense`` selects the layer type (``nn.Linear`` for channel mixing, a
    1x1 ``Conv1d`` partial for token mixing); dropout follows each dense layer.
    """
    hidden = dim * expansion_factor
    layers = [
        dense(dim, hidden),
        nn.GELU(),
        nn.Dropout(dropout),
        dense(hidden, dim),
        nn.Dropout(dropout),
    ]
    return nn.Sequential(*layers)

def MLPMixer(*, image_size, channels, patch_size, dim, depth, num_classes, expansion_factor = 4, dropout = 0.):
    """Build an MLP-Mixer as an ``nn.Sequential``.

    Patchifies the input image, embeds each patch to ``dim``, applies
    ``depth`` token-mixing / channel-mixing residual blocks, then
    mean-pools over patches and projects to ``num_classes``.
    """
    assert (image_size % patch_size) == 0, 'image must be divisible by patch size'
    num_patches = (image_size // patch_size) ** 2
    # Token mixing acts across the patch axis via 1x1 Conv1d;
    # channel mixing uses a plain Linear over the embedding axis.
    token_dense = partial(nn.Conv1d, kernel_size = 1)
    channel_dense = nn.Linear

    stem = [
        Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size),
        nn.Linear((patch_size ** 2) * channels, dim),
    ]
    blocks = []
    for _ in range(depth):
        # Keep each pair inside an inner Sequential so state_dict keys
        # match the original layout.
        blocks.append(nn.Sequential(
            PreNormResidual(dim, FeedForward(num_patches, expansion_factor, dropout, token_dense)),
            PreNormResidual(dim, FeedForward(dim, expansion_factor, dropout, channel_dense)),
        ))
    head = [
        nn.LayerNorm(dim),
        Reduce('b n c -> b c', 'mean'),
        nn.Linear(dim, num_classes),
    ]
    return nn.Sequential(*stem, *blocks, *head)

class mlpmixer(nn.Module):
    """Three MLP-Mixer branches fused into a 2-layer GCN trajectory regressor.

    Each modality (CSI image, timestamp image, config vector lifted to image
    shape) gets its own mixer producing a 1000-d embedding. The summed
    embeddings become graph nodes: a KNN graph is built per forward pass and
    two GCNConv layers regress 200 values per node, reshaped to (2, 100)
    trajectories. Reads the globals ``dataset`` and ``device``.
    """

    def __init__(self):
        super().__init__()
        # One mixer per modality; all consume 4x256x256 inputs.
        self.model1 = MLPMixer(image_size=256, channels=4, patch_size=16, dim=512, depth=12, num_classes=1000)
        self.model2 = MLPMixer(image_size=256, channels=4, patch_size=16, dim=512, depth=12, num_classes=1000)
        self.model3 = MLPMixer(image_size=256, channels=4, patch_size=16, dim=512, depth=12, num_classes=1000)
        # Lifts the 11-element device-config vector to 4*256*256 = 262144
        # values so it can be reshaped into an "image" for the third mixer.
        self.lin1 = nn.Linear(11, 131072 * 2)
        self.conv1 = GCNConv(1000, 2000)
        self.dropout = nn.Dropout(0.6)
        # 200 outputs per node -> reshaped to a (2, 100) trajectory.
        self.conv2 = GCNConv(2000, 200)

    def forward(self, data):
        # ``data`` is the 3-element feature list produced by CustomDataset:
        # [csi image, time image, config vector].
        x = data[0].float().to(device)
        y = data[1].float().to(device)
        z = data[2].float().to(device)
        num=x.shape[0]
        # Draw 4 extra random training samples so the KNN graph has more
        # nodes for message passing (semi-supervised augmentation).
        # NOTE(review): this reads the global ``dataset`` and also runs
        # during evaluation, making inference stochastic — confirm intended.
        num_samples = 4
        indices = torch.randperm(len(dataset))[:num_samples]
        s1=[]
        s2=[]
        s3=[]
        for idx in indices:
            sample = dataset[idx]
            s1.append(sample[0][0])
            s2.append(sample[0][1])
            s3.append(sample[0][2])
        s1=torch.tensor(np.array(s1)).float().to(device)
        s2=torch.tensor(np.array(s2)).float().to(device)
        s3=torch.tensor(np.array(s3)).float().to(device)
        # Sampled helper nodes are appended after the real batch (rows >= num).
        x=torch.cat((x, s1), dim=0)
        y=torch.cat((y, s2), dim=0)
        z=torch.cat((z, s3), dim=0)
        # Expand the config vector into a 4x256x256 "image" for model3.
        z = self.lin1(z)
        z = z.reshape(-1, 4, 256, 256)
        x = self.model1(x)
        y = self.model2(y)
        z = self.model3(z)
        # Fuse the three modality embeddings by summation.
        res=x+y+z
        # KNN graph over fused embeddings, 4 neighbours, no self-loops.
        edge_index = knn_graph(res, k=4, loop=False)
        # First ``num`` rows are the real batch; the remainder are helpers.
        train_mask = torch.zeros(res.shape[0], dtype=torch.bool)
        train_mask[:num] = True
        test_mask = torch.zeros(res.shape[0], dtype=torch.bool)
        test_mask[num:] = True
        graph_data = Data(x=res, edge_index=edge_index, train_mask=train_mask, test_mask=test_mask)
        x = self.conv1(graph_data.x, graph_data.edge_index)
        x = torch.relu(x)
        x = self.dropout(x)
        x = self.conv2(x, edge_index)
        # Only the real batch rows are returned, as (batch, 2, 100) tracks.
        return x[graph_data.train_mask].reshape(-1,2,100)

可以看到我们使用了KNN构图,并且每个batch对数据进行随机采样,随机挑选一些数据参与图构建和消息传播。

模型训练

由于widar的数据质量其实是非常糟糕的,因此调参难度十分大,我试着调了几次,经常是神经网络过拟合要不然就不收敛了。因此我们采用optuna这样一个自动调参库来帮忙进行调参。

# Held-out test scene (corridor), never seen during training.
test_dataset = CustomDataset(test_data)
# Global best-loss tracker shared across Optuna trials; seeded with +inf so
# the first trial always counts as an improvement.
losslist=[]
losslist.append(float('inf'))

# def custom_loss(pred, true, mse_weight=1.0, l1_weight=1.0):
#     mse_loss = nn.MSELoss()(pred, true)
#     l1_loss = nn.L1Loss()(pred, true)
#     return mse_weight * mse_loss + l1_weight * l1_loss


def objective(trial):
    """Optuna objective: train a fresh model with trial-suggested
    hyper-parameters and return the mean loss on the corridor test scene.

    Side effects: saves the weights of any model that beats the best loss
    seen so far (tracked across trials via the global ``losslist``), and
    appends the updated best loss to ``losslist``.
    """
    model = mlpmixer().to(device)

    # Loss function. NOTE(review): 'RMSE loss' actually maps to
    # SmoothL1Loss (Huber), not a true RMSE — the label is misleading but
    # kept so existing studies/params stay compatible.
    loss_function_name = trial.suggest_categorical('loss_function', ['MSE loss', 'RMSE loss', 'MAE loss'])
    if loss_function_name == 'MSE loss':
        criterion = nn.MSELoss()
    elif loss_function_name == 'RMSE loss':
        criterion = nn.SmoothL1Loss()
    else:  # 'MAE loss' — MAE is the L1 loss
        criterion = nn.L1Loss()

    # Optimizer. Fix: the original first built an Adam with a separate
    # 'learning_rate' suggestion and immediately overwrote it below — that
    # allocated a model-sized optimizer for nothing and polluted the search
    # space with an unused hyper-parameter. It has been removed.
    optimizer_name = trial.suggest_categorical('optimizer', ['Adam', 'SGD', 'RMSprop'])
    if optimizer_name == 'Adam':
        optimizer = optim.Adam(model.parameters(), lr=trial.suggest_float('lr', 1e-7, 1e-2))
    elif optimizer_name == 'SGD':
        optimizer = optim.SGD(model.parameters(), lr=trial.suggest_float('lr', 1e-7, 1e-2), momentum=trial.suggest_float('momentum', 0.0, 1.0))
    else:  # 'RMSprop'
        optimizer = optim.RMSprop(model.parameters(), lr=trial.suggest_float('lr', 1e-7, 1e-2), alpha=trial.suggest_float('alpha', 0.0, 1.0))

    # Training data: classroom + office scenes (global ``dataset``).
    train_loader = DataLoader(dataset=dataset, batch_size=4, shuffle=True)

    # Train for a trial-suggested number of epochs.
    epochs = trial.suggest_int('epochs', 300, 800)
    for epoch in range(epochs):
        model.train()
        for data, labels in train_loader:
            labels = labels.float().to(device)
            outputs = model(data)
            loss = criterion(outputs, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print(f'Epoch [{epoch+1}/{epochs}], Loss: {loss.item()}, Learning Rate: {optimizer.param_groups[0]["lr"]}')

    # Evaluate on the held-out corridor scene and track the global best.
    model.eval()
    losses = []
    best_loss = min(losslist)
    test_loader = DataLoader(dataset=test_dataset, batch_size=4)
    with torch.no_grad():
        for data, labels in test_loader:
            labels = labels.float().to(device)
            outputs = model(data)
            losses.append(criterion(outputs, labels).item())
    average_loss = sum(losses) / len(losses)
    if average_loss < best_loss:
        best_loss = average_loss
        # Persist only improving models, keyed by trial number.
        torch.save(model.state_dict(), 'best_model_weights_{}.pth'.format(trial.number))
    else:
        print('This is not an optimal model in this trial.')
    print(f'Epoch [{epoch+1}/{epochs}], Loss: {average_loss}, Best Loss: {best_loss}')
    losslist.append(best_loss)
    return average_loss

# Create a study that minimises the corridor test loss.
study = optuna.create_study(direction='minimize')
# Run 400 trials (the original comment said 100, but n_trials is 400).
study.optimize(objective, n_trials=400)
# Report the best hyper-parameters found.
best_trial = study.best_trial
print("Best trial: score={}, params={}".format(best_trial.value, best_trial.params))

效果

其实我本来在看到widar数据的这个质量之后,我压根没想到能调出结果,不过optuna确实是太强大了。竟然真的搜到了一个效果还不错的模型。widar2原始论文说他们的精度在0.8-0.75左右。而optuna在完全没有见过corridor的数据情况下,居然搜索到了精度为0.5721。
可视化结果
其中黄色线条是label,蓝色线条是预测结果。

结论

结论我只能说,optuna yyds。

  • 24
    点赞
  • 24
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值