生成式对抗网络生成一维数据(DCGAN)

本文探讨了使用生成式对抗网络(DCGAN)生成一维数据的方法。通过深度学习技术,DCGAN能够创建逼真的数据样本,这对于数据增强、模型训练等方面具有重要意义。文章详细介绍了DCGAN的架构、训练过程及其在生成一维数据上的应用。
摘要由CSDN通过智能技术生成
from __future__ import print_function, division
import os

os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D, GlobalAveragePooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
import pandas as pd
import matplotlib.pyplot as plt
import sys
import numpy as np
import csv


class GAN():
    def __init__(self):
        """Assemble the GAN.

        Builds and compiles the discriminator, builds the generator, and
        stacks the two into ``self.combined`` — the model used to train the
        generator to fool the (frozen) discriminator.
        """
        # Shape of one data sample, treated as a 1-channel 2-D grid.
        self.data_rows = 20
        self.data_cols = 20
        self.channels = 1
        self.data_shape = (self.data_rows, self.data_cols, self.channels)
        # Size of the latent noise vector fed to the generator.
        self.latent_dim = 100
        self.sample_size = 200

        adam = Adam(0.0002, 0.5)

        # Discriminator: trained directly on real/generated samples.
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(
            loss='binary_crossentropy',
            optimizer=adam,
            metrics=['accuracy'],
        )

        # Generator: maps latent noise to a fake data sample.
        self.generator = self.build_generator()
        noise_in = Input(shape=(self.latent_dim,))
        generated = self.generator(noise_in)

        # Freeze the discriminator inside the stacked model so that training
        # the combined model updates only the generator's weights.
        self.discriminator.trainable = False
        validity_score = self.discriminator(generated)

        # Combined model: noise -> generator -> discriminator verdict.
        self.combined = Model(noise_in, validity_score)
        self.combined.compile(loss='binary_crossentropy', optimizer=adam)

    def build_generator(self):
        model = Sequential()
        # 先全连接到 128*5*5 的维度上
        model.add(Dense(128 * 5 * 5, activation="relu", input_dim=self.latent_dim)
以下是一个使用PyTorch实现DCGAN生成一维数据的示例代码,假设csv文件中只有一列数据,共121行:

```python
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils

# 定义数据集类
class MyDataset(Dataset):
    def __init__(self, csv_file):
        self.data = pd.read_csv(csv_file, header=None).values
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((127.5,), (127.5,))
        ])

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.transform(self.data[idx])

# 定义生成器模型
class Generator(nn.Module):
    def __init__(self, input_size, output_size):
        super(Generator, self).__init__()
        self.fc1 = nn.Linear(input_size, 64)
        self.bn1 = nn.BatchNorm1d(64)
        self.fc2 = nn.Linear(64, 128)
        self.bn2 = nn.BatchNorm1d(128)
        self.fc3 = nn.Linear(128, output_size)
        self.tanh = nn.Tanh()

    def forward(self, x):
        x = self.fc1(x)
        x = self.bn1(x)
        x = nn.functional.leaky_relu(x, 0.2)
        x = self.fc2(x)
        x = self.bn2(x)
        x = nn.functional.leaky_relu(x, 0.2)
        x = self.fc3(x)
        x = self.tanh(x)
        return x

# 定义判别器模型
class Discriminator(nn.Module):
    def __init__(self, input_size):
        super(Discriminator, self).__init__()
        self.fc1 = nn.Linear(input_size, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 1)

    def forward(self, x):
        x = nn.functional.leaky_relu(self.fc1(x), 0.2)
        x = nn.functional.dropout(x, 0.3)
        x = nn.functional.leaky_relu(self.fc2(x), 0.2)
        x = nn.functional.dropout(x, 0.3)
        x = self.fc3(x)
        return x

# 定义训练函数
def train(model_G, model_D, train_loader, epochs, lr):
    criterion = nn.BCEWithLogitsLoss()
    optimizer_G = optim.Adam(model_G.parameters(), lr=lr)
    optimizer_D = optim.Adam(model_D.parameters(), lr=lr)
    fixed_noise = torch.randn(1, 100)
    for epoch in range(epochs):
        for i, data in enumerate(train_loader, 0):
            real_data = data.to(device)
            fake_noise = torch.randn(real_data.size(0), 100).to(device)
            fake_data = model_G(fake_noise)
            # 训练判别器
            optimizer_D.zero_grad()
            real_labels = torch.ones(real_data.size(0), 1).to(device)
            fake_labels = torch.zeros(real_data.size(0), 1).to(device)
            real_output = model_D(real_data)
            fake_output = model_D(fake_data.detach())
            loss_D = criterion(real_output, real_labels) + criterion(fake_output, fake_labels)
            loss_D.backward()
            optimizer_D.step()
            # 训练生成器
            optimizer_G.zero_grad()
            fake_labels = torch.ones(real_data.size(0), 1).to(device)
            fake_output = model_D(fake_data)
            loss_G = criterion(fake_output, fake_labels)
            loss_G.backward()
            optimizer_G.step()
        # 每个epoch生成一组样本数据
        if epoch % 100 == 0:
            print("Epoch:", epoch)
            with torch.no_grad():
                fake_data = model_G(fixed_noise).cpu().numpy()
                fake_data = (fake_data * 127.5 + 127.5).reshape(121)
                print("Generated data:", fake_data)

# 定义训练参数
batch_size = 1
input_size = 100
output_size = 121
epochs = 1000
lr = 0.0002
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Device:", device)

# 加载数据集
dataset = MyDataset("data.csv")
train_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

# 初始化模型并开始训练
model_G = Generator(input_size, output_size).to(device)
model_D = Discriminator(output_size).to(device)
train(model_G, model_D, train_loader, epochs, lr)
```

需要注意的是,PyTorch的神经网络模型和训练过程与TensorFlow有所不同,但逻辑类似。此外,对于PyTorch的GPU加速,需要将模型和数据都放到GPU上进行计算,可以通过 `to(device)` 方法实现。
评论 17
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值