A work in progress; I'll fill in the gaps when I have time. Feel free to take it if you need it.
# Import relevant packages
import torch
from torch import nn, optim
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from tqdm import tqdm
import matplotlib.pyplot as plt
# Specify the library to generate papers from
library = 'my papers'
# Specify number of papers
num_papers = 5
# Specify keywords for papers
keywords = ['forget it', 'anything works', 'pretty good', 'go with the flow']
# Set seed for reproducibility
torch.manual_seed(10)
# Create a training dataset (downloads CIFAR-10; ToTensor scales pixels to [0, 1])
dataset = datasets.CIFAR10(root='./data', download=True, transform=transforms.ToTensor())
# Create a dataloader
loader = DataLoader(dataset, batch_size=128, shuffle=True)
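# Optional sanity check: CIFAR-10 images are 3x32x32, so with batch_size=128 one
# batch should print torch.Size([128, 3, 32, 32]) and torch.Size([128]) for the labels.
images, labels = next(iter(loader))
print(images.shape, labels.shape)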
# Create a model
class AI_Model(nn.Module):
    def __init__(self):
        super().__init__()
        # Input: 3x32x32 CIFAR-10 images
        self.conv1 = nn.Conv2d(3, 32, 3)        # 32x32 -> 30x30 (no padding)
        self.conv2 = nn.Conv2d(32, 64, 3)       # 15x15 -> 13x13
        self.fc1 = nn.Linear(64 * 6 * 6, 512)   # 13x13 pooled down to 6x6
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 10)           # 10 CIFAR-10 classes

    def forward(self, x):
        x = F.relu(self.conv1(x))   # -> 32x30x30
        x = F.max_pool2d(x, 2, 2)   # -> 32x15x15
        x = F.relu(self.conv2(x))   # -> 64x13x13
        x = F.max_pool2d(x, 2, 2)   # -> 64x6x6
        x = x.view(-1, 64 * 6 * 6)  # flatten for the fully connected layers
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)             # raw logits; CrossEntropyLoss applies log-softmax
        return x
# Create instance of AI model
model = AI_Model()
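# Optional: count trainable parameters as a sanity check on the layer sizes above.
num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Trainable parameters: {:,}'.format(num_params))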
# Define loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Train the model
num_epochs = 10    # number of training epochs (assumed; the original never defines it)
loss_history = []  # average loss per epoch, used for the plot below
with tqdm(total=num_epochs, unit='epoch') as pbar:
    for epoch in range(num_epochs):
        ep_loss = 0.0
        for data, target in loader:
            optimizer.zero_grad()             # reset gradients from the previous step
            output = model(data)              # forward pass
            loss = criterion(output, target)
            ep_loss += loss.item()
            loss.backward()                   # backpropagate
            optimizer.step()                  # update weights
        avg_loss = ep_loss / len(loader)
        loss_history.append(avg_loss)
        pbar.set_postfix(loss='{:.4f}'.format(avg_loss))
        pbar.update(1)  # Update progress bar once per epoch
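# Plot the training curve: a minimal sketch using the matplotlib import above
# and the loss_history list tracked during training.
plt.plot(range(1, num_epochs + 1), loss_history)
plt.xlabel('Epoch')
plt.ylabel('Average training loss')
plt.title('CIFAR-10 training loss')
plt.show()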
# Generate new papers from library using keywords
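# generate_paper is never defined in this snippet. The stub below is a
# hypothetical placeholder so the loop runs end to end; swap in a real
# generator when filling in this part.
import random

def generate_paper(library, keywords):
    # Placeholder logic: pair the library name with a random keyword.
    return '{}: a paper about "{}"'.format(library, random.choice(keywords))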
for i in range(num_papers):
    paper = generate_paper(library=library, keywords=keywords)
    print('Generated paper {}: {}'.format(i + 1, paper))