Prerequisite: PyCharm and Anaconda are already installed and configured.
Some useful operations in the Anaconda Powershell Prompt:
1. List the virtual environments already on the system: conda env list
2. Create a new virtual environment: conda create -n name (but this puts it on the C drive, which is not what we want)
3. Remove an environment created by mistake: conda env remove -n name
4. So how do we create an environment exactly where we want it? conda create -p <absolute_path>
5. Activate the virtual environment: conda activate <absolute_path>
6. PyPI website: pypi.org. Install the packages you need with pip, e.g. pip install torch
7. Tsinghua Open Source Mirror (清华大学开源软件镜像站): https://mirrors.tuna.tsinghua.edu.cn/ (a consolidated example of steps 4-7 is shown after the pip commands below)
pip, temporary use (per command):
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple some-package
pip, set as the default index (recommended):
python -m pip install --upgrade pip
pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
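Putting steps 4-7 together, a typical session looks like this (the path D:\envs\pyg and the python=3.9 version are hypothetical placeholders; substitute your own):
conda create -p D:\envs\pyg python=3.9
conda activate D:\envs\pyg
pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
pip install torch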
Since we have already created the virtual environment, all that is left is to open PyCharm and point it at that environment:
File -> Settings -> Project -> Python Interpreter -> Add Interpreter -> Add Local Interpreter -> Conda Environment
Conda Executable: D:\Anaconda\Scripts\conda.exe
Use existing environment: select the absolute path where the new virtual environment was created.
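To confirm that PyCharm is really using the new environment, a minimal sanity check (assuming torch has already been installed into it) can be run from the PyCharm Python console:

import sys
import torch

print(sys.executable)             # should point inside the newly created conda environment
print(torch.__version__)          # the torch version installed via pip
print(torch.cuda.is_available())  # True only for a CUDA build of torch with a visible GPU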
An MLP example:
from torch_geometric.datasets import Planetoid
from torch_geometric.transforms import NormalizeFeatures
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import torch
from torch.nn import Linear
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
dataset = Planetoid(root='/tmp/Planetoid', name='Cora', transform=NormalizeFeatures())  # transform: normalize node features as preprocessing
data = dataset[0]
print(data)
def visualize(h, color):
    z = TSNE(n_components=2).fit_transform(h.detach().cpu().numpy())
    plt.figure(figsize=(10, 10))
    plt.xticks([])
    plt.yticks([])
    plt.scatter(z[:, 0], z[:, 1], s=70, c=color, cmap="Set2")
    plt.show()
class MLP(torch.nn.Module):
    # A two-layer MLP that classifies each node from its own features only
    # (the graph structure / edge_index is not used).
    def __init__(self, hidden_channels):
        super().__init__()
        torch.manual_seed(12345)
        self.lin1 = Linear(dataset.num_features, hidden_channels)
        self.lin2 = Linear(hidden_channels, dataset.num_classes)

    def forward(self, x):
        x = self.lin1(x)
        x = x.relu()
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return x
model = MLP(hidden_channels=16)
print(model)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
def train():
    model.train()
    optimizer.zero_grad()
    out = model(data.x)
    loss = criterion(out[data.train_mask], data.y[data.train_mask])  # loss only on the training nodes
    loss.backward()
    optimizer.step()
    return loss

def test():
    model.eval()
    out = model(data.x)
    pred = out.argmax(dim=1)  # predicted class = index of the highest score
    test_correct = pred[data.test_mask] == data.y[data.test_mask]
    test_acc = int(test_correct.sum()) / int(data.test_mask.sum())
    return test_acc

for epoch in range(1, 201):
    loss = train()
    print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}')
test_acc = test()
print(f'Test Accuracy: {test_acc:.4f}')
Result:
Test Accuracy: 0.5900
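To inspect what the trained MLP has learned, its output can be fed to the visualize helper defined above, mirroring the last lines of the GCN script below:

model.eval()
out = model(data.x)
visualize(out, color=data.y)  # t-SNE plot of the trained MLP's node embeddings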
If we swap the MLP for a GCN, does the accuracy improve? The MLP classifies each node from its own features alone, whereas a GCN also aggregates information from neighboring nodes through edge_index, so it should make better use of the citation graph.
GCN example:
from torch_geometric.datasets import Planetoid
from torch_geometric.transforms import NormalizeFeatures
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import torch
from torch.nn import Linear
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
dataset = Planetoid(root='/tmp/Planetoid', name='Cora', transform=NormalizeFeatures())  # transform: normalize node features as preprocessing
data = dataset[0]
print(data)
def visualize(h, color):
    z = TSNE(n_components=2).fit_transform(h.detach().cpu().numpy())
    plt.figure(figsize=(10, 10))
    plt.xticks([])
    plt.yticks([])
    plt.scatter(z[:, 0], z[:, 1], s=70, c=color, cmap="Set2")
    plt.show()
class GCN(torch.nn.Module):
    def __init__(self, hidden_channels):
        super().__init__()
        torch.manual_seed(1234567)
        self.conv1 = GCNConv(dataset.num_features, hidden_channels)  # (1433, 16)
        self.conv2 = GCNConv(hidden_channels, dataset.num_classes)   # (16, 7)

    def forward(self, x, edge_index):
        # Unlike the MLP, each layer aggregates features from neighboring nodes via edge_index.
        x = self.conv1(x, edge_index)
        x = x.relu()
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.conv2(x, edge_index)
        return x
model = GCN(hidden_channels=16)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)
criterion = torch.nn.CrossEntropyLoss()
print(model)
# model.eval()
# out = model(data.x, data.edge_index)
# visualize(out, color=data.y)  # embeddings before training
def train():
    model.train()
    optimizer.zero_grad()
    out = model(data.x, data.edge_index)
    loss = criterion(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()
    return loss

def test():
    model.eval()
    out = model(data.x, data.edge_index)
    pred = out.argmax(dim=1)
    test_correct = pred[data.test_mask] == data.y[data.test_mask]
    test_acc = int(test_correct.sum()) / int(data.test_mask.sum())
    return test_acc

for epoch in range(1, 201):
    loss = train()
    print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}')
test_acc = test()
print(f'Test Accuracy: {test_acc:.4f}')
model.eval()
out = model(data.x, data.edge_index)
visualize(out, color=data.y)  # embeddings after training
Comparison of the results: