关于PyTorch和TensorFlow的GPU问题
一、测试GPU
PyTorch
# 输入
# Check whether PyTorch can see a CUDA GPU and report the device details.
import torch

print(torch.cuda.is_available())
# Select the first GPU when CUDA is available, otherwise fall back to CPU.
# (The original `and 1 > 0` clause was always true and has been removed.)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
# Only query the GPU name when one exists: get_device_name(0) raises a
# RuntimeError on CPU-only machines.
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))
# 输出
True #能否使用GPU
cuda:0 #GPU序号
GeForce GTX 1080 #显卡型号
TensorFlow
tensorflow-gpu 1.x.x
# 输入
# TensorFlow 1.x GPU check: returns True when TensorFlow can use a GPU.
# NOTE(review): tf.test.is_gpu_available() is deprecated in TF 2.x in
# favor of tf.config.list_physical_devices('GPU').
import tensorflow as tf
tf.test.is_gpu_available()
# 输出
True
tensorflow-gpu 2.x.x
# 输入
# TensorFlow 2.x GPU check: returns the list of visible physical GPU
# devices (an empty list means no GPU is available).
import tensorflow as tf
tf.config.list_physical_devices('GPU')
# 输出
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
二、用CPU加载GPU模型
PyTorch
#实例化模型
# Load a model checkpoint that was saved on a GPU onto a CPU-only machine.
# CNNModel is assumed to be defined elsewhere in the project — TODO confirm.
network = CNNModel()
# Path to the GPU-trained checkpoint (placeholder — replace with a real path).
model_path = "GPU模型"
# map_location=torch.device('cpu') remaps all CUDA tensors in the checkpoint
# to CPU memory, so loading works without any GPU present.
network.load_state_dict(torch.load(model_path, map_location = torch.device('cpu')))
# Switch to inference mode (disables dropout / freezes batch-norm statistics).
network.eval()
TensorFlow-gpu
用CPU和系统内存来运行程序
import os

# Force CPU-only execution: order CUDA devices by PCI bus id and hide every
# GPU from the process ("-1" = no visible devices). These variables must be
# set before TensorFlow/CUDA is imported to take effect.
os.environ.update({
    "CUDA_DEVICE_ORDER": "PCI_BUS_ID",
    "CUDA_VISIBLE_DEVICES": "-1",
})