import tensorflow as tf

# Check whether TensorFlow can see a CUDA-capable GPU.
# NOTE(review): tf.test.is_gpu_available() is deprecated since TF 2.1;
# tf.config.list_physical_devices('GPU') is the modern replacement — kept
# here because the rest of this script targets the same legacy API surface.
flag = tf.test.is_gpu_available()
if flag:
    # A GPU is visible — report which device TensorFlow selected.
    print("CUDA可使用")
    gpu_device_name = tf.test.gpu_device_name()
    print("GPU型号: ", gpu_device_name)
else:
    print("CUDA不可用")
import torch

# Check whether PyTorch can see a CUDA device.
flag = torch.cuda.is_available()
if flag:
    print("CUDA可使用")
    print("GPU型号: ", torch.cuda.get_device_name())
else:
    print("CUDA不可用")

# Number of GPUs this script intends to use.
ngpu = 1
# Decide which device we want to run on: first CUDA device if available
# and requested, otherwise fall back to CPU.
device = torch.device("cuda:0" if (torch.cuda.is_available() and ngpu > 0) else "cpu")
print(device)

# BUGFIX: the original called torch.cuda.get_device_name(0) and
# torch.rand(3, 3).cuda() unconditionally, which raises RuntimeError on a
# CPU-only machine even though `device` had already fallen back to "cpu".
# Only touch CUDA-specific APIs when a GPU is actually present.
if device.type == "cuda":
    print(torch.cuda.get_device_name(0))
    print(torch.rand(3, 3).cuda())
# MindSpore GPU smoke test: run a tiny element-wise addition on the GPU
# backend and print the result (expected: a tensor of all 2.0s).
import numpy as np
from mindspore import Tensor
from mindspore.ops import functional as F
import mindspore.context as context

context.set_context(device_target="GPU")

# Two identical all-ones float32 tensors of shape (1, 3, 3, 4).
shape = (1, 3, 3, 4)
x = Tensor(np.ones(shape, dtype=np.float32))
y = Tensor(np.ones(shape, dtype=np.float32))
print(F.tensor_add(x, y))
Expected output:

```text
[[[[2. 2. 2. 2.]
[2. 2. 2. 2.]
[2. 2. 2. 2.]]
[[2. 2. 2. 2.]
[2. 2. 2. 2.]
[2. 2. 2. 2.]]
[[2. 2. 2. 2.]
[2. 2. 2. 2.]
[2. 2. 2. 2.]]]]
```