# gpu_is_ok.py
import torch
def check_torch_gpu():
    """Print PyTorch version info, CUDA availability, and per-GPU details.

    Purely diagnostic: writes a report to stdout and returns ``None``.
    Safe to call on CPU-only machines (falls through to the else branch).
    """
    # Print the PyTorch version
    print(f"PyTorch version: {torch.__version__}")
    # Check whether CUDA is available
    cuda_available = torch.cuda.is_available()
    print(f"CUDA available: {cuda_available}")
    if cuda_available:
        # CUDA version this PyTorch build was compiled against
        print(f"CUDA version: {torch.version.cuda}")
        # Number of visible GPU devices
        device_count = torch.cuda.device_count()
        print(f"Number of available GPUs: {device_count}")
        # Print details for each GPU
        for i in range(device_count):
            print(f"\nGPU {i}:")
            print(f" Name: {torch.cuda.get_device_name(i)}")
            print(f" Compute Capability: {torch.cuda.get_device_capability(i)}")
            # total_memory is in bytes; convert to GiB for readability
            print(f" Total Memory: {torch.cuda.get_device_properties(i).total_memory / 1024**3:.2f} GB")
    else:
        print("No GPU available. PyTorch will use CPU.")
# Script entry point: run the diagnostic when executed directly.
if __name__ == "__main__":
    check_torch_gpu()
# Logic for checking whether the GPU build of PyTorch is usable.
# (Scraped blog footer: "latest recommended article published 2025-04-30 18:52:29")