# TF1-style GPU setup: pin the visible GPU and cap per-process memory.
import tensorflow as tf
import numpy as np
import os

# Must be set before TensorFlow initializes CUDA, otherwise it is ignored.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

config = tf.ConfigProto()
# Allow this process to use at most 60% of the GPU's memory.
config.gpu_options.per_process_gpu_memory_fraction = 0.6
# BUG FIX: the original passed the literal placeholder `graph=...`
# (the Ellipsis object, not a graph) and gave the `with` statement no
# body, which is a syntax error. Use the default graph and a real body.
with tf.Session(config=config) as sess:
    pass  # build and run the graph here
# TF2-style: cap the first physical GPU at 1 GB via a virtual device.
# BUG FIX: the original snippet had all indentation stripped (the `if`
# and `try` bodies were flat), making it a syntax error; restored here.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    # Restrict TensorFlow to only allocate 1GB of memory on the first GPU.
    try:
        tf.config.experimental.set_virtual_device_configuration(
            gpus[0],
            [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)])
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        # Virtual devices must be set before GPUs have been initialized.
        print(e)
# Configure GPU memory according to need, to avoid running out of memory (OOM).
# Tell TensorFlow to grow GPU memory on demand instead of reserving the
# whole card up front. Must be set before TensorFlow initializes the GPU.
import os

os.environ.update({"TF_FORCE_GPU_ALLOW_GROWTH": "true"})
# Pick GPU 2 and enable on-demand memory growth on every visible GPU.
import tensorflow as tf
import os

# BUG FIX: CUDA_VISIBLE_DEVICES must be exported BEFORE TensorFlow first
# enumerates devices; the original set it AFTER list_physical_devices(),
# where it has no effect on which GPU is used.
os.environ['CUDA_VISIBLE_DEVICES'] = '2'

gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
for gpu in gpus:
    # Allocate memory incrementally rather than reserving the whole card.
    tf.config.experimental.set_memory_growth(gpu, True)
# Verified: the approach below works reliably; the other ones sometimes work and sometimes do not.
# Pin the process to GPU 1 (PCI bus order, matching nvidia-smi) and cap
# its memory at 10 GB before creating a distribution strategy.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

# BUG FIX: the original merely constructed a VirtualDeviceConfiguration
# object and discarded it — a no-op, which is exactly why nvidia-smi
# showed usage above the limit. The config only takes effect when passed
# to set_virtual_device_configuration().
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    # Only one device is visible here (CUDA_VISIBLE_DEVICES="1"), so
    # gpus[0] is the selected card.
    tf.config.experimental.set_virtual_device_configuration(
        gpus[0],
        [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=10240)])

strategy = tf.distribute.MirroredStrategy()
# The following is adapted from: https://blog.csdn.net/zkbaba/article/details/104101584
# tf.config.experimental.set_visible_devices restricts which devices the current
# program can see: the program will only ever use devices visible to it, and
# invisible devices are never touched. For example, on the 4-GPU machine above,
# to limit the program to the cards at indices 0 and 1 (GPU:0 and GPU:1), use:
# Limit this process to the first two physical GPUs (GPU:0 and GPU:1);
# devices not listed here are invisible to TensorFlow and never used.
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_visible_devices(gpus[:2], device_type='GPU')
# Pinning the training GPU: the approaches below are confirmed to work.
# Method 1: pin training to a specific GPU via set_visible_devices.
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
# BUG FIX: the original indexed gpus[1] unconditionally, which raises
# IndexError on machines with fewer than two GPUs; guard the access.
if len(gpus) > 1:
    tf.config.experimental.set_visible_devices(gpus[1], device_type='GPU')
print("gpus", gpus)
# Method 2: pin the training GPU.
# Method 2: pin the training GPU through CUDA environment variables.
# PCI_BUS_ID makes CUDA's device numbering match nvidia-smi's ordering.
os.environ.update({
    "CUDA_DEVICE_ORDER": "PCI_BUS_ID",
    "CUDA_VISIBLE_DEVICES": "1",
})
# Cap GPU memory (10 GB per card) so the process cannot exhaust the
# device; append this right after method 1 or method 2 above.
# BUG FIXES: the original contained a duplicated assignment
# (`logical_gpus = logical_gpus = ...`), indexed gpus[1] without
# checking it exists, and duplicated the identical call for each GPU;
# the loop over the first two visible GPUs covers both safely.
for gpu in gpus[:2]:
    tf.config.experimental.set_virtual_device_configuration(
        gpu,
        [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=10240)])
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print("logical_gpus", logical_gpus)