@tf_export("test.is_gpu_available")defis_gpu_available(cuda_only=False, min_cuda_compute_capability=None):"""Returns whether TensorFlow can access a GPU.
返回TensorFlow是否可以访问GPU。
Args:
cuda_only: limit the search to CUDA gpus. 将搜索限制在CUDA GPU中。
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
一个(主要,次要)对,指示所需的最低CUDA计算能力;如果没有要求,则为无。
Returns:
True iff a gpu device of the requested kind is available.
如果请求的类型的gpu设备可用,则为true。
"""defcompute_capability_from_device_desc(device_desc):# TODO(jingyue): The device description generator has to be in sync with# this file. Another option is to put compute capability in# DeviceAttributes, but I avoided that to keep DeviceAttributes# target-independent. Reconsider this option when we have more things like# this to keep in sync.# LINT.IfChange# TODO(jingyue):设备描述生成器必须与此文件同步。 # 另一个选择是将计算功能放到DeviceAttributes中,但是我避免了使DeviceAttributes与目标无关的问题。 # 当我们有更多类似的东西保持同步时,请重新考虑此选项。 LINT.IfChange
match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)# LINT.ThenChange(//tensorflow/core/\# common_runtime/gpu/gpu_device.cc)ifnot match:return0,0returnint(match.group(1)),int(match.group(2))try:for local_device in device_lib.list_local_devices():if local_device.device_type =="GPU":if(min_cuda_compute_capability isNoneor
compute_capability_from_device_desc(
local_device.physical_device_desc)>=
min_cuda_compute_capability):returnTrueif local_device.device_type =="SYCL"andnot cuda_only:returnTruereturnFalseexcept errors_impl.NotFoundError as e:ifnotall([x instr(e)for x in["CUDA","not find"]]):raise e
    else:
      logging.error(str(e))
      return False
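# Usage sketch: assumes TensorFlow 1.x is installed, where the @tf_export
# decorator above exposes this function as `tf.test.is_gpu_available`. The
# tuple passed as min_cuda_compute_capability is compared against the parsed
# (major, minor) pair, so (3, 5) requires compute capability 3.5 or higher.
import tensorflow as tf

if tf.test.is_gpu_available(cuda_only=True,
                            min_cuda_compute_capability=(3, 5)):
  print("Found a CUDA GPU with compute capability >= 3.5.")
else:
  print("No suitable CUDA GPU found; computation will run on the CPU.")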