# TensorFlow 设置程序可见GPU(多GPU情况) — restrict which physical GPUs are visible to the process (multi-GPU setup)
"""Restrict TensorFlow to a single visible GPU on a multi-GPU machine.

Lists the physical GPUs, makes only the first one visible to this
process, then reports physical vs. logical GPU counts.
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline  # Jupyter cell magic — not valid Python syntax; keep commented out in a .py file
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
# Public API import; the original `tensorflow_core.python.keras.api._v2` path is a
# private internal module that breaks across TF releases.
from tensorflow import keras

print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)

# Log the device each op is placed on (useful for verifying GPU usage).
tf.debugging.set_log_device_placement(True)

gpus = tf.config.experimental.list_physical_devices("GPU")
if len(gpus) >= 1:
    # Expose only the first physical GPU to this process; the rest stay hidden.
    tf.config.experimental.set_visible_devices(gpus[0], "GPU")
print("物理GPU个数:", len(gpus))
logical_gpus = tf.config.experimental.list_logical_devices("GPU")
print("逻辑GPU个数:", len(logical_gpus))
# TensorFlow 设置GPU的逻辑分区 — partition one physical GPU into multiple logical devices
"""Split one physical GPU into two logical GPUs with fixed memory limits.

Makes only the first physical GPU visible, then carves it into two
3072 MB logical devices and reports physical vs. logical GPU counts.
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline  # Jupyter cell magic — not valid Python syntax; keep commented out in a .py file
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
# Public API import; the original `tensorflow_core.python.keras.api._v2` path is a
# private internal module that breaks across TF releases.
from tensorflow import keras

print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)

# Log the device each op is placed on (useful for verifying GPU usage).
tf.debugging.set_log_device_placement(True)

gpus = tf.config.experimental.list_physical_devices("GPU")
if len(gpus) >= 1:
    tf.config.experimental.set_visible_devices(gpus[0], "GPU")
    # Guarded by the `if` above so gpus[0] is never indexed on a CPU-only machine.
    # Partition the visible GPU into two logical devices of 3072 MB each.
    tf.config.experimental.set_virtual_device_configuration(
        gpus[0],
        [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=3072),
         tf.config.experimental.VirtualDeviceConfiguration(memory_limit=3072)])
print("物理GPU个数:", len(gpus))
logical_gpus = tf.config.experimental.list_logical_devices("GPU")
print("逻辑GPU个数:", len(logical_gpus))
# TensorFlow 手动设置处理GPU — manually place computations on specific GPU devices
"""Manually place matrix multiplications on each logical GPU.

Enables memory growth on every physical GPU, runs one matmul per
logical GPU, then sums the partial results on GPU:0.
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline  # Jupyter cell magic — not valid Python syntax; keep commented out in a .py file
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
# Public API import; the original `tensorflow_core.python.keras.api._v2` path is a
# private internal module that breaks across TF releases.
from tensorflow import keras

print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)

# Log op placement, and fall back to a supported device instead of
# raising when an op cannot run on the requested one.
tf.debugging.set_log_device_placement(True)
tf.config.set_soft_device_placement(True)

gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
    # Allocate GPU memory on demand rather than grabbing it all up front.
    tf.config.experimental.set_memory_growth(gpu, True)
print("物理GPU个数:", len(gpus))
logical_gpus = tf.config.experimental.list_logical_devices("GPU")
print("逻辑GPU个数:", len(logical_gpus))

# One 2x3 @ 3x2 matmul pinned to each logical GPU.
c = []
for gpu in logical_gpus:
    print(gpu.name)
    with tf.device(gpu.name):
        a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
        b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
        c.append(tf.matmul(a, b))

# Reduce the per-GPU partial results on the first GPU.
with tf.device("/GPU:0"):
    matmul_sum = tf.add_n(c)
print(matmul_sum)