import tensorflow as tf
import numpy as np
# Shapes of the two placeholder inputs: X is (1, 4), Y is (2, 1).
batchsize2X = 1
batchsize2Y = 2
dataX_indim = 4
dataY_indim = 1

# Pin graph construction to GPU 0; allow_soft_placement in the session
# config below lets TF fall back to CPU when no GPU exists.
with tf.device('/gpu:%s' % (0)):
    with tf.variable_scope('vscope', reuse=tf.AUTO_REUSE):
        X = tf.placeholder(tf.float32, name='X_recv', shape=[batchsize2X, dataX_indim])
        Y = tf.placeholder(tf.float32, name='Y_recv', shape=[batchsize2Y, dataY_indim])
        # (2, 1) - (1, 4) broadcasts to a (2, 4) result.
        Z = Y - X

# Session configuration:
#   allow_soft_placement=True -> if the requested device does not exist
#     (e.g. no GPU), automatically run on an available device instead.
#   allow_growth=True -> allocate GPU memory on demand rather than
#     grabbing all of it up front.
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True

x_batch = np.array([[1, 2, 3, 4]])
y_batch = np.array([[1], [2]])
print('x', x_batch)
print('y', y_batch)

with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    # Feed the two batches and evaluate the broadcast subtraction.
    z = sess.run(Z, feed_dict={X: x_batch, Y: y_batch})
    print(z)
# Result:
#   x [[1 2 3 4]]
#   y [[1]
#      [2]]
#   z = y - x, broadcast (2, 1) - (1, 4) -> (2, 4):
#   [[ 0. -1. -2. -3.]
#    [ 1.  0. -1. -2.]]