# -*- coding: utf-8 -*-
"""
测试NVIDIA cuda。
"""
import tensorflow as tf
import numpy as np
import time
# Generate a fixed random matrix as the benchmark workload.
value = np.random.randn(5000, 1000)
a = tf.constant(value)
# Operation under test: elementwise square of the matrix.
b = a * a

with tf.Session() as sess:
    # Warm-up run so session/device initialization (graph placement,
    # GPU memory allocation, kernel compilation) is excluded from timing.
    sess.run(b)
    # perf_counter() is monotonic and high-resolution — the right clock
    # for measuring elapsed time, unlike wall-clock time.time().
    tic = time.perf_counter()
    for _ in range(1000):
        sess.run(b)
    toc = time.perf_counter()

t_cost = toc - tic  # total seconds for 1000 evaluations of `b`
print(t_cost)