import GPUtil as GPU
import time
import numpy as np
from visdom import Visdom
class GPU_stati():
    """Sample GPU load every `time_gaps` seconds for `time_sample` seconds
    and report the average utilization of each GPU."""

    def __init__(self, time_gaps=1, time_sample=60):
        self.GPU_num = len(GPU.getGPUs())
        self.gpus_Util = np.zeros(self.GPU_num)
        self.gpus = GPU.getGPUs()
        self.tgs = time_gaps
        self.tsample = time_sample

    def compute_util(self):
        self.gpus_Util = np.zeros(self.GPU_num)
        elapsed = 0
        n_samples = 0
        while True:
            self.gpus = GPU.getGPUs()
            for i in range(self.GPU_num):
                self.gpus_Util[i] += self.gpus[i].load
            n_samples += 1
            time.sleep(self.tgs)
            elapsed += self.tgs
            if elapsed >= self.tsample:
                break
        # Average over the number of samples actually taken, so the result
        # stays correct when time_gaps is not 1 second.
        self.gpus_Util /= n_samples
        return self.gpus_Util
# main
'''
viz = Visdom(env='gpu_utili')
x0_name = GPU.getGPUs()[0].name + 'id_' + str(GPU.getGPUs()[0].id)
x1_name = GPU.getGPUs()[1].name + 'id_' + str(GPU.getGPUs()[1].id)
viz.line([[0., 0.]], [0.], win='gpu_utilization',
         opts=dict(title='gpu_utilization', legend=[x0_name, x1_name]))
'''
while True:
    # Average utilization over a 30-minute window, sampling once per second.
    gpus = GPU_stati(time_gaps=1, time_sample=1800)
    x = gpus.compute_util()
    localtime = time.asctime(time.localtime(time.time()))
    print("\nLocal time:", localtime)
    for i in range(len(x)):
        name = GPU.getGPUs()[i].name + 'id_' + str(GPU.getGPUs()[i].id)
        print(name + ': ', '{:.2f}%'.format(x[i] * 100))
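If the 30-minute averages should be plotted rather than only printed, the commented-out Visdom block can be wired into the same loop. The sketch below is a minimal variant under a few assumptions: a Visdom server is already running (python -m visdom.server), the window and environment names from the commented block are reused, and the legend is built from whatever GPUs GPUtil reports instead of hard-coding two of them. It reuses the GPU_stati class defined above.

import time
import GPUtil as GPU
from visdom import Visdom

viz = Visdom(env='gpu_utili')
names = [g.name + 'id_' + str(g.id) for g in GPU.getGPUs()]
# One trace per GPU, starting at x = 0.
viz.line(Y=[[0.0] * len(names)], X=[0.0], win='gpu_utilization',
         opts=dict(title='gpu_utilization', legend=names))

minute = 0
while True:
    stats = GPU_stati(time_gaps=1, time_sample=1800)
    x = stats.compute_util()
    minute += 30
    # Append one point per GPU to the existing window.
    viz.line(Y=[x.tolist()], X=[minute], win='gpu_utilization', update='append')

Each pass through the loop appends one point per GPU via update='append', so the plot grows by one sample every 30 minutes instead of being redrawn from scratch.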