GpuCloudSim示例五
这里仅仅介绍与其他示例不同的地方
package org.cloudbus.cloudsim.examples.gpu;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Map.Entry;
import org.cloudbus.cloudsim.Cloudlet;
import org.cloudbus.cloudsim.DatacenterCharacteristics;
import org.cloudbus.cloudsim.Log;
import org.cloudbus.cloudsim.Pe;
import org.cloudbus.cloudsim.Storage;
import org.cloudbus.cloudsim.UtilizationModel;
import org.cloudbus.cloudsim.UtilizationModelFull;
import org.cloudbus.cloudsim.VmScheduler;
import org.cloudbus.cloudsim.VmSchedulerTimeShared;
import org.cloudbus.cloudsim.core.CloudSim;
import org.cloudbus.cloudsim.gpu.BusTags;
import org.cloudbus.cloudsim.gpu.GpuCloudlet;
import org.cloudbus.cloudsim.gpu.GpuCloudletSchedulerTimeShared;
import org.cloudbus.cloudsim.gpu.GpuHost;
import org.cloudbus.cloudsim.gpu.GpuHostTags;
import org.cloudbus.cloudsim.gpu.GpuTask;
import org.cloudbus.cloudsim.gpu.GpuTaskSchedulerLeftover;
import org.cloudbus.cloudsim.gpu.GpuVm;
import org.cloudbus.cloudsim.gpu.GpuVmAllocationPolicySimple;
import org.cloudbus.cloudsim.gpu.GpuVmTags;
import org.cloudbus.cloudsim.gpu.Pgpu;
import org.cloudbus.cloudsim.gpu.Vgpu;
import org.cloudbus.cloudsim.gpu.VgpuScheduler;
import org.cloudbus.cloudsim.gpu.VideoCard;
import org.cloudbus.cloudsim.gpu.allocation.VideoCardAllocationPolicy;
import org.cloudbus.cloudsim.gpu.allocation.VideoCardAllocationPolicyBreadthFirst;
import org.cloudbus.cloudsim.gpu.hardware_assisted.GridVgpuTags;
import org.cloudbus.cloudsim.gpu.hardware_assisted.GridVideoCardTags;
import org.cloudbus.cloudsim.gpu.hardware_assisted.PerformanceGridVgpuSchedulerFairShare;
import org.cloudbus.cloudsim.gpu.hardware_assisted.VideoCardPowerModelNvidiaGridK1;
import org.cloudbus.cloudsim.gpu.performance.models.PerformanceModel;
import org.cloudbus.cloudsim.gpu.performance.models.PerformanceModelGpuConstant;
import org.cloudbus.cloudsim.gpu.power.PowerGpuDatacenter;
import org.cloudbus.cloudsim.gpu.power.PowerGpuDatacenterBroker;
import org.cloudbus.cloudsim.gpu.power.PowerGpuHost;
import org.cloudbus.cloudsim.gpu.power.PowerVideoCard;
import org.cloudbus.cloudsim.gpu.power.models.GpuHostPowerModelLinear;
import org.cloudbus.cloudsim.gpu.power.models.VideoCardPowerModel;
import org.cloudbus.cloudsim.gpu.provisioners.BwProvisionerRelaxed;
import org.cloudbus.cloudsim.gpu.provisioners.GpuBwProvisionerShared;
import org.cloudbus.cloudsim.gpu.provisioners.GpuGddramProvisionerSimple;
import org.cloudbus.cloudsim.gpu.provisioners.VideoCardBwProvisioner;
import org.cloudbus.cloudsim.gpu.provisioners.VideoCardBwProvisionerShared;
import org.cloudbus.cloudsim.gpu.selection.PgpuSelectionPolicy;
import org.cloudbus.cloudsim.gpu.selection.PgpuSelectionPolicyBreadthFirst;
import org.cloudbus.cloudsim.lists.VmList;
import org.cloudbus.cloudsim.power.models.PowerModel;
import org.cloudbus.cloudsim.provisioners.PeProvisionerSimple;
import org.cloudbus.cloudsim.provisioners.RamProvisionerSimple;
import de.vandermeer.asciitable.AsciiTable;
/**
 * This example demonstrates the use of the gpu package for the simulation of
 * mixed workloads. <br>
 * GPU virtualization mode: hardware-assisted <br>
 * Performance Model: on <br>
 * Interference Model: off <br>
 * Power Model: on
 *
 * @author Ahmad Siavashi
 */
public class CloudSimGpuExample5 {
	/** The list of GPU cloudlets submitted to the broker. */
	private static List<GpuCloudlet> cloudletList;
	/** The list of GPU-enabled VMs. */
	private static List<GpuVm> vmlist;
	/** The list of power-aware GPU datacenters. */
	private static List<PowerGpuDatacenter> datacenterList;
	/** Number of VMs to create. */
	private static int numVms = 1;
	/** Number of gpu-cloudlets to create. */
	private static int numGpuCloudlets = 1;
	/** The interval (in simulation time units) at which progress is evaluated. */
	private static double schedulingInterval = 20;

	/**
	 * Creates main() to run this example.
	 *
	 * @param args the args
	 */
	@SuppressWarnings("unused")
	public static void main(String[] args) {
		Log.printLine("Starting CloudSimGpuExample5...");
		try {
			// number of cloud users
			int num_user = 1;
			Calendar calendar = Calendar.getInstance();
			// trace events
			boolean trace_flag = true;
			// CloudSim initialization
			CloudSim.init(num_user, calendar, trace_flag);
			// Create a list to hold created datacenters
			datacenterList = new ArrayList<PowerGpuDatacenter>();
			// Create one Datacenter
			PowerGpuDatacenter datacenter = createDatacenter("Datacenter");
			// add the datacenter to the datacenterList
			datacenterList.add(datacenter);
			// Create one Broker
			PowerGpuDatacenterBroker broker = createBroker("Broker");
			int brokerId = broker.getId();
			// Create a list to hold created VMs
			vmlist = new ArrayList<GpuVm>();
			// Create a list to hold issued Cloudlets
			cloudletList = new ArrayList<GpuCloudlet>();
			// Create VMs
			for (int i = 0; i < numVms; i++) {
				int vmId = i;
				int vgpuId = i;
				// Create a VM
				GpuVm vm = createGpuVm(vmId, vgpuId, brokerId);
				// add the VM to the vmList
				vmlist.add(vm);
			}
			// Create GpuCloudlets
			for (int i = 0; i < numGpuCloudlets; i++) {
				int gpuCloudletId = i;
				int gpuTaskId = i;
				// Create a Cloudlet
				GpuCloudlet cloudlet = createGpuCloudlet(gpuCloudletId, gpuTaskId, brokerId);
				// add the cloudlet to the list
				cloudletList.add(cloudlet);
			}
			// Cloudlet-VM assignment: distribute cloudlets over VMs round-robin
			for (int i = 0; i < numGpuCloudlets; i++) {
				GpuCloudlet cloudlet = cloudletList.get(i);
				cloudlet.setVmId(i % numVms);
			}
			// submit vm list to the broker
			broker.submitVmList(vmlist);
			// submit cloudlet list to the broker
			broker.submitCloudletList(cloudletList);
			// Disable logs during the run; only the final report is printed
			Log.disable();
			// Starts the simulation
			CloudSim.startSimulation();
			CloudSim.stopSimulation();
			Log.enable();
			// Print results when simulation is over
			List<Cloudlet> newList = broker.getCloudletReceivedList();
			printCloudletList(newList);
			Log.printLine("CloudSimGpuExample5 finished!");
		} catch (Exception e) {
			e.printStackTrace();
			Log.printLine("Unwanted errors happen");
		}
	}

	/**
	 * Creates a GpuCloudlet with a full-utilization CPU part and an attached
	 * {@link GpuTask} (the GPU part).
	 *
	 * @param gpuCloudletId the id of the cloudlet (CPU part)
	 * @param gpuTaskId     the id of the attached GPU task
	 * @param brokerId      the id of the owning broker/user
	 * @return the created GpuCloudlet
	 */
	private static GpuCloudlet createGpuCloudlet(int gpuCloudletId, int gpuTaskId, int brokerId) {
		// Cloudlet (CPU part) properties
		long length = (long) (400 * GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_PE_MIPS);
		long fileSize = 300;
		long outputSize = 300;
		int pesNumber = 1;
		// Full (100%) utilization models for CPU, RAM and network bandwidth
		UtilizationModel cpuUtilizationModel = new UtilizationModelFull();
		UtilizationModel ramUtilizationModel = new UtilizationModelFull();
		UtilizationModel bwUtilizationModel = new UtilizationModelFull();
		// GpuTask (GPU part) properties
		long taskLength = (long) (GridVideoCardTags.NVIDIA_K1_CARD_PE_MIPS * 150);
		long taskInputSize = 128;
		long taskOutputSize = 128;
		long requestedGddramSize = 4 * 1024;
		int numberOfBlocks = 2;
		// Full (100%) utilization models for GPU, GDDRAM and GDDRAM bandwidth
		UtilizationModel gpuUtilizationModel = new UtilizationModelFull();
		UtilizationModel gddramUtilizationModel = new UtilizationModelFull();
		UtilizationModel gddramBwUtilizationModel = new UtilizationModelFull();
		GpuTask gpuTask = new GpuTask(gpuTaskId, taskLength, numberOfBlocks, taskInputSize, taskOutputSize,
				requestedGddramSize, gpuUtilizationModel, gddramUtilizationModel, gddramBwUtilizationModel);
		GpuCloudlet gpuCloudlet = new GpuCloudlet(gpuCloudletId, length, pesNumber, fileSize, outputSize,
				cpuUtilizationModel, ramUtilizationModel, bwUtilizationModel, gpuTask, false);
		gpuCloudlet.setUserId(brokerId);
		return gpuCloudlet;
	}

	/**
	 * Creates a GPU-enabled VM with an attached NVIDIA GRID K180Q vGPU.
	 *
	 * @param vmId     the id of the VM
	 * @param vgpuId   the id of the attached vGPU
	 * @param brokerId the id of the owning broker/user
	 * @return the created GpuVm
	 */
	private static GpuVm createGpuVm(int vmId, int vgpuId, int brokerId) {
		// VM description
		double mips = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_PE_MIPS;
		// image size (original comment said GB; CloudSim conventionally uses MB —
		// NOTE(review): confirm the unit)
		int size = 10;
		// vm memory (original comment said GB; CloudSim conventionally uses MB —
		// NOTE(review): confirm the unit)
		int ram = 2;
		long bw = 100;
		// number of CPU cores
		int pesNumber = 4;
		// VMM name
		String vmm = "Xen";
		// Create the VM with a time-shared GpuCloudlet scheduler
		GpuVm vm = new GpuVm(vmId, brokerId, mips, pesNumber, ram, bw, size, vmm, GpuVmTags.GPU_VM_CUSTOM,
				new GpuCloudletSchedulerTimeShared());
		// Create a GpuTask scheduler for the vGPU
		GpuTaskSchedulerLeftover gpuTaskScheduler = new GpuTaskSchedulerLeftover();
		// Create a K180Q-profile vGPU and attach it to the VM
		Vgpu vgpu = GridVgpuTags.getK180Q(vgpuId, gpuTaskScheduler);
		vm.setVgpu(vgpu);
		return vm;
	}

	/**
	 * Creates a datacenter with two hosts: one equipped with NVIDIA GRID K1
	 * video cards and one without any GPU.
	 *
	 * @param name the name of the datacenter
	 * @return the datacenter
	 */
	private static PowerGpuDatacenter createDatacenter(String name) {
		// We need a list to store our machines
		List<GpuHost> hostList = new ArrayList<GpuHost>();
		/* Create 2 hosts, one is GPU-equipped */
		// Number of the host's video cards
		int numVideoCards = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_NUM_VIDEO_CARDS;
		// To hold the video cards
		List<VideoCard> videoCards = new ArrayList<VideoCard>(numVideoCards);
		for (int videoCardId = 0; videoCardId < numVideoCards; videoCardId++) {
			List<Pgpu> pgpus = new ArrayList<Pgpu>();
			// Adding an NVIDIA K1 card
			double mips = GridVideoCardTags.NVIDIA_K1_CARD_PE_MIPS;
			int gddram = GridVideoCardTags.NVIDIA_K1_CARD_GPU_MEM;
			long bw = GridVideoCardTags.NVIDIA_K1_CARD_BW_PER_BUS;
			for (int pgpuId = 0; pgpuId < GridVideoCardTags.NVIDIA_K1_CARD_GPUS; pgpuId++) {
				List<Pe> pes = new ArrayList<Pe>();
				for (int peId = 0; peId < GridVideoCardTags.NVIDIA_K1_CARD_GPU_PES; peId++) {
					pes.add(new Pe(peId, new PeProvisionerSimple(mips)));
				}
				pgpus.add(
						new Pgpu(pgpuId, pes, new GpuGddramProvisionerSimple(gddram), new GpuBwProvisionerShared(bw)));
			}
			// Pgpu selection policy
			PgpuSelectionPolicy pgpuSelectionPolicy = new PgpuSelectionPolicyBreadthFirst();
			// Performance model: constant 10% performance loss per vGPU
			double performanceLoss = 0.1;
			PerformanceModel<VgpuScheduler, Vgpu> performanceModel = new PerformanceModelGpuConstant(performanceLoss);
			// vGPU scheduler
			PerformanceGridVgpuSchedulerFairShare vgpuScheduler = new PerformanceGridVgpuSchedulerFairShare(
					GridVideoCardTags.NVIDIA_K1_CARD, pgpus, pgpuSelectionPolicy, performanceModel);
			// PCI Express bus bandwidth provisioner
			VideoCardBwProvisioner videoCardBwProvisioner = new VideoCardBwProvisionerShared(
					BusTags.PCI_E_3_X16_BW);
			// Video card power model
			VideoCardPowerModel videoCardPowerModel = new VideoCardPowerModelNvidiaGridK1(false);
			// Create a video card
			PowerVideoCard videoCard = new PowerVideoCard(videoCardId, GridVideoCardTags.NVIDIA_K1_CARD, vgpuScheduler,
					videoCardBwProvisioner, videoCardPowerModel);
			videoCards.add(videoCard);
		}
		// Create the GPU-equipped host
		int hostId = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3;
		// A machine contains one or more PEs (CPUs/cores)
		List<Pe> peList = new ArrayList<Pe>();
		// PE's MIPS rating
		double mips = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_PE_MIPS;
		for (int peId = 0; peId < GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_NUM_PES; peId++) {
			// Fix: use peId (the original created every Pe with id 0, producing
			// duplicate PE ids within the host)
			peList.add(new Pe(peId, new PeProvisionerSimple(mips)));
		}
		// host memory (MB)
		int ram = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_RAM;
		// host storage
		long storage = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_STORAGE;
		// host bandwidth
		int bw = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_BW;
		// Set the VM scheduler
		VmScheduler vmScheduler = new VmSchedulerTimeShared(peList);
		// Host power model: linear between 70% static power and 200 W max
		double hostMaxPower = 200;
		double hostStaticPowerPercent = 0.70;
		PowerModel powerModel = new GpuHostPowerModelLinear(hostMaxPower, hostStaticPowerPercent);
		// Video card selection policy
		VideoCardAllocationPolicy videoCardAllocationPolicy = new VideoCardAllocationPolicyBreadthFirst(videoCards);
		PowerGpuHost newHost = new PowerGpuHost(hostId, GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3,
				new RamProvisionerSimple(ram), new BwProvisionerRelaxed(bw), storage, peList, vmScheduler,
				videoCardAllocationPolicy, powerModel);
		hostList.add(newHost);
		// Create a host without a GPU (videoCardAllocationPolicy == null)
		peList = new ArrayList<Pe>();
		for (int peId = 0; peId < GpuHostTags.DUAL_INTEL_XEON_E5_2690_V4_NUM_PES; peId++) {
			// Fix: use peId (same duplicate-id defect as above)
			peList.add(new Pe(peId, new PeProvisionerSimple(GpuHostTags.DUAL_INTEL_XEON_E5_2690_V4_PE_MIPS)));
		}
		powerModel = new GpuHostPowerModelLinear(hostMaxPower, hostStaticPowerPercent);
		newHost = new PowerGpuHost(GpuHostTags.DUAL_INTEL_XEON_E5_2690_V4, GpuHostTags.DUAL_INTEL_XEON_E5_2690_V4,
				new RamProvisionerSimple(GpuHostTags.DUAL_INTEL_XEON_E5_2690_V4_RAM),
				new BwProvisionerRelaxed(GpuHostTags.DUAL_INTEL_XEON_E5_2690_V4_BW),
				GpuHostTags.DUAL_INTEL_XEON_E5_2690_V4_STORAGE, peList, new VmSchedulerTimeShared(peList), null,
				powerModel);
		hostList.add(newHost);
		// Create a DatacenterCharacteristics object that stores the properties of
		// the data center: architecture, OS, machine list, allocation policy
		// (time- or space-shared), time zone and price (G$/Pe time unit).
		// system architecture
		String arch = "x86";
		// operating system
		String os = "Linux";
		// VM manager
		String vmm = "Horizen";
		// time zone this resource is located in (Tehran)
		double time_zone = +3.5;
		// the cost of using processing in this resource
		double cost = 0.0;
		// the cost of using memory in this resource
		double costPerMem = 0.00;
		// the cost of using storage in this resource
		double costPerStorage = 0.000;
		// the cost of using bandwidth in this resource
		double costPerBw = 0.0;
		// we are not adding SAN devices for now
		LinkedList<Storage> storageList = new LinkedList<Storage>();
		DatacenterCharacteristics characteristics = new DatacenterCharacteristics(arch, os, vmm, hostList, time_zone,
				cost, costPerMem, costPerStorage, costPerBw);
		// We need to create a Datacenter object.
		PowerGpuDatacenter datacenter = null;
		try {
			datacenter = new PowerGpuDatacenter(name, characteristics, new GpuVmAllocationPolicySimple(hostList),
					storageList, schedulingInterval);
		} catch (Exception e) {
			e.printStackTrace();
		}
		return datacenter;
	}

	/**
	 * Creates the broker.
	 *
	 * @param name the name
	 * @return the datacenter broker, or {@code null} if creation failed
	 */
	private static PowerGpuDatacenterBroker createBroker(String name) {
		PowerGpuDatacenterBroker broker = null;
		try {
			broker = new PowerGpuDatacenterBroker(name);
		} catch (Exception e) {
			e.printStackTrace();
			return null;
		}
		return broker;
	}

	/**
	 * Prints the received GpuCloudlets (CPU part, GPU part and memory
	 * transfers) followed by the per-entity energy-consumption report.
	 *
	 * @param gpuCloudlets the list of received GpuCloudlets
	 */
	@SuppressWarnings("unchecked")
	private static void printCloudletList(List<Cloudlet> gpuCloudlets) {
		Log.printLine(String.join("", Collections.nCopies(100, "-")));
		DecimalFormat dft = new DecimalFormat("###.##");
		for (GpuCloudlet gpuCloudlet : (List<GpuCloudlet>) (List<?>) gpuCloudlets) {
			// Cloudlet (CPU part)
			AsciiTable at = new AsciiTable();
			at.addRule();
			at.addRow("Cloudlet ID", "Status", "Datacenter ID", "VM ID", "Time", "Start Time", "Finish Time");
			at.addRule();
			if (gpuCloudlet.getCloudletStatus() == Cloudlet.SUCCESS) {
				at.addRow(gpuCloudlet.getCloudletId(), "SUCCESS", gpuCloudlet.getResourceId(), gpuCloudlet.getVmId(),
						dft.format(gpuCloudlet.getActualCPUTime()).toString(),
						dft.format(gpuCloudlet.getExecStartTime()).toString(),
						dft.format(gpuCloudlet.getFinishTime()).toString());
				at.addRule();
			}
			GpuTask gpuTask = gpuCloudlet.getGpuTask();
			// Host-to-device memory transfer
			AsciiTable atMT = new AsciiTable();
			atMT.addRule();
			atMT.addRow("Direction", "Time", "Start Time", "End Time");
			atMT.addRule();
			atMT.addRow("H2D", dft.format(gpuTask.getMemoryTransferHostToDevice().getTime()).toString(),
					dft.format(gpuTask.getMemoryTransferHostToDevice().startTime).toString(),
					dft.format(gpuTask.getMemoryTransferHostToDevice().endTime).toString());
			atMT.addRule();
			// GpuTask (GPU part)
			at.addRow("Task ID", "Cloudlet ID", "Status", "vGPU Profile", "Time", "Start Time", "Finish Time");
			at.addRule();
			if (gpuTask.getTaskStatus() == GpuTask.FINISHED) {
				at.addRow(gpuTask.getTaskId(), gpuTask.getCloudlet().getCloudletId(), "SUCCESS",
						GridVgpuTags.getVgpuTypeString(
								((GpuVm) VmList.getById(vmlist, gpuTask.getCloudlet().getVmId())).getVgpu().getType()),
						dft.format(gpuTask.getActualGPUTime()).toString(),
						dft.format(gpuTask.getExecStartTime()).toString(),
						dft.format(gpuTask.getFinishTime()).toString());
				at.addRule();
			}
			// Device-to-host memory transfer
			atMT.addRow("D2H", dft.format(gpuTask.getMemoryTransferDeviceToHost().getTime()).toString(),
					dft.format(gpuTask.getMemoryTransferDeviceToHost().startTime).toString(),
					dft.format(gpuTask.getMemoryTransferDeviceToHost().endTime).toString());
			atMT.addRule();
			at.getContext().setWidth(100);
			atMT.getContext().setWidth(100);
			Log.printLine(at.render());
			Log.printLine(atMT.render());
			Log.printLine(String.join("", Collections.nCopies(100, "-")));
		}
		// Energy report: one row per datacenter, host and video card
		AsciiTable at = new AsciiTable();
		at.addRule();
		at.addRow("Entity", "Energy Consumed (Joules)");
		at.addRule();
		for (PowerGpuDatacenter datacenter : datacenterList) {
			String dcDepth = "#" + datacenter.getId();
			at.addRow("Datacenter " + dcDepth, dft.format(datacenter.getConsumedEnergy()).toString());
			at.addRule();
			for (Entry<PowerGpuHost, Double> entry : datacenter.getHostEnergyMap().entrySet()) {
				PowerGpuHost host = entry.getKey();
				String hostDepth = "#" + host.getId() + " / " + dcDepth;
				at.addRow("Host " + hostDepth, dft.format(datacenter.getHostCpuEnergyMap().get(host)).toString() + " / "
						+ dft.format(datacenter.getHostEnergyMap().get(host)).toString());
				at.addRule();
				if (host.getVideoCardAllocationPolicy() != null) {
					for (PowerVideoCard videoCard : (List<PowerVideoCard>) host.getVideoCardAllocationPolicy()
							.getVideoCards()) {
						// Fix: label each video card relative to its host; the original
						// accumulated previous cards' ids into the label.
						String videoCardDepth = "#" + videoCard.getId() + " / " + hostDepth;
						at.addRow("Video Card " + videoCardDepth,
								dft.format(datacenter.getHostVideoCardEnergyMap().get(host).get(videoCard)).toString()
										+ " / " + dft.format(datacenter.getHostEnergyMap().get(host)).toString());
						at.addRule();
					}
				}
			}
		}
		at.getContext().setWidth(100);
		Log.printLine(at.render());
	}
}
补充
// PCI Express bus bandwidth provisioner: the vGPUs on a card share the PCIe 3.0 x16 link bandwidth
VideoCardBwProvisioner videoCardBwProvisioner = new VideoCardBwProvisionerShared(
BusTags.PCI_E_3_X16_BW);
# 这里仅仅只是设置了一个带宽而已
// Simply forwards the total bus bandwidth to the base-class constructor.
public VideoCardBwProvisionerShared(long bw) {
super(bw);
}
// Base-class constructor: records the total bandwidth and initializes the
// per-Pgpu allocated-bandwidth and requested-bandwidth bookkeeping maps.
public VideoCardBwProvisioner(long bw) {
setBw(bw);
setPgpuBwMap(new HashMap<Pgpu, Long>());
setPgpuRequestedBwMap(new HashMap<Pgpu, Long>());
}
# 有意思的是:带宽分配也采用了缩放策略
/**
 * Rescales every Pgpu's allocated bandwidth so the sum fits within the video
 * card's total bus bandwidth. Each Pgpu is granted
 * min(requestedBw, requestedBw * totalBw / totalRequestedBw): requests are
 * honored in full when the bus is under-subscribed and shrunk proportionally
 * when it is over-subscribed.
 */
protected void redistributeAllocatedBw() {
// calculating the scaling factor
long totalBw = getBw();
long totalRequestedBw = 0;
for (Long bw : getPgpuRequestedBwMap().values()) {
totalRequestedBw += bw;
}
// NOTE(review): if totalRequestedBw is 0 this factor is Infinity/NaN; it is
// harmless only because every per-Pgpu request is then 0 as well — confirm.
final double scaleFactor = ((double) totalBw) / totalRequestedBw;
for (Pgpu pgpu : getPgpuRequestedBwMap().keySet()) {
long scaledBw = (long) (getPgpuRequestedBwMap().get(pgpu).longValue() * scaleFactor);
long requestedBw = getPgpuRequestedBwMap().get(pgpu);
// Never grant more than was requested (scaleFactor exceeds 1 when the bus
// is under-subscribed).
if (scaledBw > requestedBw) {
getPgpuBwMap().put(pgpu, requestedBw);
} else {
getPgpuBwMap().put(pgpu, scaledBw);
}
}
}