// GpuCloudSim Example 3.
// Only the differences from Example 1 and Example 2 are annotated here.
package org.cloudbus.cloudsim.examples.gpu;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import org.cloudbus.cloudsim.Cloudlet;
import org.cloudbus.cloudsim.DatacenterCharacteristics;
import org.cloudbus.cloudsim.Log;
import org.cloudbus.cloudsim.Pe;
import org.cloudbus.cloudsim.Storage;
import org.cloudbus.cloudsim.UtilizationModel;
import org.cloudbus.cloudsim.UtilizationModelFull;
import org.cloudbus.cloudsim.VmScheduler;
import org.cloudbus.cloudsim.VmSchedulerTimeShared;
import org.cloudbus.cloudsim.core.CloudSim;
import org.cloudbus.cloudsim.gpu.BusTags;
import org.cloudbus.cloudsim.gpu.GpuCloudlet;
import org.cloudbus.cloudsim.gpu.GpuCloudletSchedulerTimeShared;
import org.cloudbus.cloudsim.gpu.GpuDatacenter;
import org.cloudbus.cloudsim.gpu.GpuDatacenterBroker;
import org.cloudbus.cloudsim.gpu.GpuHost;
import org.cloudbus.cloudsim.gpu.GpuHostTags;
import org.cloudbus.cloudsim.gpu.GpuTask;
import org.cloudbus.cloudsim.gpu.GpuTaskSchedulerLeftover;
import org.cloudbus.cloudsim.gpu.GpuVm;
import org.cloudbus.cloudsim.gpu.GpuVmAllocationPolicySimple;
import org.cloudbus.cloudsim.gpu.GpuVmTags;
import org.cloudbus.cloudsim.gpu.Pgpu;
import org.cloudbus.cloudsim.gpu.ResGpuTask;
import org.cloudbus.cloudsim.gpu.Vgpu;
import org.cloudbus.cloudsim.gpu.VgpuScheduler;
import org.cloudbus.cloudsim.gpu.VideoCard;
import org.cloudbus.cloudsim.gpu.allocation.VideoCardAllocationPolicy;
import org.cloudbus.cloudsim.gpu.allocation.VideoCardAllocationPolicyBreadthFirst;
import org.cloudbus.cloudsim.gpu.hardware_assisted.GridVgpuTags;
import org.cloudbus.cloudsim.gpu.hardware_assisted.GridVideoCardTags;
import org.cloudbus.cloudsim.gpu.hardware_assisted.PerformanceGridVgpuSchedulerFairShare;
import org.cloudbus.cloudsim.gpu.interference.InterferenceGpuTaskSchedulerLeftover;
import org.cloudbus.cloudsim.gpu.interference.models.InterferenceModel;
import org.cloudbus.cloudsim.gpu.interference.models.InterferenceModelGpuMemory;
import org.cloudbus.cloudsim.gpu.performance.PerformanceGpuHost;
import org.cloudbus.cloudsim.gpu.performance.models.PerformanceModel;
import org.cloudbus.cloudsim.gpu.performance.models.PerformanceModelGpuConstant;
import org.cloudbus.cloudsim.gpu.provisioners.BwProvisionerRelaxed;
import org.cloudbus.cloudsim.gpu.provisioners.GpuBwProvisionerShared;
import org.cloudbus.cloudsim.gpu.provisioners.GpuGddramProvisionerSimple;
import org.cloudbus.cloudsim.gpu.provisioners.VideoCardBwProvisioner;
import org.cloudbus.cloudsim.gpu.provisioners.VideoCardBwProvisionerShared;
import org.cloudbus.cloudsim.gpu.selection.PgpuSelectionPolicy;
import org.cloudbus.cloudsim.gpu.selection.PgpuSelectionPolicyBreadthFirst;
import org.cloudbus.cloudsim.lists.VmList;
import org.cloudbus.cloudsim.provisioners.PeProvisionerSimple;
import org.cloudbus.cloudsim.provisioners.RamProvisionerSimple;
import de.vandermeer.asciitable.AsciiTable;
/**
 * This example demonstrates the use of the gpu package in simulations. <br>
 * GPU virtualization mode: hardware-assisted <br>
 * Performance Model: on <br>
 * Interference Model: on <br>
 * Power Model: off
 *
 * @author Ahmad Siavashi
 */
public class CloudSimGpuExample3 {

	/** The cloudlet (task) list submitted to the broker. */
	private static List<GpuCloudlet> cloudletList;

	/** The VM list submitted to the broker. */
	private static List<GpuVm> vmlist;

	/**
	 * The resolution (in simulation time units) at which progress is evaluated.
	 */
	private static double schedulingInterval = 20;

	/**
	 * Runs the example: one datacenter, one broker, two GPU-enabled VMs and
	 * three GpuCloudlets, then prints the results.
	 *
	 * @param args
	 *            the args (unused)
	 */
	@SuppressWarnings("unused")
	public static void main(String[] args) {
		Log.printLine("Starting CloudSimGpuExample3...");
		try {
			// Number of cloud users.
			int num_user = 1;
			Calendar calendar = Calendar.getInstance();
			// Trace events.
			boolean trace_flag = true;
			// CloudSim initialization.
			CloudSim.init(num_user, calendar, trace_flag);
			// Create one datacenter.
			GpuDatacenter datacenter = createDatacenter("Datacenter");
			// Create one broker.
			GpuDatacenterBroker broker = createBroker("Broker");
			int brokerId = broker.getId();
			// Create a list to hold created VMs.
			vmlist = new ArrayList<GpuVm>();
			// Create a list to hold issued Cloudlets.
			cloudletList = new ArrayList<GpuCloudlet>();
			final int numCloudlets = 3;
			// Create GpuCloudlets.
			{
				for (int i = 0; i < numCloudlets; i++) {
					// Cloudlet properties.
					int gpuCloudletId = i;
					long length = (long) (400 * GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_PE_MIPS);
					long fileSize = 300;
					long outputSize = 300;
					int pesNumber = 1;
					// Full (100%) utilization of CPU, RAM and bandwidth.
					UtilizationModel cpuUtilizationModel = new UtilizationModelFull();
					UtilizationModel ramUtilizationModel = new UtilizationModelFull();
					UtilizationModel bwUtilizationModel = new UtilizationModelFull();
					// GpuTask properties. The first two tasks are twice as large in
					// transfer sizes and GDDRAM demand as the third one.
					int gpuTaskId = i;
					long taskLength = (long) (GridVideoCardTags.NVIDIA_K2_CARD_PE_MIPS * 150);
					long taskInputSize = (i < 2 ? 2 : 1) * 1024;
					long taskOutputSize = (i < 2 ? 2 : 1) * 1024;
					long requestedGddramSize = (i < 2 ? 2 : 1) * 1024;
					int numberOfBlocks = 2;
					UtilizationModel gpuUtilizationModel = new UtilizationModelFull();
					UtilizationModel gddramUtilizationModel = new UtilizationModelFull();
					UtilizationModel gddramBwUtilizationModel = new UtilizationModelFull();
					GpuTask gpuTask = new GpuTask(gpuTaskId, taskLength, numberOfBlocks, taskInputSize, taskOutputSize,
							requestedGddramSize, gpuUtilizationModel, gddramUtilizationModel, gddramBwUtilizationModel);
					GpuCloudlet gpuCloudlet = new GpuCloudlet(gpuCloudletId, length, pesNumber, fileSize, outputSize,
							cpuUtilizationModel, ramUtilizationModel, bwUtilizationModel, gpuTask, false);
					gpuCloudlet.setUserId(brokerId);
					cloudletList.add(gpuCloudlet);
				}
			}
			final int numVMs = 2;
			// Create VMs.
			{
				for (int i = 0; i < numVMs; i++) {
					int vmId = i;
					// VM description.
					double mips = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_PE_MIPS;
					// Image size (GB).
					int size = 10;
					// VM memory (GB).
					int ram = 2;
					long bw = 100;
					// Number of CPUs.
					int pesNumber = 4;
					// VMM name.
					String vmm = "vSphere";
					// Create the VM.
					GpuVm vm = new GpuVm(vmId, brokerId, mips, pesNumber, ram, bw, size, vmm, GpuVmTags.GPU_VM_CUSTOM,
							new GpuCloudletSchedulerTimeShared());
					// Interference model: co-running GPU tasks interfere through
					// their GPU-memory usage.
					InterferenceModel<ResGpuTask> interferenceModel = new InterferenceModelGpuMemory();
					// Create the GpuTask scheduler (interference-aware).
					GpuTaskSchedulerLeftover gpuTaskScheduler = new InterferenceGpuTaskSchedulerLeftover(
							interferenceModel);
					// Create a vGPU: the first VM gets a K280Q profile, the second a K260Q.
					int vgpuId = i;
					Vgpu vgpu = i < 1 ? GridVgpuTags.getK280Q(vgpuId, gpuTaskScheduler)
							: GridVgpuTags.getK260Q(vgpuId, gpuTaskScheduler);
					vm.setVgpu(vgpu);
					vmlist.add(vm);
				}
			}
			// Cloudlet-VM assignment: cloudlets 0 and 1 share VM 0 (so they will
			// interfere with each other); cloudlet 2 runs alone on VM 1.
			cloudletList.get(0).setVmId(vmlist.get(0).getId());
			cloudletList.get(1).setVmId(vmlist.get(0).getId());
			cloudletList.get(2).setVmId(vmlist.get(1).getId());
			// Submit the VM list to the broker.
			broker.submitVmList(vmlist);
			// Submit the cloudlet list to the broker.
			broker.submitCloudletList(cloudletList);
			// Disable logs during the run.
			Log.disable();
			// Start the simulation.
			CloudSim.startSimulation();
			CloudSim.stopSimulation();
			Log.enable();
			// Print results when the simulation is over.
			List<Cloudlet> newList = broker.getCloudletReceivedList();
			printCloudletList(newList);
			Log.printLine("CloudSimGpuExample3 finished!");
		} catch (Exception e) {
			e.printStackTrace();
			Log.printLine("Unwanted errors happen");
		}
	}

	/**
	 * Creates a datacenter with one host that carries NVIDIA GRID K2 video
	 * cards, using the hardware-assisted (GRID) vGPU scheduler with a constant
	 * performance-degradation model.
	 *
	 * @param name
	 *            the name of the datacenter
	 *
	 * @return the datacenter, or {@code null} if its construction failed
	 */
	private static GpuDatacenter createDatacenter(String name) {
		// We need a list to store our machines.
		List<GpuHost> hostList = new ArrayList<GpuHost>();
		// Number of the host's video cards.
		int numVideoCards = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_NUM_VIDEO_CARDS;
		// To hold the video cards.
		List<VideoCard> videoCards = new ArrayList<VideoCard>(numVideoCards);
		for (int videoCardId = 0; videoCardId < numVideoCards; videoCardId++) {
			List<Pgpu> pgpus = new ArrayList<Pgpu>();
			// Adding an NVIDIA K2 card.
			double mips = GridVideoCardTags.NVIDIA_K2_CARD_PE_MIPS;
			int gddram = GridVideoCardTags.NVIDIA_K2_CARD_GPU_MEM;
			long bw = GridVideoCardTags.NVIDIA_K2_CARD_BW_PER_BUS;
			for (int pgpuId = 0; pgpuId < GridVideoCardTags.NVIDIA_K2_CARD_GPUS; pgpuId++) {
				List<Pe> pes = new ArrayList<Pe>();
				for (int peId = 0; peId < GridVideoCardTags.NVIDIA_K2_CARD_GPU_PES; peId++) {
					pes.add(new Pe(peId, new PeProvisionerSimple(mips)));
				}
				pgpus.add(
						new Pgpu(pgpuId, pes, new GpuGddramProvisionerSimple(gddram), new GpuBwProvisionerShared(bw)));
			}
			// Pgpu selection policy.
			PgpuSelectionPolicy pgpuSelectionPolicy = new PgpuSelectionPolicyBreadthFirst();
			// Performance model: a constant 10% degradation for non-passthrough
			// vGPUs (the argument is the performance loss, in [0, 1]).
			PerformanceModel<VgpuScheduler, Vgpu> performanceModel = new PerformanceModelGpuConstant(0.1);
			// vGPU scheduler.
			VgpuScheduler vgpuScheduler = new PerformanceGridVgpuSchedulerFairShare(
					GridVideoCardTags.NVIDIA_K2_CARD, pgpus, pgpuSelectionPolicy, performanceModel);
			// PCI Express bus bandwidth provisioner.
			VideoCardBwProvisioner videoCardBwProvisioner = new VideoCardBwProvisionerShared(
					BusTags.PCI_E_3_X16_BW);
			// Create a video card.
			VideoCard videoCard = new VideoCard(videoCardId, GridVideoCardTags.NVIDIA_K2_CARD, vgpuScheduler,
					videoCardBwProvisioner);
			videoCards.add(videoCard);
		}
		// Create a host.
		// NOTE(review): the host id is taken from a GpuHostTags constant here;
		// confirm this constant is an int id and not the host-type tag.
		int hostId = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3;
		// A machine contains one or more PEs (CPUs/cores).
		List<Pe> peList = new ArrayList<Pe>();
		// The PE's MIPS rating.
		double mips = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_PE_MIPS;
		for (int peId = 0; peId < GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_NUM_PES; peId++) {
			// Create PEs and add them to the list. Fixed: the original passed a
			// hard-coded id 0 for every PE; each PE must carry its own id.
			peList.add(new Pe(peId, new PeProvisionerSimple(mips)));
		}
		// Create the host with its id and list of PEs, then add it to the list
		// of machines.
		// Host memory (MB).
		int ram = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_RAM;
		// Host storage.
		long storage = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_STORAGE;
		// Host bandwidth.
		int bw = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_BW;
		// Set the VM scheduler.
		VmScheduler vmScheduler = new VmSchedulerTimeShared(peList);
		// Video card allocation policy.
		VideoCardAllocationPolicy videoCardAllocationPolicy = new VideoCardAllocationPolicyBreadthFirst(videoCards);
		PerformanceGpuHost newHost = new PerformanceGpuHost(hostId, GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3,
				new RamProvisionerSimple(ram), new BwProvisionerRelaxed(bw), storage, peList, vmScheduler,
				videoCardAllocationPolicy);
		hostList.add(newHost);
		// Create a DatacenterCharacteristics object that stores the properties
		// of a data center: architecture, OS, list of machines, allocation
		// policy (time- or space-shared), time zone and its price (G$/Pe time
		// unit).
		// System architecture.
		String arch = "x86";
		// Operating system.
		String os = "Linux";
		// VM manager.
		String vmm = "Horizen";
		// Time zone this resource is located in (Tehran).
		double time_zone = +3.5;
		// The cost of using processing in this resource.
		double cost = 0.0;
		// The cost of using memory in this resource.
		double costPerMem = 0.00;
		// The cost of using storage in this resource.
		double costPerStorage = 0.000;
		// The cost of using bandwidth in this resource.
		double costPerBw = 0.0;
		// We are not adding SAN devices for now.
		LinkedList<Storage> storageList = new LinkedList<Storage>();
		DatacenterCharacteristics characteristics = new DatacenterCharacteristics(arch, os, vmm, hostList, time_zone,
				cost, costPerMem, costPerStorage, costPerBw);
		// We need to create a Datacenter object.
		GpuDatacenter datacenter = null;
		try {
			datacenter = new GpuDatacenter(name, characteristics, new GpuVmAllocationPolicySimple(hostList),
					storageList, schedulingInterval);
		} catch (Exception e) {
			e.printStackTrace();
		}
		return datacenter;
	}

	/**
	 * Creates the broker.
	 *
	 * @param name
	 *            the name of the broker
	 *
	 * @return the datacenter broker, or {@code null} if its construction failed
	 */
	private static GpuDatacenterBroker createBroker(String name) {
		GpuDatacenterBroker broker = null;
		try {
			broker = new GpuDatacenterBroker(name);
		} catch (Exception e) {
			e.printStackTrace();
			return null;
		}
		return broker;
	}

	/**
	 * Prints the GpuCloudlet objects: for each cloudlet, a table with its CPU
	 * part, its GPU task, and the host-device memory transfers.
	 *
	 * @param gpuCloudlets
	 *            list of GpuCloudlets
	 */
	@SuppressWarnings("unchecked")
	private static void printCloudletList(List<Cloudlet> gpuCloudlets) {
		Log.printLine(String.join("", Collections.nCopies(100, "-")));
		DecimalFormat dft = new DecimalFormat("###.##");
		for (GpuCloudlet gpuCloudlet : (List<GpuCloudlet>) (List<?>) gpuCloudlets) {
			// Cloudlet (CPU part).
			AsciiTable at = new AsciiTable();
			at.addRule();
			at.addRow("Cloudlet ID", "Status", "Datacenter ID", "VM ID", "Time", "Start Time", "Finish Time");
			at.addRule();
			if (gpuCloudlet.getCloudletStatus() == Cloudlet.SUCCESS) {
				at.addRow(gpuCloudlet.getCloudletId(), "SUCCESS", gpuCloudlet.getResourceId(), gpuCloudlet.getVmId(),
						dft.format(gpuCloudlet.getActualCPUTime()).toString(),
						dft.format(gpuCloudlet.getExecStartTime()).toString(),
						dft.format(gpuCloudlet.getFinishTime()).toString());
				at.addRule();
			}
			GpuTask gpuTask = gpuCloudlet.getGpuTask();
			// Host-to-device memory transfer.
			AsciiTable atMT = new AsciiTable();
			atMT.addRule();
			atMT.addRow("Direction", "Time", "Start Time", "End Time");
			atMT.addRule();
			atMT.addRow("H2D", dft.format(gpuTask.getMemoryTransferHostToDevice().getTime()).toString(),
					dft.format(gpuTask.getMemoryTransferHostToDevice().startTime).toString(),
					dft.format(gpuTask.getMemoryTransferHostToDevice().endTime).toString());
			atMT.addRule();
			// GPU task.
			at.addRow("Task ID", "Cloudlet ID", "Status", "vGPU Profile", "Time", "Start Time", "Finish Time");
			at.addRule();
			if (gpuTask.getTaskStatus() == GpuTask.FINISHED) {
				at.addRow(gpuTask.getTaskId(), gpuTask.getCloudlet().getCloudletId(), "SUCCESS",
						GridVgpuTags.getVgpuTypeString(
								((GpuVm) VmList.getById(vmlist, gpuTask.getCloudlet().getVmId())).getVgpu().getType()),
						dft.format(gpuTask.getActualGPUTime()).toString(),
						dft.format(gpuTask.getExecStartTime()).toString(),
						dft.format(gpuTask.getFinishTime()).toString());
				at.addRule();
			}
			// Device-to-host memory transfer.
			atMT.addRow("D2H", dft.format(gpuTask.getMemoryTransferDeviceToHost().getTime()).toString(),
					dft.format(gpuTask.getMemoryTransferDeviceToHost().startTime).toString(),
					dft.format(gpuTask.getMemoryTransferDeviceToHost().endTime).toString());
			atMT.addRule();
			at.getContext().setWidth(100);
			atMT.getContext().setWidth(100);
			Log.printLine(at.render());
			Log.printLine(atMT.render());
			Log.printLine(String.join("", Collections.nCopies(100, "-")));
		}
	}
}
/*
 * Supplementary note:
 *
 *   PerformanceModel<VgpuScheduler, Vgpu> performanceModel =
 *       new PerformanceModelGpuConstant(0.1);
 *
 * PerformanceModelGpuConstant imposes a constant performance degradation on
 * all vGPUs, regardless of whether they reside on the same Pgpu. Passthrough
 * vGPUs, however, obtain the full performance of the underlying Pgpu. The
 * constructor argument is the performance loss, a value in [0, 1]; here a
 * loss of 0.1 means a vGPU mapped onto a Pgpu retains 90% of its performance:
 *
 *   public PerformanceModelGpuConstant(double performanceLoss) {
 *       this.gain = 1 - performanceLoss;
 *   }
 */