GpuCloudSim Example 2

This example omits the parts already covered in Example 1 and focuses on what Example 1 did not include.

package org.cloudbus.cloudsim.examples.gpu;

import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;

import org.cloudbus.cloudsim.Cloudlet;
import org.cloudbus.cloudsim.DatacenterCharacteristics;
import org.cloudbus.cloudsim.Log;
import org.cloudbus.cloudsim.Pe;
import org.cloudbus.cloudsim.Storage;
import org.cloudbus.cloudsim.UtilizationModel;
import org.cloudbus.cloudsim.UtilizationModelFull;
import org.cloudbus.cloudsim.VmScheduler;
import org.cloudbus.cloudsim.VmSchedulerTimeShared;
import org.cloudbus.cloudsim.core.CloudSim;
import org.cloudbus.cloudsim.gpu.BusTags;
import org.cloudbus.cloudsim.gpu.GpuCloudlet;
import org.cloudbus.cloudsim.gpu.GpuCloudletSchedulerTimeShared;
import org.cloudbus.cloudsim.gpu.GpuDatacenter;
import org.cloudbus.cloudsim.gpu.GpuDatacenterBroker;
import org.cloudbus.cloudsim.gpu.GpuHost;
import org.cloudbus.cloudsim.gpu.GpuHostTags;
import org.cloudbus.cloudsim.gpu.GpuTask;
import org.cloudbus.cloudsim.gpu.GpuTaskSchedulerLeftover;
import org.cloudbus.cloudsim.gpu.GpuVm;
import org.cloudbus.cloudsim.gpu.GpuVmAllocationPolicySimple;
import org.cloudbus.cloudsim.gpu.GpuVmTags;
import org.cloudbus.cloudsim.gpu.Pgpu;
import org.cloudbus.cloudsim.gpu.ResGpuTask;
import org.cloudbus.cloudsim.gpu.Vgpu;
import org.cloudbus.cloudsim.gpu.VgpuScheduler;
import org.cloudbus.cloudsim.gpu.hardware_assisted.GridVgpuSchedulerFairShare;
import org.cloudbus.cloudsim.gpu.VideoCard;
import org.cloudbus.cloudsim.gpu.allocation.VideoCardAllocationPolicy;
import org.cloudbus.cloudsim.gpu.allocation.VideoCardAllocationPolicyBreadthFirst;
import org.cloudbus.cloudsim.gpu.hardware_assisted.GridVgpuTags;
import org.cloudbus.cloudsim.gpu.hardware_assisted.GridVideoCardTags;
import org.cloudbus.cloudsim.gpu.interference.InterferenceGpuTaskSchedulerLeftover;
import org.cloudbus.cloudsim.gpu.interference.models.InterferenceModel;
import org.cloudbus.cloudsim.gpu.interference.models.InterferenceModelGpuMemory;
import org.cloudbus.cloudsim.gpu.provisioners.BwProvisionerRelaxed;
import org.cloudbus.cloudsim.gpu.provisioners.GpuBwProvisionerShared;
import org.cloudbus.cloudsim.gpu.provisioners.GpuGddramProvisionerSimple;
import org.cloudbus.cloudsim.gpu.provisioners.VideoCardBwProvisioner;
import org.cloudbus.cloudsim.gpu.provisioners.VideoCardBwProvisionerShared;
import org.cloudbus.cloudsim.gpu.selection.PgpuSelectionPolicy;
import org.cloudbus.cloudsim.gpu.selection.PgpuSelectionPolicyBreadthFirst;
import org.cloudbus.cloudsim.lists.VmList;
import org.cloudbus.cloudsim.provisioners.PeProvisionerSimple;
import org.cloudbus.cloudsim.provisioners.RamProvisionerSimple;

import de.vandermeer.asciitable.AsciiTable;

/**
 * This example demonstrates the use of gpu package in simulations. <br>
 * GPU virtualization mode: hardware-assisted <br>
 * Performance Model: off <br>
 * Interference Model: on <br>
 * Power Model: off
 * 
 * @author Ahmad Siavashi
 * 
 */
public class CloudSimGpuExample2 {
	/** The cloudlet list. */
	private static List<GpuCloudlet> cloudletList;
	/** The vmlist. */
	private static List<GpuVm> vmlist;
	/** number of VMs. */
	private static int numVms = 1;
	/** number of gpuCloudlets */
	private static int numGpuCloudlets = 2;
	/**
	 * The resolution at which progress is evaluated.
	 */
	private static double schedulingInterval = 20;

	/**
	 * Creates main() to run this example.
	 * 
	 * @param args
	 *            the args
	 */
	@SuppressWarnings("unused")
	public static void main(String[] args) {
		Log.printLine("Starting CloudSimGpuExample2...");

		try {
			// number of cloud users
			int num_user = 1;
			Calendar calendar = Calendar.getInstance();
			// trace events
			boolean trace_flag = true;

			// CloudSim initialization
			CloudSim.init(num_user, calendar, trace_flag);

			// Create one Datacenter
			GpuDatacenter datacenter = createDatacenter("Datacenter");

			// Create one Broker
			GpuDatacenterBroker broker = createBroker("Broker");
			int brokerId = broker.getId();

			// Create a list to hold created VMs
			vmlist = new ArrayList<GpuVm>();
			// Create a list to hold issued Cloudlets
			cloudletList = new ArrayList<GpuCloudlet>();

			// Create VMs
			for (int i = 0; i < numVms; i++) {
				int vmId = i;
				int vgpuId = i;
				// Create a VM
				GpuVm vm = createGpuVm(vmId, vgpuId, brokerId);
				// add the VM to the vmList
				vmlist.add(vm);
			}

			// Create gpuCloudlets
			for (int i = 0; i < numGpuCloudlets; i++) {
				int gpuCloudletId = i;
				int gpuTaskId = i;
				// Create a Cloudlet
				GpuCloudlet cloudlet = createGpuCloudlet(gpuCloudletId, gpuTaskId, brokerId);
				// add the cloudlet to the list
				cloudletList.add(cloudlet);
			}

			for (int i = 0; i < numGpuCloudlets; i++) {
				// Cloudlet-VM assignment
				GpuCloudlet cloudlet = cloudletList.get(i);
				cloudlet.setVmId(i % numVms);
			}

			// submit vm list to the broker
			broker.submitVmList(vmlist);

			// submit cloudlet list to the broker
			broker.submitCloudletList(cloudletList);

			// Disable Logs
			Log.disable();
			// Starts the simulation
			CloudSim.startSimulation();

			CloudSim.stopSimulation();
			Log.enable();

			// Print results when simulation is over
			List<Cloudlet> newList = broker.getCloudletReceivedList();
			printCloudletList(newList);

			Log.printLine("CloudSimGpuExample2 finished!");
		} catch (Exception e) {
			e.printStackTrace();
			Log.printLine("Unwanted errors happen");
		}
	}

	/**
	 * Create a GpuCloudlet
	 * 
	 * @param gpuCloudletId
	 *            gpuCloudlet id
	 * @param gpuTaskId
	 *            gpuCloudlet's gpuTask id
	 * @param brokerId
	 *            the broker to which the gpuCloudlet belongs
	 * @return the gpuCloudlet
	 */
	private static GpuCloudlet createGpuCloudlet(int gpuCloudletId, int gpuTaskId, int brokerId) {
		// Cloudlet properties (same as in Example 1)
		long length = (long) (400 * GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_PE_MIPS);
		long fileSize = 300;
		long outputSize = 300;
		int pesNumber = 1;
		UtilizationModel cpuUtilizationModel = new UtilizationModelFull();
		UtilizationModel ramUtilizationModel = new UtilizationModelFull();
		UtilizationModel bwUtilizationModel = new UtilizationModelFull();

		// GpuTask properties
		long taskLength = (long) (GridVideoCardTags.NVIDIA_K2_CARD_PE_MIPS * 150);
		long taskInputSize = 2 * 1024;
		long taskOutputSize = 2 * 1024;
		long requestedGddramSize = 2 * 1024;
		int numberOfBlocks = 2;
		UtilizationModel gpuUtilizationModel = new UtilizationModelFull();
		UtilizationModel gddramUtilizationModel = new UtilizationModelFull();
		UtilizationModel gddramBwUtilizationModel = new UtilizationModelFull();

		GpuTask gpuTask = new GpuTask(gpuTaskId, taskLength, numberOfBlocks, taskInputSize, taskOutputSize,
				requestedGddramSize, gpuUtilizationModel, gddramUtilizationModel, gddramBwUtilizationModel);

		GpuCloudlet gpuCloudlet = new GpuCloudlet(gpuCloudletId, length, pesNumber, fileSize, outputSize,
				cpuUtilizationModel, ramUtilizationModel, bwUtilizationModel, gpuTask, false);

		gpuCloudlet.setUserId(brokerId);
		return gpuCloudlet;
	}

	/**
	 * Create a GpuVM
	 * 
	 * @param vmId
	 *            vm id
	 * @param vgpuId
	 *            vm's vgpu id
	 * @param brokerId
	 *            the broker to which this vm belongs
	 * @return the GpuVm
	 */
	private static GpuVm createGpuVm(int vmId, int vgpuId, int brokerId) {
		// VM description
		double mips = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_PE_MIPS;
		// image size (GB)
		int size = 10;
		// vm memory (GB)
		int ram = 2;
		long bw = 100;
		// number of cpus
		int pesNumber = 4;
		// VMM name
		String vmm = "vSphere";

		// Create VM
		GpuVm vm = new GpuVm(vmId, brokerId, mips, pesNumber, ram, bw, size, vmm, GpuVmTags.GPU_VM_CUSTOM,
				new GpuCloudletSchedulerTimeShared());
		// Interference Model
		InterferenceModel<ResGpuTask> interferenceModel = new InterferenceModelGpuMemory();
		// Create GpuTask Scheduler
		GpuTaskSchedulerLeftover gpuTaskScheduler = new InterferenceGpuTaskSchedulerLeftover(interferenceModel);
		// Create a Vgpu
		Vgpu vgpu = GridVgpuTags.getK280Q(vgpuId, gpuTaskScheduler);
		vm.setVgpu(vgpu);
		return vm;
	}

	/**
	 * Create a datacenter.
	 * 
	 * @param name
	 *            the name of the datacenter
	 * 
	 * @return the datacenter
	 */
	private static GpuDatacenter createDatacenter(String name) {
		// We need to create a list to store our machines
		List<GpuHost> hostList = new ArrayList<GpuHost>();
		// Number of host's video cards
		int numVideoCards = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_NUM_VIDEO_CARDS;
		// To hold video cards
		List<VideoCard> videoCards = new ArrayList<VideoCard>(numVideoCards);
		for (int videoCardId = 0; videoCardId < numVideoCards; videoCardId++) {
			List<Pgpu> pgpus = new ArrayList<Pgpu>();
			// Adding an NVIDIA K2 Card
			double mips = GridVideoCardTags.NVIDIA_K2_CARD_PE_MIPS;
			int gddram = GridVideoCardTags.NVIDIA_K2_CARD_GPU_MEM;
			long bw = GridVideoCardTags.NVIDIA_K2_CARD_BW_PER_BUS;
			for (int pgpuId = 0; pgpuId < GridVideoCardTags.NVIDIA_K2_CARD_GPUS; pgpuId++) {
				List<Pe> pes = new ArrayList<Pe>();
				for (int peId = 0; peId < GridVideoCardTags.NVIDIA_K2_CARD_GPU_PES; peId++) {
					pes.add(new Pe(peId, new PeProvisionerSimple(mips)));
				}
				pgpus.add(
						new Pgpu(pgpuId, pes, new GpuGddramProvisionerSimple(gddram), new GpuBwProvisionerShared(bw)));
			}
			// Pgpu selection policy
			PgpuSelectionPolicy pgpuSelectionPolicy = new PgpuSelectionPolicyBreadthFirst();
			// Vgpu Scheduler
			VgpuScheduler vgpuScheduler = new GridVgpuSchedulerFairShare(GridVideoCardTags.NVIDIA_K2_CARD,
					pgpus, pgpuSelectionPolicy);
			// PCI Express Bus Bw Provisioner
			VideoCardBwProvisioner videoCardBwProvisioner = new VideoCardBwProvisionerShared(
					BusTags.PCI_E_3_X16_BW);
			// Create a video card
			VideoCard videoCard = new VideoCard(videoCardId, GridVideoCardTags.NVIDIA_K2_CARD, vgpuScheduler,
					videoCardBwProvisioner);
			videoCards.add(videoCard);
		}

		// Create a host
		int hostId = 0;

		// A Machine contains one or more PEs or CPUs/Cores.
		List<Pe> peList = new ArrayList<Pe>();

		// PE's MIPS power
		double mips = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_PE_MIPS;

		for (int peId = 0; peId < GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_NUM_PES; peId++) {
			// Create PEs and add these into a list.
			peList.add(new Pe(peId, new PeProvisionerSimple(mips)));
		}

		// Create Host with its id and list of PEs and add them to the list of machines
		// host memory (MB)
		int ram = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_RAM;
		// host storage
		long storage = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_STORAGE;
		// host BW
		int bw = GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3_BW;
		// Set VM Scheduler
		VmScheduler vmScheduler = new VmSchedulerTimeShared(peList);
		// Video Card Selection Policy
		VideoCardAllocationPolicy videoCardAllocationPolicy = new VideoCardAllocationPolicyBreadthFirst(videoCards);
		GpuHost newHost = new GpuHost(hostId, GpuHostTags.DUAL_INTEL_XEON_E5_2620_V3, new RamProvisionerSimple(ram),
				new BwProvisionerRelaxed(bw), storage, peList, vmScheduler, videoCardAllocationPolicy);
		hostList.add(newHost);

		// Create a DatacenterCharacteristics object that stores the
		// properties of a data center: architecture, OS, list of
		// Machines, allocation policy: time- or space-shared, time zone
		// and its price (G$/Pe time unit).
		// system architecture
		String arch = "x86";
		// operating system
		String os = "Linux";
		// VM Manager
		String vmm = "Horizen";
		// time zone this resource located (Tehran)
		double time_zone = +3.5;
		// the cost of using processing in this resource
		double cost = 0.0;
		// the cost of using memory in this resource
		double costPerMem = 0.00;
		// the cost of using storage in this resource
		double costPerStorage = 0.000;
		// the cost of using bw in this resource
		double costPerBw = 0.0;
		// we are not adding SAN devices by now
		LinkedList<Storage> storageList = new LinkedList<Storage>();

		DatacenterCharacteristics characteristics = new DatacenterCharacteristics(arch, os, vmm, hostList, time_zone,
				cost, costPerMem, costPerStorage, costPerBw);

		// We need to create a Datacenter object.
		GpuDatacenter datacenter = null;
		try {
			datacenter = new GpuDatacenter(name, characteristics, new GpuVmAllocationPolicySimple(hostList),
					storageList, schedulingInterval);
		} catch (Exception e) {
			e.printStackTrace();
		}

		return datacenter;
	}

	/**
	 * Create a broker.
	 * 
	 * @param name
	 *            the name of the broker
	 * 
	 * @return the datacenter broker
	 */
	private static GpuDatacenterBroker createBroker(String name) {
		GpuDatacenterBroker broker = null;
		try {
			broker = new GpuDatacenterBroker(name);
		} catch (Exception e) {
			e.printStackTrace();
			return null;
		}
		return broker;
	}

	/**
	 * Prints the GpuCloudlet objects.
	 * 
	 * @param gpuCloudlets
	 *            the list of GpuCloudlets to print
	 */
	private static void printCloudletList(List<Cloudlet> gpuCloudlets) {
		Log.printLine(String.join("", Collections.nCopies(100, "-")));
		DecimalFormat dft = new DecimalFormat("###.##");
		for (GpuCloudlet gpuCloudlet : (List<GpuCloudlet>) (List<?>) gpuCloudlets) {
			// Cloudlet
			AsciiTable at = new AsciiTable();
			at.addRule();
			at.addRow("Cloudlet ID", "Status", "Datacenter ID", "VM ID", "Time", "Start Time", "Finish Time");
			at.addRule();
			if (gpuCloudlet.getCloudletStatus() == Cloudlet.SUCCESS) {
				at.addRow(gpuCloudlet.getCloudletId(), "SUCCESS", gpuCloudlet.getResourceId(), gpuCloudlet.getVmId(),
						dft.format(gpuCloudlet.getActualCPUTime()).toString(),
						dft.format(gpuCloudlet.getExecStartTime()).toString(),
						dft.format(gpuCloudlet.getFinishTime()).toString());
				at.addRule();
			}
			GpuTask gpuTask = gpuCloudlet.getGpuTask();
			// Host-Device Memory Transfer
			AsciiTable atMT = new AsciiTable();
			atMT.addRule();
			atMT.addRow("Direction", "Time", "Start Time", "End Time");
			atMT.addRule();
			atMT.addRow("H2D", dft.format(gpuTask.getMemoryTransferHostToDevice().getTime()).toString(),
					dft.format(gpuTask.getMemoryTransferHostToDevice().startTime).toString(),
					dft.format(gpuTask.getMemoryTransferHostToDevice().endTime).toString());
			atMT.addRule();
			// Gpu Task
			at.addRow("Task ID", "Cloudlet ID", "Status", "vGPU Profile", "Time", "Start Time", "Finish Time");
			at.addRule();
			if (gpuTask.getTaskStatus() == GpuTask.FINISHED) {
				at.addRow(gpuTask.getTaskId(), gpuTask.getCloudlet().getCloudletId(), "SUCCESS",
						GridVgpuTags.getVgpuTypeString(
								((GpuVm) VmList.getById(vmlist, gpuTask.getCloudlet().getVmId())).getVgpu().getType()),
						dft.format(gpuTask.getActualGPUTime()).toString(),
						dft.format(gpuTask.getExecStartTime()).toString(),
						dft.format(gpuTask.getFinishTime()).toString());
				at.addRule();
			}
			// Device-Host Memory Transfer
			atMT.addRow("D2H", dft.format(gpuTask.getMemoryTransferDeviceToHost().getTime()).toString(),
					dft.format(gpuTask.getMemoryTransferDeviceToHost().startTime).toString(),
					dft.format(gpuTask.getMemoryTransferDeviceToHost().endTime).toString());
			atMT.addRule();
			at.getContext().setWidth(100);
			atMT.getContext().setWidth(100);
			Log.printLine(at.render());
			Log.printLine(atMT.render());
			Log.printLine(String.join("", Collections.nCopies(100, "-")));
		}
	}
}

Additional Notes

The biggest change from Example 1 is obviously the addition of the interference model, so these notes focus only on that.

For a Vgpu, the GDDRAM utilization reported by the running GpuTasks is used to evaluate the interference between them.

One point worth emphasizing: the utilization model has to be fixed up front. In other words, to model interference you must first supply a utilization model. From Example 1 we already know that UtilizationModelFull always reports a utilization of 1.0 (full), which makes it easy to see how the interference is implemented in InterferenceModelGpuMemory, shown below:

public class InterferenceModelGpuMemory implements InterferenceModel<ResGpuTask> {

	public InterferenceModelGpuMemory() {
	}

	@Override
	public List<Double> getAvailableMips(ResGpuTask rcl, List<Double> mipsShare, List<ResGpuTask> execList) {
		// Start from the task's own MIPS share on its assigned PEs.
		List<Double> allocatedMips = new ArrayList<Double>(Collections.nCopies(mipsShare.size(), 0.0));
		for (Integer peId : rcl.getPeIdList()) {
			allocatedMips.set(peId, mipsShare.get(peId));
		}
		// Accumulate the total GDDRAM utilization over all tasks currently executing on the vGPU.
		double totalMemoryUtilization = 0.0;
		for (ResGpuTask rgt : execList) {
			// In this example both gpuCloudlets run on the same vGPU with UtilizationModelFull,
			// so while they overlap this sums to 2.0.
			totalMemoryUtilization += rgt.getGpuTask().getUtilizationOfGddram(CloudSim.clock());
		}
		// No memory contention: the task keeps its full share.
		if (totalMemoryUtilization <= 1) {
			return allocatedMips;
		}
		// GDDRAM is oversubscribed: compute the scale-down factor.
		double scaleFactor = 1.0 / totalMemoryUtilization;
		// Shrink the task's allocated MIPS proportionally.
		for (int i = 0; i < allocatedMips.size(); i++) {
			allocatedMips.set(i, allocatedMips.get(i) * scaleFactor);
		}
		return allocatedMips;
	}
}
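
To make the scaling concrete, here is a minimal standalone sketch (plain Java, not part of gpucloudsim; the 1000-MIPS value and the 4-PE count are made-up illustration numbers) that reproduces only the arithmetic above for the situation in this example: two tasks each reporting full (1.0) GDDRAM utilization give a total of 2.0, so the scale factor is 0.5 and every PE share is halved.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class InterferenceScalingSketch {

	public static void main(String[] args) {
		// Hypothetical MIPS share of one task on each of the vGPU's 4 PEs.
		List<Double> allocatedMips = new ArrayList<Double>(Collections.nCopies(4, 1000.0));

		// GDDRAM utilization reported by each task currently executing on the vGPU;
		// with UtilizationModelFull every task reports 1.0.
		List<Double> gddramUtilization = Arrays.asList(1.0, 1.0);

		double totalMemoryUtilization = 0.0;
		for (double u : gddramUtilization) {
			totalMemoryUtilization += u;
		}

		if (totalMemoryUtilization > 1) {
			// GDDRAM is oversubscribed: every PE share shrinks proportionally.
			double scaleFactor = 1.0 / totalMemoryUtilization; // 1.0 / 2.0 = 0.5
			for (int i = 0; i < allocatedMips.size(); i++) {
				allocatedMips.set(i, allocatedMips.get(i) * scaleFactor);
			}
		}

		// Prints [500.0, 500.0, 500.0, 500.0]: each share is halved.
		System.out.println(allocatedMips);
	}
}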