/*
 * Copyright 2012-2013 University Of Southern California
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
* the License.*/packageorg.workflowsim.examples;importjava.io.File;importjava.text.DecimalFormat;importjava.util.ArrayList;importjava.util.Calendar;importjava.util.LinkedList;importjava.util.List;importorg.cloudbus.cloudsim.Cloudlet;importorg.cloudbus.cloudsim.CloudletSchedulerSpaceShared;importorg.cloudbus.cloudsim.DatacenterCharacteristics;importorg.cloudbus.cloudsim.HarddriveStorage;importorg.cloudbus.cloudsim.Host;importorg.cloudbus.cloudsim.Log;importorg.cloudbus.cloudsim.Pe;importorg.cloudbus.cloudsim.Storage;importorg.cloudbus.cloudsim.VmAllocationPolicySimple;importorg.cloudbus.cloudsim.VmSchedulerTimeShared;importorg.cloudbus.cloudsim.core.CloudSim;importorg.cloudbus.cloudsim.provisioners.BwProvisionerSimple;importorg.cloudbus.cloudsim.provisioners.PeProvisionerSimple;importorg.cloudbus.cloudsim.provisioners.RamProvisionerSimple;importorg.workflowsim.CondorVM;importorg.workflowsim.Task;importorg.workflowsim.WorkflowDatacenter;importorg.workflowsim.Job;importorg.workflowsim.WorkflowEngine;importorg.workflowsim.WorkflowPlanner;importorg.workflowsim.utils.ClusteringParameters;importorg.workflowsim.utils.OverheadParameters;importorg.workflowsim.utils.Parameters;importorg.workflowsim.utils.ReplicaCatalog;importorg.workflowsim.utils.Parameters.ClassType;/**
* This WorkflowSimExample creates a workflow planner, a workflow engine, and
* one schedulers, one data centers and 20 vms. You should change daxPath at
* least. You may change other parameters as well.
*
* @author Weiwei Chen
* @since WorkflowSim Toolkit 1.0
* @date Apr 9, 2013
*/publicclassWorkflowSimBasicExample1{protectedstaticList<CondorVM>createVM(int userId,int vms){//Creates a container to store VMs. This list is passed to the broker laterLinkedList<CondorVM> list =newLinkedList<>();//VM Parameterslong size =10000;//image size (MB)int ram =512;//vm memory (MB)int mips =1000;long bw =1000;int pesNumber =1;//number of cpusString vmm ="Xen";//VMM name//create VMsCondorVM[] vm =newCondorVM[vms];for(int i =0; i < vms; i++){double ratio =1.0;
vm[i]=newCondorVM(i, userId, mips * ratio, pesNumber, ram, bw, size, vmm,newCloudletSchedulerSpaceShared());
list.add(vm[i]);}return list;}// STATIC METHODS ////**
* Creates main() to run this example This example has only one datacenter
* and one storage
*/publicstaticvoidmain(String[] args){try{// First step: Initialize the WorkflowSim package. /**
* However, the exact number of vms may not necessarily be vmNum If
* the data center or the host doesn't have sufficient resources the
* exact vmNum would be smaller than that. Take care.
*/int vmNum =20;//number of vms;/**
* Should change this based on real physical path
*///String daxPath = "/Users/weiweich/NetBeansProjects/WorkflowSim-1.0/config/dax/Montage_100.xml";String daxPath ="F:\\WorkFlowSim\\WorkflowSim-1.0-master\\config\\dax\\Montage_100.xml";File daxFile =newFile(daxPath);if(!daxFile.exists()){Log.printLine("Warning: Please replace daxPath with the physical path in your working environment!");return;}/**
* Since we are using MINMIN scheduling algorithm, the planning
* algorithm should be INVALID such that the planner would not
* override the result of the scheduler
*/Parameters.SchedulingAlgorithm sch_method =Parameters.SchedulingAlgorithm.MINMIN;Parameters.PlanningAlgorithm pln_method =Parameters.PlanningAlgorithm.INVALID;ReplicaCatalog.FileSystem file_system =ReplicaCatalog.FileSystem.SHARED;/**
* No overheads
*/OverheadParameters op =newOverheadParameters(0,null,null,null,null,0);/**
* No Clustering
*/ClusteringParameters.ClusteringMethod method =ClusteringParameters.ClusteringMethod.NONE;ClusteringParameters cp =newClusteringParameters(0,0, method,null);/**
* Initialize static parameters
*/Parameters.init(vmNum, daxPath,null,null, op, cp, sch_method, pln_method,null,0);ReplicaCatalog.init(file_system);// before creating any entities.int num_user =1;// number of grid usersCalendar calendar =Calendar.getInstance();boolean trace_flag =false;// mean trace events// Initialize the CloudSim libraryCloudSim.init(num_user, calendar, trace_flag);WorkflowDatacenter datacenter0 =createDatacenter("Datacenter_0");/**
* Create a WorkflowPlanner with one schedulers.
*/WorkflowPlanner wfPlanner =newWorkflowPlanner("planner_0",1);/**
* Create a WorkflowEngine.
*/WorkflowEngine wfEngine = wfPlanner.getWorkflowEngine();/**
* Create a list of VMs.The userId of a vm is basically the id of
* the scheduler that controls this vm.
*/List<CondorVM> vmlist0 =createVM(wfEngine.getSchedulerId(0),Parameters.getVmNum());/**
* Submits this list of vms to this WorkflowEngine.
*/
wfEngine.submitVmList(vmlist0,0);/**
* Binds the data centers with the scheduler.
*/
wfEngine.bindSchedulerDatacenter(datacenter0.getId(),0);CloudSim.startSimulation();List<Job> outputList0 = wfEngine.getJobsReceivedList();CloudSim.stopSimulation();printJobList(outputList0);}catch(Exception e){Log.printLine("The simulation has been terminated due to an unexpected error");}}protectedstaticWorkflowDatacentercreateDatacenter(String name){// Here are the steps needed to create a PowerDatacenter:// 1. We need to create a list to store one or more// MachinesList<Host> hostList =newArrayList<>();// 2. A Machine contains one or more PEs or CPUs/Cores. Therefore, should// create a list to store these PEs before creating// a Machine.for(int i =1; i <=20; i++){List<Pe> peList1 =newArrayList<>();int mips =2000;// 3. Create PEs and add these into the list.//for a quad-core machine, a list of 4 PEs is required:
peList1.add(newPe(0,newPeProvisionerSimple(mips)));// need to store Pe id and MIPS Rating
peList1.add(newPe(1,newPeProvisionerSimple(mips)));int hostId =0;int ram =2048;//host memory (MB)long storage =1000000;//host storageint bw =10000;
hostList.add(newHost(
hostId,newRamProvisionerSimple(ram),newBwProvisionerSimple(bw),
storage,
peList1,newVmSchedulerTimeShared(peList1)));// This is our first machine//hostId++;}// 4. Create a DatacenterCharacteristics object that stores the// properties of a data center: architecture, OS, list of// Machines, allocation policy: time- or space-shared, time zone// and its price (G$/Pe time unit).String arch ="x86";// system architectureString os ="Linux";// operating systemString vmm ="Xen";double time_zone =10.0;// time zone this resource locateddouble cost =3.0;// the cost of using processing in this resourcedouble costPerMem =0.05;// the cost of using memory in this resourcedouble costPerStorage =0.1;// the cost of using storage in this resourcedouble costPerBw =0.1;// the cost of using bw in this resourceLinkedList<Storage> storageList =newLinkedList<>();//we are not adding SAN devices by nowWorkflowDatacenter datacenter =null;DatacenterCharacteristics characteristics =newDatacenterCharacteristics(
arch, os, vmm, hostList, time_zone, cost, costPerMem, costPerStorage, costPerBw);// 5. Finally, we need to create a storage object./**
* The bandwidth within a data center in MB/s.
*/int maxTransferRate =15;// the number comes from the futuregrid site, you can specify your bwtry{// Here we set the bandwidth to be 15MB/sHarddriveStorage s1 =newHarddriveStorage(name,1e12);
s1.setMaxTransferRate(maxTransferRate);
storageList.add(s1);
datacenter =newWorkflowDatacenter(name, characteristics,newVmAllocationPolicySimple(hostList), storageList,0);}catch(Exception e){
e.printStackTrace();}return datacenter;}/**
* Prints the job objects
*
* @param list list of jobs
*/protectedstaticvoidprintJobList(List<Job> list){String indent =" ";Log.printLine();Log.printLine("========== OUTPUT ==========");Log.printLine("Job ID"+ indent +"Task ID"+ indent +"STATUS"+ indent
+"Data center ID"+ indent +"VM ID"+ indent + indent
+"Time"+ indent +"Start Time"+ indent +"Finish Time"+ indent +"Depth");DecimalFormat dft =newDecimalFormat("###.##");for(Job job : list){Log.print(indent + job.getCloudletId()+ indent + indent);if(job.getClassType()==ClassType.STAGE_IN.value){Log.print("Stage-in");}for(Task task : job.getTaskList()){Log.print(task.getCloudletId()+",");}Log.print(indent);if(job.getCloudletStatus()==Cloudlet.SUCCESS){Log.print("SUCCESS");Log.printLine(indent + indent + job.getResourceId()+ indent + indent + indent + job.getVmId()+ indent + indent + indent + dft.format(job.getActualCPUTime())+ indent + indent + dft.format(job.getExecStartTime())+ indent + indent + indent
+ dft.format(job.getFinishTime())+ indent + indent + indent + job.getDepth());}elseif(job.getCloudletStatus()==Cloudlet.FAILED){Log.print("FAILED");Log.printLine(indent + indent + job.getResourceId()+ indent + indent + indent + job.getVmId()+ indent + indent + indent + dft.format(job.getActualCPUTime())+ indent + indent + dft.format(job.getExecStartTime())+ indent + indent + indent
+ dft.format(job.getFinishTime())+ indent + indent + indent + job.getDepth());}}}}
packageorg.workflowsim.examples;importjava.io.File;importjava.text.DecimalFormat;importjava.util.ArrayList;importjava.util.Calendar;importjava.util.LinkedList;importjava.util.List;importorg.cloudbus.cloudsim.Cloudlet;importorg.cloudbus.cloudsim.CloudletSchedulerSpaceShared;importorg.cloudbus.cloudsim.DatacenterCharacteristics;importorg.cloudbus.cloudsim.HarddriveStorage;importorg.cloudbus.cloudsim.Host;importorg.cloudbus.cloudsim.Log;importorg.cloudbus.cloudsim.Pe;importorg.cloudbus.cloudsim.Storage;importorg.cloudbus.cloudsim.VmAllocationPolicySimple;importorg.cloudbus.cloudsim.VmSchedulerTimeShared;importorg.cloudbus.cloudsim.core.CloudSim;importorg.cloudbus.cloudsim.provisioners.BwProvisionerSimple;importorg.cloudbus.cloudsim.provisioners.PeProvisionerSimple;importorg.cloudbus.cloudsim.provisioners.RamProvisionerSimple;importorg.workflowsim.CondorVM;importorg.workflowsim.Task;importorg.workflowsim.WorkflowDatacenter;importorg.workflowsim.Job;importorg.workflowsim.WorkflowEngine;importorg.workflowsim.WorkflowPlanner;importorg.workflowsim.utils.ClusteringParameters;importorg.workflowsim.utils.OverheadParameters;importorg.workflowsim.utils.Parameters;importorg.workflowsim.utils.ReplicaCatalog;importorg.workflowsim.utils.Parameters.ClassType;/**
这个WorkflowSimExample 创建了
一个工作流计划器(workflow planner)、
一个工作流引擎(workflow engine)、
一个调度器(schedulers)、
一个数据中心(data centers)、
20个虚拟机(20 vms)。
您至少应该更改daxPath。您还可以更改其他参数。
*/publicclassWorkflowSimBasicExampleTest{protectedstaticList<CondorVM>createVM(int userId,int vms){//创建用于存放虚拟机的容器。该列表稍后被传递给代理LinkedList<CondorVM> list =newLinkedList<>();// 虚拟机参数(VM Parameters)long size =10000;// image size (MB) 划分的虚拟硬盘大小int ram =512;// vm memory (MB) 虚拟内存int mips =1000;// mips:Million Instructions Per Second 衡量cpu的性能指标long bw =1000;// bindwidth:带宽 任务到任务的传输成本int pesNumber =1;// number of cpus cpu的数量String vmm ="Xen";// VMM name VMM: Virtual Machine Manager 虚拟机管理名称// create VMs 创建虚拟机集群CondorVM[] vm =newCondorVM[vms];// CondorVm扩展一个Vmfor(int i =0; i < vms; i++){double ratio =1.0;
vm[i]=newCondorVM(i, userId, mips * ratio, pesNumber, ram, bw, size, vmm,newCloudletSchedulerSpaceShared());// cloudletschedulerspacesshared 实现了由虚拟机执行的调度策略 云任务调度时间共享,所有任务同时运行
list.add(vm[i]);}return list;}// STATIC METHODS ////**
创建main()以运行此示例。此示例只有一个数据中心(datacenter)和一个存储(storage)
*/publicstaticvoidmain(String[] args){try{// 第一步:初始化WorkflowSim包./**
但是,虚拟机的确切数量不一定是vmNum。如果数据中心或主机没有足够的资源,那么准确的vmNum可能会小于vmNum。当心
*/int vmNum =20;// number of vms; 虚拟机的数量/**
应该根据实际物理路径进行更改
*/// String daxPath ="/Users/weiweich/NetBeansProjects/WorkflowSim-1.0/config/dax/Montage_100.xml";String daxPath ="F:\\WorkFlowSim\\WorkflowSim-1.0-master\\config\\dax\\Montage_100.xml";File daxFile =newFile(daxPath);if(!daxFile.exists()){Log.printLine("Warning: Please replace daxPath with the physical path in your working environment!");return;}/**
由于我们使用MINMIN调度算法(MINMIN scheduling algorithm),所以规划算法(planning algorithm)应该是INVALID,这样规划程序就不会覆盖调度程序的结果
*/Parameters.SchedulingAlgorithm sch_method =Parameters.SchedulingAlgorithm.MINMIN;Parameters.PlanningAlgorithm pln_method =Parameters.PlanningAlgorithm.INVALID;ReplicaCatalog.FileSystem file_system =ReplicaCatalog.FileSystem.SHARED;/**
* No overheads 费用开支
*/OverheadParameters op =newOverheadParameters(0,null,null,null,null,0);/**
* No Clustering 集群
*/ClusteringParameters.ClusteringMethod method =ClusteringParameters.ClusteringMethod.NONE;ClusteringParameters cp =newClusteringParameters(0,0, method,null);// 初始化静态参数Parameters.init(vmNum, daxPath,null,null, op, cp, sch_method, pln_method,null,0);ReplicaCatalog.init(file_system);// 在创建任何实体之前int num_user =1;// number of grid users 网格用户数Calendar calendar =Calendar.getInstance();// 日历boolean trace_flag =false;// mean trace events 平均跟踪事件// 初始化CloudSim库CloudSim.init(num_user, calendar, trace_flag);WorkflowDatacenter datacenter0 =createDatacenter("Datacenter_0");// 创建带有一个调度器的WorkflowPlanner工作流计划器。WorkflowPlanner wfPlanner =newWorkflowPlanner("planner_0",1);// 创建一个工作流引擎WorkflowEngine wfEngine = wfPlanner.getWorkflowEngine();// 创建虚拟机列表。虚拟机的userId基本上就是调度程序的id控制这个vmList<CondorVM> vmlist0 =createVM(wfEngine.getSchedulerId(0),Parameters.getVmNum());// 将这个虚拟机列表提交给这个WorkflowEngine
wfEngine.submitVmList(vmlist0,0);// 将数据中心(data centers)绑定到调度程序
wfEngine.bindSchedulerDatacenter(datacenter0.getId(),0);CloudSim.startSimulation();// 开始仿真List<Job> outputList0 = wfEngine.getJobsReceivedList();// 获取已接收的作业列表CloudSim.stopSimulation();printJobList(outputList0);}catch(Exception e){Log.printLine("The simulation has been terminated due to an unexpected error");}}protectedstaticWorkflowDatacentercreateDatacenter(String name){// 创建数据中心// 以下是创建PowerDatacenter所需的步骤:// 1. 我们需要创建一个列表来存储一个或多个machineList<Host> hostList =newArrayList<>();// 2. 一台机器包含一个或多个pe或内核/cpu(CPUs/Cores)。因此,应该在创建Machine之前创建一个列表来存储这些PE。// Pe代表cpu单元,字段:pe状态和分配策略peProvisioner 同一台主机所有pe有相同的处理能力for(int i =1; i <=20; i++){List<Pe> peList1 =newArrayList<>();int mips =2000;// 3. 创建PE并将其添加到列表中。// 对于四核机器,需要列出4个pe(PEid,MIPS速度):
peList1.add(newPe(0,newPeProvisionerSimple(mips)));// 需要存储Pe id和MIPS等级
peList1.add(newPe(1,newPeProvisionerSimple(mips)));// 下面是声明了host的参数,并将其添加到机器列表中// host参数:hostID、内存、存储容量、带宽int hostId =0;int ram =2048;// host memory (MB)long storage =1000000;// host storageint bw =10000;
hostList.add(newHost(hostId,newRamProvisionerSimple(ram),newBwProvisionerSimple(bw), storage, peList1,newVmSchedulerTimeShared(peList1)));// 创建机器完成}/*
4、属性创建一个DatacenterCharacteristics对象数据中心的属性:体系结构,操作系统,列表机器,
分配策略:时间或空间共享,时区及其价格(G$/Pe时间单位)。
*/String arch ="x86";// system architectureString os ="Linux";// operating systemString vmm ="Xen";double time_zone =10.0;// time zone this resource located 该资源所在的时区double cost =3.0;// the cost of using processing in this resource 在此资源中使用处理的成本double costPerMem =0.05;// the cost of using memory in this resource 在此资源中使用内存的成本double costPerStorage =0.1;// the cost of using storage in this resource 在此资源中使用存储的成本double costPerBw =0.1;// the cost of using bw in this resource 在此资源中使用bw带宽的成本LinkedList<Storage> storageList =newLinkedList<>();// we are not adding SAN devices by now 我们现在还没有添加SAN设备WorkflowDatacenter datacenter =null;DatacenterCharacteristics characteristics =newDatacenterCharacteristics(arch, os, vmm, hostList, time_zone,
cost, costPerMem, costPerStorage, costPerBw);// 5. 最后,我们需要创建一个数据中心对象。// 数据中心内的带宽,以MB/s为单位。int maxTransferRate =15;// 这个数字来自futuregrid网站,你可以指定你的带宽bwtry{// 这里我们将带宽设置为15MB/sHarddriveStorage s1 =newHarddriveStorage(name,1e12);
s1.setMaxTransferRate(maxTransferRate);
storageList.add(s1);
datacenter =newWorkflowDatacenter(name, characteristics,newVmAllocationPolicySimple(hostList),
storageList,0);}catch(Exception e){
e.printStackTrace();}return datacenter;}/**
* Prints the job objects
*
* @param list list of jobs
*/protectedstaticvoidprintJobList(List<Job> list){String indent =" ";Log.printLine();Log.printLine("========== OUTPUT ==========");Log.printLine("Job ID"+ indent +"Task ID"+ indent +"STATUS"+ indent +"Data center ID"+ indent +"VM ID"+ indent + indent+"Time"+ indent +"Start Time"+ indent +"Finish Time"+ indent +"Depth");DecimalFormat dft =newDecimalFormat("###.##");for(Job job : list){Log.print(indent + job.getCloudletId()+ indent + indent);if(job.getClassType()==ClassType.STAGE_IN.value){Log.print("Stage-in");}for(Task task : job.getTaskList()){Log.print(task.getCloudletId()+",");}Log.print(indent);if(job.getCloudletStatus()==Cloudlet.SUCCESS){Log.print("SUCCESS");Log.printLine(indent + indent + job.getResourceId()+ indent + indent + indent + job.getVmId()+ indent
+ indent + indent + dft.format(job.getActualCPUTime())+ indent + indent
+ dft.format(job.getExecStartTime())+ indent + indent + indent
+ dft.format(job.getFinishTime())+ indent + indent + indent + job.getDepth());}elseif(job.getCloudletStatus()==Cloudlet.FAILED){Log.print("FAILED");Log.printLine(indent + indent + job.getResourceId()+ indent + indent + indent + job.getVmId()+ indent
+ indent + indent + dft.format(job.getActualCPUTime())+ indent + indent
+ dft.format(job.getExecStartTime())+ indent + indent + indent
+ dft.format(job.getFinishTime())+ indent + indent + indent + job.getDepth());}}}}
packageorg.workflowsim.examples;importjava.io.File;importjava.text.DecimalFormat;importjava.util.ArrayList;importjava.util.Calendar;importjava.util.LinkedList;importjava.util.List;importorg.cloudbus.cloudsim.Cloudlet;importorg.cloudbus.cloudsim.CloudletSchedulerSpaceShared;importorg.cloudbus.cloudsim.DatacenterCharacteristics;importorg.cloudbus.cloudsim.HarddriveStorage;importorg.cloudbus.cloudsim.Host;importorg.cloudbus.cloudsim.Log;importorg.cloudbus.cloudsim.Pe;importorg.cloudbus.cloudsim.Storage;importorg.cloudbus.cloudsim.VmAllocationPolicySimple;importorg.cloudbus.cloudsim.VmSchedulerTimeShared;importorg.cloudbus.cloudsim.core.CloudSim;importorg.cloudbus.cloudsim.provisioners.BwProvisionerSimple;importorg.cloudbus.cloudsim.provisioners.PeProvisionerSimple;importorg.cloudbus.cloudsim.provisioners.RamProvisionerSimple;importorg.workflowsim.CondorVM;importorg.workflowsim.Task;importorg.workflowsim.WorkflowDatacenter;importorg.workflowsim.Job;importorg.workflowsim.WorkflowEngine;importorg.workflowsim.WorkflowPlanner;importorg.workflowsim.utils.ClusteringParameters;importorg.workflowsim.utils.OverheadParameters;importorg.workflowsim.utils.Parameters;importorg.workflowsim.utils.ReplicaCatalog;importorg.workflowsim.utils.Parameters.ClassType;/**
这个WorkflowSimExample 创建了
一个工作流计划器(workflow planner)、
一个工作流引擎(workflow engine)、
一个调度器(schedulers)、
一个数据中心(data centers)、
20个虚拟机(20 vms)。
您至少应该更改daxPath。您还可以更改其他参数。
*/publicclassWorkflowSimBasicExampleTest{protectedstaticList<CondorVM>createVM(int userId,int vms){//创建用于存放虚拟机的容器。该列表稍后被传递给代理LinkedList<CondorVM> list =newLinkedList<>();// 虚拟机参数(VM Parameters)long size =10000;// image size (MB) 划分的虚拟硬盘大小int ram =512;// vm memory (MB) 虚拟内存int mips =1000;// mips:Million Instructions Per Second 衡量cpu的性能指标long bw =1000;// bindwidth:带宽 任务到任务的传输成本int pesNumber =1;// number of cpus cpu的数量String vmm ="Xen";// VMM name VMM: Virtual Machine Manager 虚拟机管理名称// create VMs 创建虚拟机集群CondorVM[] vm =newCondorVM[vms];// CondorVm扩展一个Vmfor(int i =0; i < vms; i++){double ratio =1.0;
vm[i]=newCondorVM(i, userId, mips * ratio, pesNumber, ram, bw, size, vmm,newCloudletSchedulerSpaceShared());// cloudletschedulerspacesshared 实现了由虚拟机执行的调度策略 云任务调度时间共享,所有任务同时运行
list.add(vm[i]);}return list;}// STATIC METHODS ////**
创建main()以运行此示例。此示例只有一个数据中心(datacenter)和一个存储(storage)
*/publicstaticvoidmain(String[] args){try{// 第一步:初始化WorkflowSim包./**
但是,虚拟机的确切数量不一定是vmNum。如果数据中心或主机没有足够的资源,那么准确的vmNum可能会小于vmNum。当心
*/int vmNum =20;// number of vms; 虚拟机的数量/**
应该根据实际物理路径进行更改
*/// String daxPath ="/Users/weiweich/NetBeansProjects/WorkflowSim-1.0/config/dax/Montage_100.xml";String daxPath ="F:\\WorkFlowSim\\WorkflowSim-1.0-master\\config\\dax\\Montage_100.xml";File daxFile =newFile(daxPath);if(!daxFile.exists()){Log.printLine("Warning: Please replace daxPath with the physical path in your working environment!");return;}/**
由于我们使用MINMIN调度算法(MINMIN scheduling algorithm),所以规划算法(planning algorithm)应该是INVALID,这样规划程序就不会覆盖调度程序的结果
*/// 枚举类型 直接引用Parameters.SchedulingAlgorithm sch_method =Parameters.SchedulingAlgorithm.MINMIN;Parameters.PlanningAlgorithm pln_method =Parameters.PlanningAlgorithm.INVALID;ReplicaCatalog.FileSystem file_system =ReplicaCatalog.FileSystem.SHARED;/**
* No overheads 没有费用开支
*/OverheadParameters op =newOverheadParameters(0,null,null,null,null,0);/**
* No Clustering 没有集群
*/ClusteringParameters.ClusteringMethod method =ClusteringParameters.ClusteringMethod.NONE;ClusteringParameters cp =newClusteringParameters(0,0, method,null);// 初始化静态参数Parameters.init(vmNum, daxPath,null,null, op, cp, sch_method, pln_method,null,0);ReplicaCatalog.init(file_system);// 在创建任何实体之前int num_user =1;// number of grid users 网格用户数Calendar calendar =Calendar.getInstance();// 日历boolean trace_flag =false;// mean trace events 平均跟踪事件// 初始化CloudSim库CloudSim.init(num_user, calendar, trace_flag);// 创建数据中心 datacenter0WorkflowDatacenter datacenter0 =createDatacenter("Datacenter_0");// 创建带有一个调度器的WorkflowPlanner工作流计划器。WorkflowPlanner wfPlanner =newWorkflowPlanner("planner_0",1);// 创建一个工作流引擎WorkflowEngine wfEngine = wfPlanner.getWorkflowEngine();// 创建虚拟机列表。虚拟机的userId基本上就是调度程序的id控制这个vmList<CondorVM> vmlist0 =createVM(wfEngine.getSchedulerId(0),Parameters.getVmNum());// 将这个虚拟机列表提交给这个WorkflowEngine(将包含虚拟机的列表发送给代理必须创建)
wfEngine.submitVmList(vmlist0,0);// 将数据中心(data centers)绑定到调度程序
wfEngine.bindSchedulerDatacenter(datacenter0.getId(),0);// 开始仿真CloudSim.startSimulation();// 获取已接收的作业列表List<Job> outputList0 = wfEngine.getJobsReceivedList();// 仿真结束CloudSim.stopSimulation();// 打印仿真结果printJobList(outputList0);}catch(Exception e){Log.printLine("The simulation has been terminated due to an unexpected error");}}protectedstaticWorkflowDatacentercreateDatacenter(String name){// 创建数据中心// 以下是创建PowerDatacenter所需的步骤:// 1. 我们需要创建一个列表来存储一个或多个machineList<Host> hostList =newArrayList<>();// 2. 一台机器包含一个或多个pe或内核/cpu(CPUs/Cores)。因此,应该在创建Machine之前创建一个列表来存储这些PE。// Pe代表cpu单元,字段:pe状态和分配策略peProvisioner 同一台主机所有pe有相同的处理能力for(int i =1; i <=20; i++){List<Pe> peList1 =newArrayList<>();int mips =2000;// 3. 创建PE并将其添加到列表中。// 对于四核机器,需要列出4个pe(PEid,MIPS速度):
peList1.add(newPe(0,newPeProvisionerSimple(mips)));// 需要存储Pe id和MIPS等级
peList1.add(newPe(1,newPeProvisionerSimple(mips)));// 下面是声明了host的参数,并将其添加到机器列表中// host参数:hostID、内存、存储容量、带宽int hostId =0;int ram =2048;// host memory (MB)long storage =1000000;// host storageint bw =10000;
hostList.add(newHost(hostId,newRamProvisionerSimple(ram),newBwProvisionerSimple(bw), storage, peList1,newVmSchedulerTimeShared(peList1)));// 创建机器完成}/*
4、属性创建一个DatacenterCharacteristics对象数据中心的属性:体系结构,操作系统,列表机器,
分配策略:时间或空间共享,时区及其价格(G$/Pe时间单位)。
*/String arch ="x86";// system architectureString os ="Linux";// operating systemString vmm ="Xen";double time_zone =10.0;// time zone this resource located 该资源所在的时区double cost =3.0;// the cost of using processing in this resource 在此资源中使用处理的成本double costPerMem =0.05;// the cost of using memory in this resource 在此资源中使用内存的成本double costPerStorage =0.1;// the cost of using storage in this resource 在此资源中使用存储的成本double costPerBw =0.1;// the cost of using bw in this resource 在此资源中使用bw带宽的成本LinkedList<Storage> storageList =newLinkedList<>();// we are not adding SAN devices by now 我们现在还没有添加SAN设备WorkflowDatacenter datacenter =null;DatacenterCharacteristics characteristics =newDatacenterCharacteristics(arch, os, vmm, hostList, time_zone,
cost, costPerMem, costPerStorage, costPerBw);// 5. 最后,我们需要创建一个数据中心对象。// 数据中心内的带宽,以MB/s为单位。int maxTransferRate =15;// 这个数字来自futuregrid网站,你可以指定你的带宽bwtry{// 这里我们将带宽设置为15MB/sHarddriveStorage s1 =newHarddriveStorage(name,1e12);
s1.setMaxTransferRate(maxTransferRate);
storageList.add(s1);
datacenter =newWorkflowDatacenter(name, characteristics,newVmAllocationPolicySimple(hostList),
storageList,0);}catch(Exception e){
e.printStackTrace();}return datacenter;}/**
* Prints the job objects
*
* @param list list of jobs
*/protectedstaticvoidprintJobList(List<Job> list){String indent =" ";Log.printLine();Log.printLine("========== OUTPUT ==========");Log.printLine("Job ID"+ indent +"Task ID"+ indent +"STATUS"+ indent +"Data center ID"+ indent +"VM ID"+ indent + indent+"Time"+ indent +"Start Time"+ indent +"Finish Time"+ indent +"Depth");DecimalFormat dft =newDecimalFormat("###.##");for(Job job : list){Log.print(indent + job.getCloudletId()+ indent + indent);if(job.getClassType()==ClassType.STAGE_IN.value){Log.print("Stage-in");}for(Task task : job.getTaskList()){Log.print(task.getCloudletId()+",");}Log.print(indent);if(job.getCloudletStatus()==Cloudlet.SUCCESS){Log.print("SUCCESS");Log.printLine(indent + indent + job.getResourceId()+ indent + indent + indent + job.getVmId()+ indent
+ indent + indent + dft.format(job.getActualCPUTime())+ indent + indent
+ dft.format(job.getExecStartTime())+ indent + indent + indent
+ dft.format(job.getFinishTime())+ indent + indent + indent + job.getDepth());}elseif(job.getCloudletStatus()==Cloudlet.FAILED){Log.print("FAILED");Log.printLine(indent + indent + job.getResourceId()+ indent + indent + indent + job.getVmId()+ indent
+ indent + indent + dft.format(job.getActualCPUTime())+ indent + indent
+ dft.format(job.getExecStartTime())+ indent + indent + indent
+ dft.format(job.getFinishTime())+ indent + indent + indent + job.getDepth());}}}}