<?xml version="1.0" encoding="UTF-8"?>
<workload name="create-bucket" description="create s3 bucket" config="">
<auth type="none" config=""/>
<workflow config="">
<!-- 创建桶 -->
<!-- Create buckets: rgw1 creates zp1..zp4 via endpoint :7481, rgw2 creates
     zp5..zp8 via endpoint :7482 (2 workers each, one init op per bucket). -->
<workstage name="create bucket" closuredelay="0" config="">
<auth type="none" config=""/>
<work name="rgw1" type="init" workers="2" interval="5"
division="container" runtime="0" rampup="0" rampdown="0"
afr="0" totalOps="1" totalBytes="0" config="cprefix=zp;containers=r(1,4)">
<auth type="none" config=""/>
<storage type="s3" config="accesskey=test1;secretkey=test1;endpoint=http://66.66.66.63:7481;path_style_access=true"/>
</work>
<!-- FIX: was containers=r(4,8), which overlapped bucket zp4 with the rgw1
     work above (risking BucketAlreadyExists on the second create) and did
     not match the r(5,8) range every later stage uses for rgw2. -->
<work name="rgw2" type="init" workers="2" interval="5"
division="container" runtime="0" rampup="0" rampdown="0"
afr="0" totalOps="1" totalBytes="0" config="cprefix=zp;containers=r(5,8)">
<auth type="none" config=""/>
<storage type="s3" config="accesskey=test1;secretkey=test1;endpoint=http://66.66.66.63:7482;path_style_access=true"/>
</work>
</workstage>
<!-- 写入对象 -->
<!-- Put objects: each gateway writes objects hj1..hj240 of 64KB into its own
     bucket range (rgw1: zp1..zp4, rgw2: zp5..zp8), 2 workers per work,
     sampling performance every 5s, capped at totalOps=240. -->
<workstage name="putobject" closuredelay="0" config="">
<auth type="none" config=""/>
<!-- NOTE(review): work names say "4M" but sizes=c(64)KB below writes 64KB
     objects — confirm which size was intended and rename accordingly. -->
<work name="rgw1-put-4M" type="normal" workers="2" interval="5"
division="container" runtime="0" rampup="0" rampdown="0"
afr="200000" totalOps="240" totalBytes="0" config="">
<auth type="none" config=""/>
<storage type="s3" config="timeout=300000;accesskey=test1;secretkey=test1;endpoint=http://66.66.66.63:7481;path_style_access=true"/>
<operation type="write" ratio="100" division="none"
config="cprefix=zp;containers=r(1,4);oprefix=hj;objects=r(1,240);sizes=c(64)KB" id="op1"/>
</work>
<work name="rgw2-put-4M" type="normal" workers="2" interval="5"
division="container" runtime="0" rampup="0" rampdown="0"
afr="200000" totalOps="240" totalBytes="0" config="">
<auth type="none" config=""/>
<storage type="s3" config="timeout=300000;accesskey=test1;secretkey=test1;endpoint=http://66.66.66.63:7482;path_style_access=true"/>
<operation type="write" ratio="100" division="none"
config="cprefix=zp;containers=r(5,8);oprefix=hj;objects=r(1,240);sizes=c(64)KB" id="op1"/>
</work>
</workstage>
<!-- 读取对象 -->
<!-- Read objects: each gateway reads back the objects written by the
     putobject stage (same container/object ranges), capped at totalOps=240. -->
<workstage name="readobject" closuredelay="0" config="">
<auth type="none" config=""/>
<!-- FIX: work was named "rgw1-put-4M" (copy-paste from the put stage),
     which was inconsistent with the sibling "rgw2-read-4M" and duplicated
     a name from the previous stage in the report output. -->
<work name="rgw1-read-4M" type="normal" workers="2" interval="5"
division="container" runtime="0" rampup="0" rampdown="0"
afr="200000" totalOps="240" totalBytes="0" config="">
<auth type="none" config=""/>
<storage type="s3" config="timeout=300000;accesskey=test1;secretkey=test1;endpoint=http://66.66.66.63:7481;path_style_access=true"/>
<operation type="read" ratio="100" division="none"
config="cprefix=zp;containers=r(1,4);oprefix=hj;objects=r(1,240);sizes=c(64)KB" id="op1"/>
</work>
<work name="rgw2-read-4M" type="normal" workers="2" interval="5"
division="container" runtime="0" rampup="0" rampdown="0"
afr="200000" totalOps="240" totalBytes="0" config="">
<auth type="none" config=""/>
<storage type="s3" config="timeout=300000;accesskey=test1;secretkey=test1;endpoint=http://66.66.66.63:7482;path_style_access=true"/>
<operation type="read" ratio="100" division="none"
config="cprefix=zp;containers=r(5,8);oprefix=hj;objects=r(1,240);sizes=c(64)KB" id="op1"/>
</work>
</workstage>
<!-- 删除对象 -->
<!-- Delete objects hj1..hj240 from both gateways' bucket ranges;
     deleteContainer=false keeps the buckets for the dispose stage below. -->
<!-- FIX: stage was named "rgw1-cleanup" although it cleans up BOTH rgw1 and
     rgw2; renamed to "cleanup" to match the other stages' naming. -->
<workstage name="cleanup" closuredelay="0" config="">
<auth type="none" config=""/>
<work name="rgw1-cleanup" type="cleanup" workers="2" interval="5"
division="object" runtime="0" rampup="0" rampdown="0"
afr="0" totalOps="1" totalBytes="0" config="cprefix=zp;containers=r(1,4);oprefix=hj;objects=r(1,240);deleteContainer=false;">
<auth type="none" config=""/>
<storage type="s3" config="timeout=300000;accesskey=test1;secretkey=test1;endpoint=http://66.66.66.63:7481;path_style_access=true"/>
</work>
<work name="rgw2-cleanup" type="cleanup" workers="2" interval="5"
division="object" runtime="0" rampup="0" rampdown="0"
afr="0" totalOps="1" totalBytes="0" config="cprefix=zp;containers=r(5,8);oprefix=hj;objects=r(1,240);deleteContainer=false;">
<auth type="none" config=""/>
<storage type="s3" config="timeout=300000;accesskey=test1;secretkey=test1;endpoint=http://66.66.66.63:7482;path_style_access=true"/>
</work>
</workstage>
<!-- 删除桶 -->
<!-- Delete the buckets left behind by the cleanup stage
     (rgw1: zp1..zp4 via :7481, rgw2: zp5..zp8 via :7482). -->
<workstage name="dispose" closuredelay="0" config="">
  <auth type="none" config=""/>
  <work name="rgw1-dispose" type="dispose" workers="2" interval="5" division="container"
        runtime="0" rampup="0" rampdown="0" afr="0" totalOps="1" totalBytes="0"
        config="cprefix=zp;containers=r(1,4);">
    <auth type="none" config=""/>
    <storage type="s3" config="timeout=300000;accesskey=test1;secretkey=test1;endpoint=http://66.66.66.63:7481;path_style_access=true"/>
  </work>
  <work name="rgw2-dispose" type="dispose" workers="2" interval="5" division="container"
        runtime="0" rampup="0" rampdown="0" afr="0" totalOps="1" totalBytes="0"
        config="cprefix=zp;containers=r(5,8);">
    <auth type="none" config=""/>
    <storage type="s3" config="timeout=300000;accesskey=test1;secretkey=test1;endpoint=http://66.66.66.63:7482;path_style_access=true"/>
  </work>
</workstage>
</workflow>
</workload>
work 相关的说明
- 可以通过写入时间,写入容量,写入 iops 来控制什么时候结束
- interval 默认是 5s 是用来对性能快照的间隔,可以理解为采样点
- division 控制 workers 之间的分配工作的方式是 bucket 还是对象还是 none
- 默认全部的 driver 参与工作,也可以通过参数控制部分 driver 参与
- 时间会控制执行,如果时间没到,但是指定的对象已经写完了的话就会去进行复写的操作,这里要注意是进行对象的控制还是时间的控制进行的测试
- 可以通过配置多个 work 的方式来实现并发,而在 work 内通过增加 worker 的方式增加并发,从而实现多对多的访问,worker 的分摊是分到了 driver 上面,注意多 work 的时候的 containers 不要重名,划分好 bucket 的空间
同一节点添加多个driver
添加不同节点的driver
参数指标
- Avg-ResTime:响应平均时间
- Avg-ProcTime:平均处理时间
- Throughput:吞吐量,也就是我们常说的 TPS
- Bandwidth:带宽
- Succ-Ratio:成功率