1. 列出所有的shard Server
switched to db admin
mongos> db.runCommand({listshards:1}) --列出所有的shard Server
{
"shards" : [
{
"_id" : "shard0000",
"host" : "192.168.56.87:27017"
},
{
"_id" : "shard0001",
"host" : "192.168.56.88:27017"
}
],
"ok" : 1
}
mongos>
刚才我们是对表 db.users 进行分片了,下面我们将对库中现有的未分片的表 db.users2 进行分片处理
> for (var i = 1; i <= 5000; i++) db.users2.insert({age:i, name:"wangwenlong", addr:"Beijing",country:"China"})
1. ./mongo 192.168.56.90:27017 #这里必须连接路由节点
mongos> use admin
switched to db admin
mongos> db.runCommand({listshards:1}) --列出所有的shard Server
{
"shards" : [
{
"_id" : "shard0000",
"host" : "192.168.56.87:27017"
},
{
"_id" : "shard0001",
"host" : "192.168.56.88:27017"
}
],
"ok" : 1
}
mongos>
2. 查看 sharding 信息
> printShardingStatus() --查看Sharding信息
mongos> printShardingStatus()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("5550731f38c2a6ca9b4e1ee5")
}
shards:
{ "_id" : "shard0000", "host" : "192.168.56.87:27017" }
{ "_id" : "shard0001", "host" : "192.168.56.88:27017" }
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
1 : Success
databases:
{ "_id" : "admin", "partitioned" : false, "primary" : "config" }
{ "_id" : "test", "partitioned" : true, "primary" : "shard0000" }
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("5550731f38c2a6ca9b4e1ee5")
}
shards:
{ "_id" : "shard0000", "host" : "192.168.56.87:27017" }
{ "_id" : "shard0001", "host" : "192.168.56.88:27017" }
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
1 : Success
databases:
{ "_id" : "admin", "partitioned" : false, "primary" : "config" }
{ "_id" : "test", "partitioned" : true, "primary" : "shard0000" }
3.判断是否是 sharding
mongos> db.runCommand({isdbgrid:1})
{ "isdbgrid" : 1, "hostname" : "node1", "ok" : 1 }
{ "isdbgrid" : 1, "hostname" : "node1", "ok" : 1 }
4.对现有的表进行 sharding
表最初状态如下,可以看出他没有被分片过:
4.1 查看db.users2的状态
> use test
switched to db test
> for (var i = 1; i <= 5000; i++) db.users2.insert({age:i, name:"wangwenlong", addr:"Beijing",country:"China"})
WriteResult({ "nInserted" : 1 })
mongos> db.users2.stats()
{
"sharded" : false,
"primary" : "shard0000",
"ns" : "test.users",
"count" : 5000,
"size" : 560000,
"avgObjSize" : 112,
"numExtents" : 4,
"storageSize" : 696320,
"lastExtentSize" : 524288,
"paddingFactor" : 1,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : 1,
"capped" : false,
"nindexes" : 1,
"totalIndexSize" : 171696,
"indexSizes" : {
"_id_" : 171696
},
"ok" : 1
}
{
"sharded" : false,
"primary" : "shard0000",
"ns" : "test.users",
"count" : 5000,
"size" : 560000,
"avgObjSize" : 112,
"numExtents" : 4,
"storageSize" : 696320,
"lastExtentSize" : 524288,
"paddingFactor" : 1,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : 1,
"capped" : false,
"nindexes" : 1,
"totalIndexSize" : 171696,
"indexSizes" : {
"_id_" : 171696
},
"ok" : 1
}
4.2 对其进行分片处理:
> use admin
switched to db admin
> db.runCommand({ shardcollection: "test.users2", key: { _id:1 }}) --对库中已存在的表 test.users2 进行分片
{ "collectionsharded" : "test.users2", "ok" : 1 }
{ "collectionsharded" : "test.users2", "ok" : 1 }
再次查看分片后的表的状态,可以看到它已经被我们分片了
> use test
switched to db test
switched to db test
mongos> db.users2.stats()
{
"sharded" : true, --是shard状态
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : 1,
"capped" : false,
"ns" : "test.users",
"count" : 5000,
"numExtents" : 4,
"size" : 560000,
"storageSize" : 696320,
"totalIndexSize" : 171696,
"indexSizes" : {
"_id_" : 171696
},
"avgObjSize" : 112,
"nindexes" : 1,
"nchunks" : 1,
"shards" : {
"shard0000" : {
"ns" : "test.users",
"count" : 5000,
"size" : 560000,
"avgObjSize" : 112,
"numExtents" : 4,
"storageSize" : 696320,
"lastExtentSize" : 524288,
"paddingFactor" : 1,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : 1,
"capped" : false,
"nindexes" : 1,
"totalIndexSize" : 171696,
"indexSizes" : {
"_id_" : 171696
},
"ok" : 1
}
},
"ok" : 1
}
mongos>
{
"sharded" : true, --是shard状态
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : 1,
"capped" : false,
"ns" : "test.users",
"count" : 5000,
"numExtents" : 4,
"size" : 560000,
"storageSize" : 696320,
"totalIndexSize" : 171696,
"indexSizes" : {
"_id_" : 171696
},
"avgObjSize" : 112,
"nindexes" : 1,
"nchunks" : 1,
"shards" : {
"shard0000" : {
"ns" : "test.users",
"count" : 5000,
"size" : 560000,
"avgObjSize" : 112,
"numExtents" : 4,
"storageSize" : 696320,
"lastExtentSize" : 524288,
"paddingFactor" : 1,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : 1,
"capped" : false,
"nindexes" : 1,
"totalIndexSize" : 171696,
"indexSizes" : {
"_id_" : 171696
},
"ok" : 1
}
},
"ok" : 1
}
mongos>
5. 新增shard Server
刚才我们演示的是新增分片表,接下来我们演示如何新增Shard Server
5.1. 建立数据文件夹
mkdir -p /mongodb/data/shared3 # shared3 数据目录
mkdir -p /mongodb/log
touch /mongodb/log/shared3.log
touch /mongodb/shared3.pid
chmod -R 755 /mongodb
5.2 启动一个新Shard Server 进程
./mongod --shardsvr --port 27017 --fork --dbpath /mongodb/data/shared3/ --logpath /mongodb/log/shared3.log --directoryperdb #192.168.56.91
about to fork child process, waiting until server is ready for connections.
forked process: 3438
child process started successfully, parent exiting
forked process: 3438
child process started successfully, parent exiting
5.3 配置新Shard Server --在路由节点执行
[root@node1 bin]# ./mongo 192.168.56.90:27017
> use admin
> db.runCommand({ addshard:"192.168.56.91:27017" })
{ "shardAdded" : "shard0002", "ok" : 1 }
> printShardingStatus()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("5550731f38c2a6ca9b4e1ee5")
}
shards:
{ "_id" : "shard0000", "host" : "192.168.56.87:27017" }
{ "_id" : "shard0001", "host" : "192.168.56.88:27017" }
{ "_id" : "shard0002", "host" : "192.168.56.91:27017" } 这是刚新添加的
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
11 : Success
databases:
{ "_id" : "admin", "partitioned" : false, "primary" : "config" }
{ "_id" : "test", "partitioned" : true, "primary" : "shard0000" }
test.users
shard key: { "_id" : 1 }
chunks:
shard0000 4
shard0001 4
shard0002 4
{ "_id" : { "$minKey" : 1 } } -->> { "_id" : ObjectId("555075a4a9c1857072f9384f") } on : shard0002 Timestamp(9, 0)
{ "_id" : ObjectId("555075a4a9c1857072f9384f") } -->> { "_id" : ObjectId("555075a5a9c1857072f93b95") } on : shard0002 Timestamp(11, 0)
{ "_id" : ObjectId("555075a5a9c1857072f93b95") } -->> { "_id" : ObjectId("555075a5a9c1857072f93d39") } on : shard0001 Timestamp(11, 1)
{ "_id" : ObjectId("555075a5a9c1857072f93d39") } -->> { "_id" : ObjectId("555075a5a9c1857072f93edd") } on : shard0001 Timestamp(5, 0)
{ "_id" : ObjectId("555075a5a9c1857072f93edd") } -->> { "_id" : ObjectId("555075a5a9c1857072f94081") } on : shard0001 Timestamp(6, 0)
{ "_id" : ObjectId("555075a5a9c1857072f94081") } -->> { "_id" : ObjectId("555075a6a9c1857072f94225") } on : shard0001 Timestamp(7, 0)
{ "_id" : ObjectId("555075a6a9c1857072f94225") } -->> { "_id" : ObjectId("555075a6a9c1857072f943c9") } on : shard0002 Timestamp(8, 0)
{ "_id" : ObjectId("555075a6a9c1857072f943c9") } -->> { "_id" : ObjectId("555075a6a9c1857072f9456d") } on : shard0002 Timestamp(10, 0)
{ "_id" : ObjectId("555075a6a9c1857072f9456d") } -->> { "_id" : ObjectId("555075a6a9c1857072f94711") } on : shard0000 Timestamp(10, 1)
{ "_id" : ObjectId("555075a6a9c1857072f94711") } -->> { "_id" : ObjectId("555075a6a9c1857072f948b5") } on : shard0000 Timestamp(1, 10)
{ "_id" : ObjectId("555075a6a9c1857072f948b5") } -->> { "_id" : ObjectId("555075a7a9c1857072f94a59") } on : shard0000 Timestamp(1, 11)
{ "_id" : ObjectId("555075a7a9c1857072f94a59") } -->> { "_id" : { "$maxKey" : 1 } } on : shard0000 Timestamp(1, 12)
test.users2
shard key: { "_id" : 1 }
chunks:
shard0000 1
{ "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard0000 Timestamp(1, 0)
{ "_id" : "db", "partitioned" : false, "primary" : "shard0000" }
{ "_id" : "zw", "partitioned" : false, "primary" : "shard0001" }
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("5550731f38c2a6ca9b4e1ee5")
}
shards:
{ "_id" : "shard0000", "host" : "192.168.56.87:27017" }
{ "_id" : "shard0001", "host" : "192.168.56.88:27017" }
{ "_id" : "shard0002", "host" : "192.168.56.91:27017" } 这是刚新添加的
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
11 : Success
databases:
{ "_id" : "admin", "partitioned" : false, "primary" : "config" }
{ "_id" : "test", "partitioned" : true, "primary" : "shard0000" }
test.users
shard key: { "_id" : 1 }
chunks:
shard0000 4
shard0001 4
shard0002 4
{ "_id" : { "$minKey" : 1 } } -->> { "_id" : ObjectId("555075a4a9c1857072f9384f") } on : shard0002 Timestamp(9, 0)
{ "_id" : ObjectId("555075a4a9c1857072f9384f") } -->> { "_id" : ObjectId("555075a5a9c1857072f93b95") } on : shard0002 Timestamp(11, 0)
{ "_id" : ObjectId("555075a5a9c1857072f93b95") } -->> { "_id" : ObjectId("555075a5a9c1857072f93d39") } on : shard0001 Timestamp(11, 1)
{ "_id" : ObjectId("555075a5a9c1857072f93d39") } -->> { "_id" : ObjectId("555075a5a9c1857072f93edd") } on : shard0001 Timestamp(5, 0)
{ "_id" : ObjectId("555075a5a9c1857072f93edd") } -->> { "_id" : ObjectId("555075a5a9c1857072f94081") } on : shard0001 Timestamp(6, 0)
{ "_id" : ObjectId("555075a5a9c1857072f94081") } -->> { "_id" : ObjectId("555075a6a9c1857072f94225") } on : shard0001 Timestamp(7, 0)
{ "_id" : ObjectId("555075a6a9c1857072f94225") } -->> { "_id" : ObjectId("555075a6a9c1857072f943c9") } on : shard0002 Timestamp(8, 0)
{ "_id" : ObjectId("555075a6a9c1857072f943c9") } -->> { "_id" : ObjectId("555075a6a9c1857072f9456d") } on : shard0002 Timestamp(10, 0)
{ "_id" : ObjectId("555075a6a9c1857072f9456d") } -->> { "_id" : ObjectId("555075a6a9c1857072f94711") } on : shard0000 Timestamp(10, 1)
{ "_id" : ObjectId("555075a6a9c1857072f94711") } -->> { "_id" : ObjectId("555075a6a9c1857072f948b5") } on : shard0000 Timestamp(1, 10)
{ "_id" : ObjectId("555075a6a9c1857072f948b5") } -->> { "_id" : ObjectId("555075a7a9c1857072f94a59") } on : shard0000 Timestamp(1, 11)
{ "_id" : ObjectId("555075a7a9c1857072f94a59") } -->> { "_id" : { "$maxKey" : 1 } } on : shard0000 Timestamp(1, 12)
test.users2
shard key: { "_id" : 1 }
chunks:
shard0000 1
{ "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard0000 Timestamp(1, 0)
{ "_id" : "db", "partitioned" : false, "primary" : "shard0000" }
{ "_id" : "zw", "partitioned" : false, "primary" : "shard0001" }
5.4 查看分片表状态,以验证新Shard Server
> use test
switched to db test
> db.users.stats()
{
"sharded" : true,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : 1,
"capped" : false,
"ns" : "test.users",
"count" : 10000,
"numExtents" : 13,
"size" : 1120000,
"storageSize" : 4186112,
"totalIndexSize" : 384272,
"indexSizes" : {
"_id_" : 384272
},
"avgObjSize" : 112,
"nindexes" : 1,
"nchunks" : 12,
"shards" : {
"shard0000" : {
"ns" : "test.users",
"count" : 6641,
"size" : 743792,
"avgObjSize" : 112,
"numExtents" : 5,
"storageSize" : 2793472,
"lastExtentSize" : 2097152,
"paddingFactor" : 1,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : 1,
"capped" : false,
"nindexes" : 1,
"totalIndexSize" : 228928,
"indexSizes" : {
"_id_" : 228928
},
"ok" : 1
},
"shard0001" : {
"ns" : "test.users",
"count" : 1680,
"size" : 188160,
"avgObjSize" : 112,
"numExtents" : 4,
"storageSize" : 696320,
"lastExtentSize" : 524288,
"paddingFactor" : 1,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : 1,
"capped" : false,
"nindexes" : 1,
"totalIndexSize" : 65408,
"indexSizes" : {
"_id_" : 65408
},
"ok" : 1
},
"shard0002" : {
"ns" : "test.users",
"count" : 1679,
"size" : 188048,
"avgObjSize" : 112,
"numExtents" : 4,
"storageSize" : 696320,
"lastExtentSize" : 524288,
"paddingFactor" : 1,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : 1,
"capped" : false,
"nindexes" : 1,
"totalIndexSize" : 89936,
"indexSizes" : {
"_id_" : 89936
},
"ok" : 1
}
},
"ok" : 1
}
我们可以发现,当我们新增Shard Server 后数据自动分布到了新Shard上,这是由MongoDB内部自己实现的。
6.移除 shard Server
有些时候由于硬件资源有限,所以我们不得不进行一些回收工作,下面我们就要将刚刚启用的Shard Server回收,系统首先会将在这个即将被移除的 Shard Server上的数据先平均分配到其它的 Shard Server 上,然后最终再将这个 Shard Server 踢下线, 我们需要不停的调用
db.runCommand({"removeshard" : "192.168.56.91:27017"});来观察这个移除操作进行到哪里了:
> use admin
switched to db admin
> db.runCommand({"removeshard" : "192.168.56.91:27017"});
switched to db admin
> db.runCommand({"removeshard" : "192.168.56.91:27017"});
{
"msg" : "draining started successfully",
"state" : "started",
"shard" : "shard0002",
"ok" : 1
}
"msg" : "draining started successfully",
"state" : "started",
"shard" : "shard0002",
"ok" : 1
}
> db.runCommand({"removeshard" : "192.168.56.91:27017"});
{
"msg" : "draining started successfully",
"state" : "started",
"shard" : "shard0002",
"ok" : 1
}
"msg" : "draining started successfully",
"state" : "started",
"shard" : "shard0002",
"ok" : 1
}
mongos> db.runCommand({"removeshard" : "192.168.56.91:27017"});
{
"code" : 13129,
"ok" : 0,
"errmsg" : "exception: can't find shard for: 192.168.56.91:27017"
}
{
"code" : 13129,
"ok" : 0,
"errmsg" : "exception: can't find shard for: 192.168.56.91:27017"
}
最终移除后,当我们再次调用db.runCommand({"removeshard" : "192.168.56.91:27017"});的时候系统
会报错,以便通知我们已不存在 192.168.56.91:27017 这个 Shard Server 了,因为它已经被移除掉了。
printShardingStatus() --查看sharding的信息
mongos> printShardingStatus()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("5550731f38c2a6ca9b4e1ee5")
}
shards:
{ "_id" : "shard0000", "host" : "192.168.56.87:27017" }
{ "_id" : "shard0001", "host" : "192.168.56.88:27017" } --可以看到这里只有两个了
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
15 : Success
databases:
{ "_id" : "admin", "partitioned" : false, "primary" : "config" }
{ "_id" : "test", "partitioned" : true, "primary" : "shard0000" }
test.users
shard key: { "_id" : 1 }
chunks:
shard0000 6
shard0001 6
{ "_id" : { "$minKey" : 1 } } -->> { "_id" : ObjectId("555075a4a9c1857072f9384f") } on : shard0000 Timestamp(12, 0)
{ "_id" : ObjectId("555075a4a9c1857072f9384f") } -->> { "_id" : ObjectId("555075a5a9c1857072f93b95") } on : shard0001 Timestamp(13, 0)
{ "_id" : ObjectId("555075a5a9c1857072f93b95") } -->> { "_id" : ObjectId("555075a5a9c1857072f93d39") } on : shard0001 Timestamp(11, 1)
{ "_id" : ObjectId("555075a5a9c1857072f93d39") } -->> { "_id" : ObjectId("555075a5a9c1857072f93edd") } on : shard0001 Timestamp(5, 0)
{ "_id" : ObjectId("555075a5a9c1857072f93edd") } -->> { "_id" : ObjectId("555075a5a9c1857072f94081") } on : shard0001 Timestamp(6, 0)
{ "_id" : ObjectId("555075a5a9c1857072f94081") } -->> { "_id" : ObjectId("555075a6a9c1857072f94225") } on : shard0001 Timestamp(7, 0)
{ "_id" : ObjectId("555075a6a9c1857072f94225") } -->> { "_id" : ObjectId("555075a6a9c1857072f943c9") } on : shard0000 Timestamp(14, 0)
{ "_id" : ObjectId("555075a6a9c1857072f943c9") } -->> { "_id" : ObjectId("555075a6a9c1857072f9456d") } on : shard0001 Timestamp(15, 0)
{ "_id" : ObjectId("555075a6a9c1857072f9456d") } -->> { "_id" : ObjectId("555075a6a9c1857072f94711") } on : shard0000 Timestamp(10, 1)
{ "_id" : ObjectId("555075a6a9c1857072f94711") } -->> { "_id" : ObjectId("555075a6a9c1857072f948b5") } on : shard0000 Timestamp(1, 10)
{ "_id" : ObjectId("555075a6a9c1857072f948b5") } -->> { "_id" : ObjectId("555075a7a9c1857072f94a59") } on : shard0000 Timestamp(1, 11)
{ "_id" : ObjectId("555075a7a9c1857072f94a59") } -->> { "_id" : { "$maxKey" : 1 } } on : shard0000 Timestamp(1, 12)
test.users2
shard key: { "_id" : 1 }
chunks:
shard0000 1
{ "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard0000 Timestamp(1, 0)
{ "_id" : "db", "partitioned" : false, "primary" : "shard0000" }
{ "_id" : "zw", "partitioned" : false, "primary" : "shard0001" }
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("5550731f38c2a6ca9b4e1ee5")
}
shards:
{ "_id" : "shard0000", "host" : "192.168.56.87:27017" }
{ "_id" : "shard0001", "host" : "192.168.56.88:27017" } --可以看到这里只有两个了
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
15 : Success
databases:
{ "_id" : "admin", "partitioned" : false, "primary" : "config" }
{ "_id" : "test", "partitioned" : true, "primary" : "shard0000" }
test.users
shard key: { "_id" : 1 }
chunks:
shard0000 6
shard0001 6
{ "_id" : { "$minKey" : 1 } } -->> { "_id" : ObjectId("555075a4a9c1857072f9384f") } on : shard0000 Timestamp(12, 0)
{ "_id" : ObjectId("555075a4a9c1857072f9384f") } -->> { "_id" : ObjectId("555075a5a9c1857072f93b95") } on : shard0001 Timestamp(13, 0)
{ "_id" : ObjectId("555075a5a9c1857072f93b95") } -->> { "_id" : ObjectId("555075a5a9c1857072f93d39") } on : shard0001 Timestamp(11, 1)
{ "_id" : ObjectId("555075a5a9c1857072f93d39") } -->> { "_id" : ObjectId("555075a5a9c1857072f93edd") } on : shard0001 Timestamp(5, 0)
{ "_id" : ObjectId("555075a5a9c1857072f93edd") } -->> { "_id" : ObjectId("555075a5a9c1857072f94081") } on : shard0001 Timestamp(6, 0)
{ "_id" : ObjectId("555075a5a9c1857072f94081") } -->> { "_id" : ObjectId("555075a6a9c1857072f94225") } on : shard0001 Timestamp(7, 0)
{ "_id" : ObjectId("555075a6a9c1857072f94225") } -->> { "_id" : ObjectId("555075a6a9c1857072f943c9") } on : shard0000 Timestamp(14, 0)
{ "_id" : ObjectId("555075a6a9c1857072f943c9") } -->> { "_id" : ObjectId("555075a6a9c1857072f9456d") } on : shard0001 Timestamp(15, 0)
{ "_id" : ObjectId("555075a6a9c1857072f9456d") } -->> { "_id" : ObjectId("555075a6a9c1857072f94711") } on : shard0000 Timestamp(10, 1)
{ "_id" : ObjectId("555075a6a9c1857072f94711") } -->> { "_id" : ObjectId("555075a6a9c1857072f948b5") } on : shard0000 Timestamp(1, 10)
{ "_id" : ObjectId("555075a6a9c1857072f948b5") } -->> { "_id" : ObjectId("555075a7a9c1857072f94a59") } on : shard0000 Timestamp(1, 11)
{ "_id" : ObjectId("555075a7a9c1857072f94a59") } -->> { "_id" : { "$maxKey" : 1 } } on : shard0000 Timestamp(1, 12)
test.users2
shard key: { "_id" : 1 }
chunks:
shard0000 1
{ "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard0000 Timestamp(1, 0)
{ "_id" : "db", "partitioned" : false, "primary" : "shard0000" }
{ "_id" : "zw", "partitioned" : false, "primary" : "shard0001" }
接下来我们看一下表中的数据分布:
> use test
switched to db test
> db.users.stats()
mongos> db.users.stats()
{
"sharded" : true,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : 1,
"capped" : false,
"ns" : "test.users",
"count" : 10000,
"numExtents" : 13,
"size" : 1120000,
"storageSize" : 4186112,
"totalIndexSize" : 392448,
"indexSizes" : {
"_id_" : 392448
},
"avgObjSize" : 112,
"nindexes" : 1,
"nchunks" : 12,
"shards" : {
"shard0000" : {
"ns" : "test.users",
"count" : 7062,
"size" : 790944,
"avgObjSize" : 112,
"numExtents" : 5,
"storageSize" : 2793472,
"lastExtentSize" : 2097152,
"paddingFactor" : 1,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : 1,
"capped" : false,
"nindexes" : 1,
"totalIndexSize" : 253456,
"indexSizes" : {
"_id_" : 253456
},
"ok" : 1
},
"shard0001" : {
"ns" : "test.users",
"count" : 2938,
"size" : 329056,
"avgObjSize" : 112,
"numExtents" : 4,
"storageSize" : 696320,
"lastExtentSize" : 524288,
"paddingFactor" : 1,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : 1,
"capped" : false,
"nindexes" : 1,
"totalIndexSize" : 130816,
"indexSizes" : {
"_id_" : 130816
},
"ok" : 1
},
"shard0002" : {
"ns" : "test.users",
"count" : 0,
"size" : 0, --可以看到这里 size 0 ,count也是0 了
"numExtents" : 4,
"storageSize" : 696320,
"lastExtentSize" : 524288,
"paddingFactor" : 1,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : 1,
"capped" : false,
"nindexes" : 1,
"totalIndexSize" : 8176,
"indexSizes" : {
"_id_" : 8176
},
"ok" : 1
}
},
"ok" : 1
}
可以看出数据又被平均分配到了另外 2台Shard Server上了.