Day 65 of Learning Linux

Introduction to MongoDB Sharding

[Images: MongoDB sharding architecture diagrams]
Briefly, a sharded cluster has three roles: mongos, the router that clients connect to; the config servers, which store the cluster's metadata; and the shards, the replica sets that store the actual data.

Building a MongoDB Sharded Cluster

[Images: sharded-cluster deployment plan]
Deployment plan: each of the three machines runs a mongos (port 20000), a config server (port 21000), and one member of each of the shard replica sets shard1/shard2/shard3 (ports 27001/27002/27003); within each shard, one of the three members serves as an arbiter.
The IPs of the three machines are:
Machine A: 192.168.70.128
Machine B: 192.168.70.129
Machine C: 192.168.70.130

Cluster setup – creating directories:
Create the directories each role needs, on all three machines:

mkdir -p /data/mongodb/mongos/log
mkdir -p /data/mongodb/config/{data,log}
mkdir -p /data/mongodb/shard1/{data,log}
mkdir -p /data/mongodb/shard2/{data,log}
mkdir -p /data/mongodb/shard3/{data,log}
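
If the three machines can reach each other over SSH, the same directories can also be created from a single host in one loop. This is a sketch, not part of the original steps: it assumes root SSH access between the machines and that the remote shell is bash (for the brace expansion):

# run the same mkdir commands on all three machines over SSH
for host in 192.168.70.128 192.168.70.129 192.168.70.130; do
ssh root@$host 'mkdir -p /data/mongodb/mongos/log /data/mongodb/config/{data,log} /data/mongodb/shard{1,2,3}/{data,log}'
done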

Cluster setup – config server configuration:
Since MongoDB 3.4, the config servers must be deployed as a replica set.
Add the configuration file (on all three machines):

[root@localhost ~]# mkdir /etc/mongod/
[root@localhost ~]# vim /etc/mongod/config.conf # add the following content
pidfilepath = /var/run/mongodb/configsrv.pid
dbpath = /data/mongodb/config/data
logpath = /data/mongodb/config/log/configsrv.log
logappend = true
bind_ip = 0.0.0.0 # bind your listen IP
port = 21000
fork = true
configsvr = true # declare this is a config db of a cluster
replSet = configs # replica set name
maxConns = 20000 # maximum number of connections
Start the config server on all three machines:

[root@localhost ~]# mongod -f /etc/mongod/config.conf # run on all three machines
about to fork child process, waiting until server is ready for connections.
forked process: 4183
child process started successfully, parent exiting
[root@localhost ~]# ps aux |grep mongo
mongod 2518 1.1 2.3 1544488 89064 ? Sl 09:57 0:42 /usr/bin/mongod -f /etc/mongod.conf
root 4183 1.1 1.3 1072404 50992 ? Sl 10:56 0:00 mongod -f /etc/mongod/config.conf
root 4240 0.0 0.0 112660 964 pts/0 S+ 10:57 0:00 grep --color=auto mongo
[root@localhost ~]# netstat -lntp |grep mongod
tcp 0 0 192.168.70.128:21000 0.0.0.0:* LISTEN 4183/mongod
tcp 0 0 192.168.70.128:27017 0.0.0.0:* LISTEN 2518/mongod
tcp 0 0 127.0.0.1:27017 0.0.0.0:* LISTEN 2518/mongod
[root@localhost ~]#
Log in to port 21000 on any one machine and initialize the replica set:

[root@localhost ~]# mongo --host 192.168.70.128 --port 21000

config = { _id: "configs", members: [ {_id : 0, host : "192.168.70.128:21000"},{_id : 1, host : "192.168.70.129:21000"},{_id : 2, host : "192.168.70.130:21000"}] }
{
"_id" : "configs",
"members" : [
{
"_id" : 0,
"host" : "192.168.70.128:21000"
},
{
"_id" : 1,
"host" : "192.168.70.129:21000"
},
{
"_id" : 2,
"host" : "192.168.70.130:21000"
}
]
}
rs.initiate(config) # initialize the replica set
{
"ok" : 1,
"operationTime" : Timestamp(1515553318, 1),
"$gleStats" : {
"lastOpTime" : Timestamp(1515553318, 1),
"electionId" : ObjectId("000000000000000000000000")
},
"$clusterTime" : {
"clusterTime" : Timestamp(1515553318, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
configs:SECONDARY> rs.status() # make sure every member is healthy
{
"set" : "configs",
"date" : ISODate("2019-12-18T16:03:40.244Z"),
"myState" : 1,
"term" : NumberLong(1),
"configsvr" : true,
"heartbeatIntervalMillis" : NumberLong(2000),
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1515553411, 1),
"t" : NumberLong(1)
},
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1515553411, 1),
"t" : NumberLong(1)
},
"appliedOpTime" : {
"ts" : Timestamp(1515553411, 1),
"t" : NumberLong(1)
},
"durableOpTime" : {
"ts" : Timestamp(1515553411, 1),
"t" : NumberLong(1)
}
},
"members" : [
{
"_id" : 0,
"name" : "192.168.70.128:21000",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 415,
"optime" : {
"ts" : Timestamp(1515553411, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2019-12-18T16:03:31Z"),
"infoMessage" : "could not find member to sync from",
"electionTime" : Timestamp(1515553329, 1),
"electionDate" : ISODate("2019-12-18T16:02:09Z"),
"configVersion" : 1,
"self" : true
},
{
"_id" : 1,
"name" : "192.168.70.129:21000",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 101,
"optime" : {
"ts" : Timestamp(1515553411, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1515553411, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2019-12-18T16:03:31Z"),
"optimeDurableDate" : ISODate("2019-12-18T16:03:31Z"),
"lastHeartbeat" : ISODate("2019-12-18T16:03:39.973Z"),
"lastHeartbeatRecv" : ISODate("2019-12-18T16:03:38.804Z"),
"pingMs" : NumberLong(0),
"syncingTo" : "192.168.70.130:21000",
"configVersion" : 1
},
{
"_id" : 2,
"name" : "192.168.70.130:21000",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 101,
"optime" : {
"ts" : Timestamp(1515553411, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1515553411, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2019-12-18T16:03:31Z"),
"optimeDurableDate" : ISODate("2019-12-18T16:03:31Z"),
"lastHeartbeat" : ISODate("2019-12-18T16:03:39.945Z"),
"lastHeartbeatRecv" : ISODate("2019-12-18T16:03:38.726Z"),
"pingMs" : NumberLong(0),
"syncingTo" : "192.168.70.128:21000",
"configVersion" : 1
}
],
"ok" : 1,
"operationTime" : Timestamp(1515553411, 1),
"$gleStats" : {
"lastOpTime" : Timestamp(1515553318, 1),
"electionId" : ObjectId("7fffffff0000000000000001")
},
"$clusterTime" : {
"clusterTime" : Timestamp(1515553411, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
configs:PRIMARY>
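To read the same health information at a glance, each member's state can be printed with a one-liner in the mongo shell (a small sketch; run it against any config server, expecting one PRIMARY and two SECONDARYs):

configs:PRIMARY> rs.status().members.forEach(function (m) { print(m.name + " -> " + m.stateStr) })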
Cluster setup – shard configuration:
Add the configuration files (on all three machines):

[root@localhost ~]# vim /etc/mongod/shard1.conf # add the following content
pidfilepath = /var/run/mongodb/shard1.pid
dbpath = /data/mongodb/shard1/data
logpath = /data/mongodb/shard1/log/shard1.log
logappend = true
logRotate = rename
bind_ip = 0.0.0.0 # bind your listen IP
port = 27001
fork = true
replSet = shard1 # replica set name
shardsvr = true # declare this is a shard db of a cluster
maxConns = 20000 # maximum number of connections

[root@localhost ~]# vim /etc/mongod/shard2.conf # add the following content
pidfilepath = /var/run/mongodb/shard2.pid
dbpath = /data/mongodb/shard2/data
logpath = /data/mongodb/shard2/log/shard2.log
logappend = true
logRotate = rename
bind_ip = 0.0.0.0 # bind your listen IP
port = 27002
fork = true
replSet = shard2 # replica set name
shardsvr = true # declare this is a shard db of a cluster
maxConns = 20000 # maximum number of connections

[root@localhost ~]# vim /etc/mongod/shard3.conf # add the following content
pidfilepath = /var/run/mongodb/shard3.pid
dbpath = /data/mongodb/shard3/data
logpath = /data/mongodb/shard3/log/shard3.log
logappend = true
logRotate = rename
bind_ip = 0.0.0.0 # bind your listen IP
port = 27003
fork = true
replSet = shard3 # replica set name
shardsvr = true # declare this is a shard db of a cluster
maxConns = 20000 # maximum number of connections
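
Since the three shard config files differ only in the shard number, they can also be generated in one loop (a sketch using the same paths and ports as above):

# write shard1.conf..shard3.conf; ports 27001..27003 match the files above
for i in 1 2 3; do
cat > /etc/mongod/shard$i.conf <<EOF
pidfilepath = /var/run/mongodb/shard$i.pid
dbpath = /data/mongodb/shard$i/data
logpath = /data/mongodb/shard$i/log/shard$i.log
logappend = true
logRotate = rename
bind_ip = 0.0.0.0
port = 2700$i
fork = true
replSet = shard$i
shardsvr = true
maxConns = 20000
EOF
done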
Once everything is configured, start the shards one by one, on all three machines:

1. Start shard1 first:

[root@localhost ~]# mongod -f /etc/mongod/shard1.conf # run on all three machines
about to fork child process, waiting until server is ready for connections.
forked process: 13615
child process started successfully, parent exiting
[root@localhost ~]# ps aux |grep shard1
root 13615 0.7 1.3 1023224 52660 ? Sl 17:16 0:00 mongod -f /etc/mongod/shard1.conf
root 13670 0.0 0.0 112660 964 pts/0 R+ 17:17 0:00 grep --color=auto shard1
[root@localhost ~]#
Then log in to port 27001 on machine 128 or 129 to initialize the replica set. Machine 130 won't work because, for shard1, we made 130's port 27001 the arbiter node:

[root@localhost ~]# mongo --host 192.168.70.128 --port 27001

use admin
switched to db admin
config = { _id: "shard1", members: [ {_id : 0, host : "192.168.70.128:27001"}, {_id : 1, host : "192.168.70.129:27001"},{_id : 2, host : "192.168.70.130:27001",arbiterOnly:true}] }
{
"_id" : "shard1",
"members" : [
{
"_id" : 0,
"host" : "192.168.70.128:27001"
},
{
"_id" : 1,
"host" : "192.168.70.129:27001"
},
{
"_id" : 2,
"host" : "192.168.70.130:27001",
"arbiterOnly" : true
}
]
}
rs.initiate(config) # initialize the replica set
{ “ok” : 1 }
shard1:SECONDARY> rs.status() # check the status
{
"set" : "shard1",
"date" : ISODate("2019-12-18T17:21:37.682Z"),
"myState" : 1,
"term" : NumberLong(1),
"heartbeatIntervalMillis" : NumberLong(2000),
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1515576097, 1),
"t" : NumberLong(1)
},
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1515576097, 1),
"t" : NumberLong(1)
},
"appliedOpTime" : {
"ts" : Timestamp(1515576097, 1),
"t" : NumberLong(1)
},
"durableOpTime" : {
"ts" : Timestamp(1515576097, 1),
"t" : NumberLong(1)
}
},
"members" : [
{
"_id" : 0,
"name" : "192.168.70.128:27001",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 317,
"optime" : {
"ts" : Timestamp(1515576097, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2019-12-18T17:21:37Z"),
"infoMessage" : "could not find member to sync from",
"electionTime" : Timestamp(1515576075, 1),
"electionDate" : ISODate("2019-12-18T17:21:15Z"),
"configVersion" : 1,
"self" : true
},
{
"_id" : 1,
"name" : "192.168.70.129:27001",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 33,
"optime" : {
"ts" : Timestamp(1515576097, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1515576097, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2019-12-18T17:21:37Z"),
"optimeDurableDate" : ISODate("2019-12-18T17:21:37Z"),
"lastHeartbeat" : ISODate("2019-12-18T17:21:37.262Z"),
"lastHeartbeatRecv" : ISODate("2019-12-18T17:21:36.213Z"),
"pingMs" : NumberLong(0),
"syncingTo" : "192.168.70.128:27001",
"configVersion" : 1
},
{
"_id" : 2,
"name" : "192.168.70.130:27001",
"health" : 1,
"state" : 7,
"stateStr" : "ARBITER", # 130 is the arbiter node
"uptime" : 33,
"lastHeartbeat" : ISODate("2019-12-18T17:21:37.256Z"),
"lastHeartbeatRecv" : ISODate("2019-12-18T17:21:36.024Z"),
"pingMs" : NumberLong(0),
"configVersion" : 1
}
],
"ok" : 1
}
shard1:PRIMARY>
2. With shard1 done, start shard2:

[root@localhost ~]# mongod -f /etc/mongod/shard2.conf # start on all three machines
about to fork child process, waiting until server is ready for connections.
forked process: 13910
child process started successfully, parent exiting
[root@localhost ~]# ps aux |grep shard2
root 13910 1.9 1.2 1023224 50096 ? Sl 17:25 0:00 mongod -f /etc/mongod/shard2.conf
root 13943 0.0 0.0 112660 964 pts/0 S+ 17:25 0:00 grep --color=auto shard2
[root@localhost ~]#
Log in to port 27002 on either machine 129 or 130 to initialize the replica set. Machine 128 won't work because, for shard2, we made 128's port 27002 the arbiter node:

[root@localhost ~]# mongo --host 192.168.70.129 --port 27002

use admin
switched to db admin
config = { _id: "shard2", members: [ {_id : 0, host : "192.168.70.128:27002" ,arbiterOnly:true},{_id : 1, host : "192.168.70.129:27002"},{_id : 2, host : "192.168.70.130:27002"}] }
{
"_id" : "shard2",
"members" : [
{
"_id" : 0,
"host" : "192.168.70.128:27002",
"arbiterOnly" : true
},
{
"_id" : 1,
"host" : "192.168.70.129:27002"
},
{
"_id" : 2,
"host" : "192.168.70.130:27002"
}
]
}
rs.initiate(config)
{ “ok” : 1 }
shard2:SECONDARY> rs.status()
{
"set" : "shard2",
"date" : ISODate("2019-12-18T17:26:12.250Z"),
"myState" : 1,
"term" : NumberLong(1),
"heartbeatIntervalMillis" : NumberLong(2000),
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1515605171, 1),
"t" : NumberLong(1)
},
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1515605171, 1),
"t" : NumberLong(1)
},
"appliedOpTime" : {
"ts" : Timestamp(1515605171, 1),
"t" : NumberLong(1)
},
"durableOpTime" : {
"ts" : Timestamp(1515605171, 1),
"t" : NumberLong(1)
}
},
"members" : [
{
"_id" : 0,
"name" : "192.168.70.128:27002",
"health" : 1,
"state" : 7,
"stateStr" : "ARBITER", # arbiter node
"uptime" : 42,
"lastHeartbeat" : ISODate("2019-12-18T17:26:10.792Z"),
"lastHeartbeatRecv" : ISODate("2019-12-18T17:26:11.607Z"),
"pingMs" : NumberLong(0),
"configVersion" : 1
},
{
"_id" : 1,
"name" : "192.168.70.129:27002",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY", # primary node
"uptime" : 546,
"optime" : {
"ts" : Timestamp(1515605171, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2019-12-18T17:26:11Z"),
"infoMessage" : "could not find member to sync from",
"electionTime" : Timestamp(1515605140, 1),
"electionDate" : ISODate("2019-12-18T17:25:40Z"),
"configVersion" : 1,
"self" : true
},
{
"_id" : 2,
"name" : "192.168.70.130:27002",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY", # secondary node
"uptime" : 42,
"optime" : {
"ts" : Timestamp(1515605161, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1515605161, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2019-12-18T17:26:01Z"),
"optimeDurableDate" : ISODate("2019-12-18T17:26:01Z"),
"lastHeartbeat" : ISODate("2019-12-18T17:26:10.776Z"),
"lastHeartbeatRecv" : ISODate("2019-12-18T17:26:10.823Z"),
"pingMs" : NumberLong(0),
"syncingTo" : "192.168.70.129:27002",
"configVersion" : 1
}
],
"ok" : 1
}
3. Next, start shard3:

[root@localhost ~]# mongod -f /etc/mongod/shard3.conf # run on all three machines
about to fork child process, waiting until server is ready for connections.
forked process: 14204
child process started successfully, parent exiting
[root@localhost ~]# ps aux |grep shard3
root 14204 2.2 1.2 1023228 50096 ? Sl 17:36 0:00 mongod -f /etc/mongod/shard3.conf
root 14237 0.0 0.0 112660 960 pts/0 S+ 17:36 0:00 grep --color=auto shard3
[root@localhost ~]#
Then log in to port 27003 on either machine 128 or 130 to initialize the replica set. Machine 129 won't work because, for shard3, we made 129's port 27003 the arbiter node:

[root@localhost ~]# mongo --host 192.168.70.128 --port 27003

use admin
switched to db admin
config = { _id: "shard3", members: [ {_id : 0, host : "192.168.70.128:27003"}, {_id : 1, host : "192.168.70.129:27003", arbiterOnly:true}, {_id : 2, host : "192.168.70.130:27003"}] }
{
"_id" : "shard3",
"members" : [
{
"_id" : 0,
"host" : "192.168.70.128:27003"
},
{
"_id" : 1,
"host" : "192.168.70.129:27003",
"arbiterOnly" : true
},
{
"_id" : 2,
"host" : "192.168.70.130:27003"
}
]
}
rs.initiate(config)
{ “ok” : 1 }
shard3:SECONDARY> rs.status()
{
"set" : "shard3",
"date" : ISODate("2019-12-18T17:39:47.530Z"),
"myState" : 1,
"term" : NumberLong(1),
"heartbeatIntervalMillis" : NumberLong(2000),
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1515577180, 2),
"t" : NumberLong(1)
},
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1515577180, 2),
"t" : NumberLong(1)
},
"appliedOpTime" : {
"ts" : Timestamp(1515577180, 2),
"t" : NumberLong(1)
},
"durableOpTime" : {
"ts" : Timestamp(1515577180, 2),
"t" : NumberLong(1)
}
},
"members" : [
{
"_id" : 0,
"name" : "192.168.70.128:27003",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY", # primary node
"uptime" : 221,
"optime" : {
"ts" : Timestamp(1515577180, 2),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2019-12-18T17:39:40Z"),
"infoMessage" : "could not find member to sync from",
"electionTime" : Timestamp(1515577179, 1),
"electionDate" : ISODate("2019-12-18T17:39:39Z"),
"configVersion" : 1,
"self" : true
},
{
"_id" : 1,
"name" : "192.168.70.129:27003",
"health" : 1,
"state" : 7,
"stateStr" : "ARBITER", # arbiter node
"uptime" : 18,
"lastHeartbeat" : ISODate("2019-12-18T17:39:47.477Z"),
"lastHeartbeatRecv" : ISODate("2019-12-18T17:39:45.715Z"),
"pingMs" : NumberLong(0),
"configVersion" : 1
},
{
"_id" : 2,
"name" : "192.168.70.130:27003",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY", # secondary node
"uptime" : 18,
"optime" : {
"ts" : Timestamp(1515577180, 2),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1515577180, 2),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2019-12-18T17:39:40Z"),
"optimeDurableDate" : ISODate("2019-12-18T17:39:40Z"),
"lastHeartbeat" : ISODate("2019-12-18T17:39:47.477Z"),
"lastHeartbeatRecv" : ISODate("2019-12-18T17:39:45.779Z"),
"pingMs" : NumberLong(0),
"syncingTo" : "192.168.70.128:27003",
"configVersion" : 1
}
],
"ok" : 1
}
Cluster setup – configuring the router (mongos)
mongos is configured last because it needs to know which machines are the config servers and which machines host the shard replica sets.

1. Add the configuration file (on all three machines):

[root@localhost ~]# vim /etc/mongod/mongos.conf # add the following content
pidfilepath = /var/run/mongodb/mongos.pid
logpath = /data/mongodb/mongos/log/mongos.log
logappend = true
bind_ip = 0.0.0.0 # bind your listen IP
port = 20000
fork = true

# the config servers mongos connects to; only 1 or 3 are allowed; "configs" is the config server replica set name
# note: no spaces after the commas, otherwise mongos cannot resolve the host names
configdb = configs/192.168.70.128:21000,192.168.70.129:21000,192.168.70.130:21000
maxConns = 20000 # maximum number of connections
2. Then start the mongos service on all three machines. Note the command: the previous services used mongod, but this one is mongos:

[root@localhost ~]# mongos -f /etc/mongod/mongos.conf # run on all three machines
about to fork child process, waiting until server is ready for connections.
forked process: 15552
child process started successfully, parent exiting
(If spaces are left after the commas in the configdb line, mongos logs warnings such as getaddrinfo(" 192.168.70.129") failed: Name or service not known while parsing the host list.)
[root@localhost ~]# ps aux |grep mongos # on all three machines, check the process is running
root 15552 0.2 0.3 279940 15380 ? Sl 18:26 0:00 mongos -f /etc/mongod/mongos.conf
root 15597 0.0 0.0 112660 964 pts/0 S+ 18:27 0:00 grep --color=auto mongos
[root@localhost ~]# netstat -lntp |grep mongos # on all three machines, check the port is listening
tcp 0 0 0.0.0.0:20000 0.0.0.0:* LISTEN 15552/mongos
[root@localhost ~]#
Cluster setup – enabling sharding
1. Log in to port 20000 on any machine, then join all the shards to the router:

[root@localhost ~]# mongo --host 192.168.70.128 --port 20000

Add shard1:

mongos> sh.addShard("shard1/192.168.70.128:27001,192.168.70.129:27001,192.168.70.130:27001")
{
"shardAdded" : "shard1", # this must be shard1
"ok" : 1, # note: this must be 1 to indicate success
"$clusterTime" : {
"clusterTime" : Timestamp(1515580345, 6),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
},
"operationTime" : Timestamp(1515580345, 6)
}

Add shard2:

mongos> sh.addShard("shard2/192.168.70.128:27002,192.168.70.129:27002,192.168.70.130:27002")
{
"shardAdded" : "shard2", # this must be shard2
"ok" : 1, # note: this must be 1 to indicate success
"$clusterTime" : {
"clusterTime" : Timestamp(1515608789, 6),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
},
"operationTime" : Timestamp(1515608789, 6)
}

Add shard3:

mongos> sh.addShard("shard3/192.168.70.128:27003,192.168.70.129:27003,192.168.70.130:27003")
{
"shardAdded" : "shard3", # this must be shard3
"ok" : 1, # note: this must be 1 to indicate success
"$clusterTime" : {
"clusterTime" : Timestamp(1515608789, 14),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
},
"operationTime" : Timestamp(1515608789, 14)
}
mongos>
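Besides sh.status() below, the registered shards can also be listed with the listShards admin command from the same mongos shell; all three shards should appear and the reply should have ok: 1:

mongos> db.adminCommand({ listShards: 1 })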
Use the sh.status() command to query the sharding status and confirm everything is normal:

mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("5a55823348aee75ba3928fea")
}
shards: # on success, the shards and their state are listed here; state must be 1
{ "_id" : "shard1", "host" : "shard1/192.168.70.128:27001,192.168.70.129:27001", "state" : 1 }
{ "_id" : "shard2", "host" : "shard2/192.168.70.129:27002,192.168.70.130:27002", "state" : 1 }
{ "_id" : "shard3", "host" : "shard3/192.168.70.128:27003,192.168.70.130:27003", "state" : 1 }
active mongoses:
"3.6.1" : 1
autosplit:
Currently enabled: yes # yes on success
balancer:
Currently enabled: yes # yes on success
Currently running: no # no while no databases or collections have been created yet; otherwise it should be yes
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
{ "_id" : "config", "primary" : "config", "partitioned" : true }
config.system.sessions
shard key: { "_id" : 1 }
unique: false
balancing: true
chunks:
shard1 1
{ "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard1 Timestamp(1, 0)

mongos>

MongoDB sharding test

1. Log in to port 20000 on any machine:

[root@localhost ~]# mongo --host 192.168.70.128 --port 20000

2. Switch to the admin database and use either of the following commands to enable sharding on a database:

db.runCommand({ enablesharding : "testdb" })
sh.enableSharding("testdb")

Example:

mongos> use admin
switched to db admin
mongos> sh.enableSharding("testdb")
{
"ok" : 1,
"$clusterTime" : {
"clusterTime" : Timestamp(1515609562, 6),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
},
"operationTime" : Timestamp(1515609562, 6)
}
mongos>
3. Use either of the following commands to specify the collection to shard and its shard key:

db.runCommand( { shardcollection : "testdb.table1", key : {id: 1} } )
sh.shardCollection("testdb.table1", {"id": 1})

Example:

mongos> sh.shardCollection("testdb.table1",{"id":1} )
{
"collectionsharded" : "testdb.table1",
"collectionUUID" : UUID("f98762a6-8b2b-4ae5-9142-3d8acc589255"),
"ok" : 1,
"$clusterTime" : {
"clusterTime" : Timestamp(1515609671, 12),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
},
"operationTime" : Timestamp(1515609671, 12)
}
mongos>
4. Switch to the newly created testdb database and insert some test data:

mongos> use testdb
switched to db testdb
mongos> for (var i = 1; i <= 10000; i++) db.table1.save({id:i,"test1":"testval1"})
WriteResult({ "nInserted" : 1 })
mongos>
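To see where the inserted documents actually landed, the collection's shard distribution can be checked right away (a small collection like this will usually still fit in a single chunk on one shard):

mongos> db.table1.getShardDistribution() # prints per-shard document and chunk counts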
5. Then create a few more databases and sharded collections:

mongos> sh.enableSharding("db1")
mongos> sh.shardCollection("db1.table1",{"id":1} )
mongos> sh.enableSharding("db2")
mongos> sh.shardCollection("db2.table1",{"id":1} )
mongos> sh.enableSharding("db3")
mongos> sh.shardCollection("db3.table1",{"id":1} )

6. Check the status:

mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("5a55823348aee75ba3928fea")
}
shards:
{ "_id" : "shard1", "host" : "shard1/192.168.70.128:27001,192.168.70.129:27001", "state" : 1 }
{ "_id" : "shard2", "host" : "shard2/192.168.70.129:27002,192.168.70.130:27002", "state" : 1 }
{ "_id" : "shard3", "host" : "shard3/192.168.70.128:27003,192.168.70.130:27003", "state" : 1 }
active mongoses:
"3.6.1" : 1
autosplit:
Currently enabled: yes
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
{ "_id" : "config", "primary" : "config", "partitioned" : true }
config.system.sessions
shard key: { "_id" : 1 }
unique: false
balancing: true
chunks:
shard1 1
{ "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard1 Timestamp(1, 0)
{ "_id" : "db1", "primary" : "shard3", "partitioned" : true }
db1.table1
shard key: { "id" : 1 }
unique: false
balancing: true
chunks:
shard3 1 # db1 is stored on shard3
{ "id" : { "$minKey" : 1 } } -->> { "id" : { "$maxKey" : 1 } } on : shard3 Timestamp(1, 0)
{ "_id" : "db2", "primary" : "shard1", "partitioned" : true }
db2.table1
shard key: { "id" : 1 }
unique: false
balancing: true
chunks:
shard1 1 # db2 is stored on shard1
{ "id" : { "$minKey" : 1 } } -->> { "id" : { "$maxKey" : 1 } } on : shard1 Timestamp(1, 0)
{ "_id" : "db3", "primary" : "shard3", "partitioned" : true }
db3.table1
shard key: { "id" : 1 }
unique: false
balancing: true
chunks:
shard3 1 # db3 is stored on shard3
{ "id" : { "$minKey" : 1 } } -->> { "id" : { "$maxKey" : 1 } } on : shard3 Timestamp(1, 0)
{ "_id" : "testdb", "primary" : "shard2", "partitioned" : true }
testdb.table1
shard key: { "id" : 1 }
unique: false
balancing: true
chunks:
shard2 1 # testdb is stored on shard2
{ "id" : { "$minKey" : 1 } } -->> { "id" : { "$maxKey" : 1 } } on : shard2 Timestamp(1, 0)

mongos>
As shown above, the databases we just created are distributed across the different shards, which proves the sharded cluster works.
The following command shows the stats of a given collection:
db.<collection>.stats()
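
For example, for the collection sharded earlier, run through mongos (the stats output of a sharded collection includes per-shard totals, and its sharded field should be true):

mongos> use testdb
switched to db testdb
mongos> db.table1.stats().sharded # expected: true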

MongoDB Backup and Restore

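As a minimal sketch of the usual workflow (the paths and database name below are examples, not from the original post): mongodump writes a BSON dump of a database, and mongorestore loads it back. Dumping through mongos backs up a sharded collection as a whole:

[root@localhost ~]# mongodump --host 192.168.70.128 --port 20000 -d testdb -o /tmp/mongobak # dump testdb through mongos
[root@localhost ~]# mongorestore --host 192.168.70.128 --port 20000 -d testdb --drop /tmp/mongobak/testdb # restore it, dropping existing collections first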
