MongoDB 3.4 副本集 搭建 + 备份恢复
mongodb 3.4 数据下载地址:https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-3.4.4.tgz
安装准备 (三台机器)
192.168.0.188:27017(primary) 主节点
port=27017 #端口
dbpath= /usr/src/node1/data#数据文件存放目录
logpath= /usr/src/node1/mongodb.log #日志文件存放目录
logappend=true #使用追加的方式写日志
fork=true #以守护程序的方式启用,即在后台运行
maxConns=500 #最大同时连接数
bind_ip=0.0.0.0 #监听所有网卡,允许任意IP访问(若只允许本机访问应改为127.0.0.1)
noauth=true #不启用验证
#auth=true #启用验证
replSet=test # 副本集名称
oplogSize=200 # 200M
keyFile = /usr/src/mongodb.key
192.168.0.188:27018(secondary) 从节点
port=27018 #端口
dbpath= /usr/src/node2/data#数据文件存放目录
logpath= /usr/src/node2/mongodb.log #日志文件存放目录
logappend=true #使用追加的方式写日志
fork=true #以守护程序的方式启用,即在后台运行
maxConns=500 #最大同时连接数
bind_ip=0.0.0.0 #监听所有网卡,允许任意IP访问(若只允许本机访问应改为127.0.0.1)
#noauth=true #不启用验证
#auth=true #启用验证
replSet=test # 副本集名称
oplogSize=200 # 200M
keyFile = /usr/src/mongodb.key
192.168.0.188:27019(secondary)
port=27019 #端口
dbpath= /usr/src/node3/data#数据文件存放目录
logpath= /usr/src/node3/mongodb.log #日志文件存放目录
logappend=true #使用追加的方式写日志
fork=true #以守护程序的方式启用,即在后台运行
maxConns=500 #最大同时连接数
bind_ip=0.0.0.0 #监听所有网卡,允许任意IP访问(若只允许本机访问应改为127.0.0.1)
#noauth=true #不启用验证
#auth=true #启用验证
replSet=test # 副本集名称
oplogSize=200 # 200M
keyFile = /usr/src/mongodb.key
进入mongodb配置副本集
mongod --config /usr/src/node1/mongodb.conf #启动主节点
连接主,在主上运行命令mongo
#创建认证的用户名admin跟密码123456
# db.createUser({user:"admin",pwd:"123456", roles:[{role:"root",db:"admin"}]});
>use admin
>db.createUser(
... {
... user:"admin",
... pwd:"123456",
... roles:[{role:"root",db:"admin"}]
... }
... );
#配置认证以后需要打开主节点的配置文件选择启动验证
#然后配置一个密钥
root@ubuntu:openssl rand -base64 756 > /usr/src/mongodb.key  #注意要用 > 重定向写入文件;keyFile内容不能超过1024个base64字符,故这里生成756字节
root@ubuntu:chmod 600 /usr/src/mongodb.key
#然后开启全部节点
mongod --config /usr/src/node1/mongodb.conf
mongod --config /usr/src/node2/mongodb.conf
mongod --config /usr/src/node3/mongodb.conf
#进入主节点,验证密码
>use admin
>db.auth('admin','123456')
1
#然后配置副本集 priority 越大 成为 主的权重越大
> config={_id:"test",members:[{_id:0,host:"192.168.0.188:27017",priority:10},{_id:1,host:"192.168.0.188:27018",priority:9},{_id:2,host:"192.168.0.188:27019",priority:8}]}  //注意priority是member的独立字段,不能写在host字符串里;27017优先级最高,与上文主节点规划一致
>rs.initiate(config) #初始化配置
rs.status() //查看状态
{
"members" : [
{
"_id" : 0,
"name" : "192.168.0.188:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 9807,
},
{
"_id" : 1,
"name" : "192.168.0.188:27018",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 9715,
},
{
"_id" : 2,
"name" : "192.168.0.188:27019",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 9711,
}
],
"ok" : 1
}
cfg = rs.conf()
# 配置延迟备份节点
cfg.members[2].priority = 0
cfg.members[2].hidden = true
cfg.members[2].slaveDelay = 3600 #延迟多少秒
#重新配置使生效
rs.reconfig(cfg)
这样一来 副本集就搭建好了 然后做一下测试 (主备自动切换)
#首先我们停掉主节点
root 10049 0.6 0.5 739284 93844 ? Sl 12:10 1:13 mongod --config /usr/src/node1/mongodb.conf
root 10137 0.6 0.6 743792 100336 ? Sl 12:10 1:12 mongod --config /usr/src/node2/mongodb.conf
root 10221 0.5 0.3 545516 61560 ? Sl 12:10 1:03 mongod --config /usr/src/node3/mongodb.conf
kill -9 10049
#停掉主节点,然后启动从节点
mongo 192.168.0.188:27018
{
"_id" : 0,
"name" : "192.168.0.188:27017",
"health" : 0,
"state" : 8,
"stateStr" : "(not reachable/healthy)",
"uptime" : 0,
},
{
"_id" : 1,
"name" : "192.168.0.188:27018",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY", # 从节点已经代替主节点
"uptime" : 11151,
},
{
"_id" : 2,
"name" : "192.168.0.188:27019",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 11140,
}
# 从节点已经代替主节点,那再启动原来的主节点,看看会不会自动切换回来
rs.status()
{
"_id" : 0,
"name" : "192.168.0.188:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY", #已经切换主节点
"uptime" : 105,
},
{
"_id" : 1,
"name" : "192.168.0.188:27018",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 104,
},
{
"_id" : 2,
"name" : "192.168.0.188:27019",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
}
可以看到 已经切换到主节点了
然后我们做一次备份还原数据的测试
创建两个测试表,一个test,一个tb1:
db.test.save({name:"apple"});
for(var i = 1; i <=100; i++) {db.tb1.insert({name:"hank"+i});}
再备份一次数据库
mongodump -h 192.168.0.188 -o /root/backup/
进入数据库做一些操作
db.test.drop();
db.tb1.remove({});
db.tb1.insert({name:"xxxxxxxxxxxxxxxxxxxx"});
现在,我们要恢复到操作之前,怎么做呢,第一个就是用我们刚才全备直接restore回去,另外如果想恢复到任何一个点的话,那么就需要oplog
导出oplog
mongodump -h 192.168.0.188 -d local -c oplog.rs -o /root/backup/
2017-05-19T15:26:15.686+0800 writing local.oplog.rs to
2017-05-19T15:26:15.748+0800 done dumping local.oplog.rs (10458 documents)
使用bsondump 查看oplog日志
root@ubuntu:~/backup/local# bsondump oplog.rs.bson |grep drop 对drop来进行过滤
{"ts":{"$timestamp":{"t":1495179002,"i":1}},"t":{"$numberLong":"15"},"h":{"$numberLong":"2437177398902103200"},"v":2,"op":"c","ns":"test.$cmd","o":{"drop":"test"}}
2017-05-19T15:27:36.202+0800 10458 objects found
找到这条记录以后 我们记住这个时间戳:{"t":1495179002,"i":1} 然后我们使用mongorestore
mongorestore -h 192.168.0.188 --oplogReplay --oplogLimit "1495179002:1" /root/backup/
2017-05-19T15:32:30.821+0800 no indexes to restore
2017-05-19T15:32:30.821+0800 finished restoring b_tuxi_logs.users (14 documents)
2017-05-19T15:32:30.863+0800 error: E11000 duplicate key error collection: tuxi2.pending index: _id_ dup key: { : ObjectId('591e540da26f3b893f77bac6') }
2017-05-19T15:32:30.863+0800 no indexes to restore
2017-05-19T15:32:30.863+0800 finished restoring tuxi2.pending (1 document)
2017-05-19T15:32:30.863+0800 no indexes to restore
2017-05-19T15:32:30.863+0800 finished restoring test.test (1 document)
2017-05-19T15:32:30.863+0800 no indexes to restore
2017-05-19T15:32:30.863+0800 finished restoring a.b (1 document)
2017-05-19T15:32:30.863+0800 restoring users from /root/backup/admin/system.users.bson
2017-05-19T15:32:31.458+0800 replaying oplog
2017-05-19T15:32:32.684+0800 oplog 577KB
2017-05-19T15:32:34.263+0800 oplog 2.03MB
2017-05-19T15:32:34.263+0800 done
a:PRIMARY> db.tb1.find()
{ "_id" : ObjectId("591e9f03c7fc4390ee9a9fa8"), "name" : "xxxxxxxxxxxxxxxxxxxx" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403b3"), "name" : "hank1" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403b4"), "name" : "hank2" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403b5"), "name" : "hank3" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403b6"), "name" : "hank4" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403b7"), "name" : "hank5" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403b8"), "name" : "hank6" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403b9"), "name" : "hank7" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403ba"), "name" : "hank8" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403bb"), "name" : "hank9" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403bc"), "name" : "hank10" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403bd"), "name" : "hank11" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403be"), "name" : "hank12" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403bf"), "name" : "hank13" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403c0"), "name" : "hank14" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403c1"), "name" : "hank15" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403c2"), "name" : "hank16" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403c3"), "name" : "hank17" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403c4"), "name" : "hank18" }
{ "_id" : ObjectId("591e9da0f7a418c28d9403c5"), "name" : "hank19" }
Type "it" for more
可以看到 数据都已经恢复完全
结尾
mongodb的备份恢复 只要有oplog日志,就可以根据日志的时间戳恢复到任意一个时间点。并且因为oplog是一个固定大小、循环覆盖的集合,所以需要根据业务情况来合理地设置oplog的日志大小。