MongoDB Sharded Cluster Deployment

Application installation
# Install the MongoDB service via yum
cat > /etc/yum.repos.d/mongodb-org-4.4.repo << EOF
[mongodb-org-4.4]
name=MongoDB Repository
baseurl=https://mirrors.tuna.tsinghua.edu.cn/mongodb/yum/el8-4.4/
gpgcheck=0
enabled=1
EOF
yum install -y mongodb-org
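
Once the installation finishes, confirm the binaries are on the PATH and report the expected 4.4 version:

# Verify the installed binaries
mongod --version
mongos --version
mongo --version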
Replica cluster information
IP               Services                Ports               Notes
172.29.203.98    Mongos/config/mongod    28017/28018/28019
172.29.203.74    Mongos/config/mongod    28017/28018/28019
172.29.203.99    Mongos/config/mongod    28017/28018/28019

This example deploys only a single shard; a real production cluster should be configured with multiple shards.

Service deployment
Directory initialization
# Create the service directories
mkdir -p /opt/app/mongo/{28017,28018,28019}/{logs,data,conf,run}
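
If the services will run as the dedicated mongod user created by the yum package rather than as root, the service tree needs matching ownership; a sketch under that assumption:

# Grant the mongod user ownership of the service tree (skip if everything runs as root)
chown -R mongod:mongod /opt/app/mongo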
mongod service configuration file

Port 28019

# Create the mongod config file; bindIp uses the host's first non-loopback IPv4 address
ip=$(ifconfig -a | grep inet | grep -v 127.0.0.1 | grep -v inet6 | awk '{print $2}' | tr -d "addr:" | head -1)

cat > /opt/app/mongo/28019/conf/mongod.yaml << EOF
---
systemLog:
  verbosity: 1
  quiet: false
  path: /opt/app/mongo/28019/logs/mongod.log
  destination: file
  timeStampFormat: iso8601-local

processManagement:
  fork: true
  pidFilePath: /opt/app/mongo/28019/run/mongod.pid

cloud:
  monitoring:
    free:
      state: runtime
net:
  port: 28019
  bindIp: $ip,/opt/app/mongo/28019/run/mongod.socket
  maxIncomingConnections: 65536
  wireObjectCheck: true
  compression:
    compressors: snappy,zstd,zlib
  serviceExecutor: synchronous

storage:
  dbPath: /opt/app/mongo/28019/data
  engine: wiredTiger
  oplogMinRetentionHours: 48
  wiredTiger:
    engineConfig:
      cacheSizeGB: 1
      journalCompressor: snappy
      directoryForIndexes: true

operationProfiling:
  mode: slowOp
  slowOpThresholdMs: 100
  slowOpSampleRate: 1.0

replication:
  oplogSizeMB: 20480
  replSetName: rs-1

sharding:
  clusterRole: shardsvr

EOF


# Start the service
mongod -f /opt/app/mongo/28019/conf/mongod.yaml
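
Before initializing the replica set, confirm the process started and is listening on its port; for example:

# Confirm mongod is up and listening on 28019
ss -tlnp | grep 28019
tail -n 20 /opt/app/mongo/28019/logs/mongod.log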
Replica set initialization

Log in to any node and initialize the replica set.

# Log in to any node
mongo --port 28019 --host 172.29.203.99
# Initialize the replica set
rs.initiate()
# Add the other two nodes
rs.add('172.29.203.98:28019')
rs.add('172.29.203.74:28019')
# Check the replica set status
rs.status()
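
rs.status() returns a large document; to check just each member's role, a short mongo shell one-liner (expect one PRIMARY and two SECONDARY members):

# Print each member's host and replication state
rs.status().members.forEach(function(m) { print(m.name, m.stateStr) })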
Config server configuration file

Port 28018

# Create the config server config file; bindIp uses the host's first non-loopback IPv4 address
ip=$(ifconfig -a | grep inet | grep -v 127.0.0.1 | grep -v inet6 | awk '{print $2}' | tr -d "addr:" | head -1)

cat > /opt/app/mongo/28018/conf/mongod.yaml << EOF
---
systemLog:
  verbosity: 1
  quiet: false
  path: /opt/app/mongo/28018/logs/mongod.log
  destination: file
  timeStampFormat: iso8601-local

processManagement:
  fork: true
  pidFilePath: /opt/app/mongo/28018/run/mongod.pid

cloud:
  monitoring:
    free:
      state: runtime
net:
  port: 28018
  bindIp: $ip,/opt/app/mongo/28018/run/mongod.socket
  maxIncomingConnections: 65536
  wireObjectCheck: true
  compression:
    compressors: snappy,zstd,zlib
  serviceExecutor: synchronous

storage:
  dbPath: /opt/app/mongo/28018/data
  engine: wiredTiger
  oplogMinRetentionHours: 48
  wiredTiger:
    engineConfig:
      cacheSizeGB: 1
      journalCompressor: snappy
      directoryForIndexes: true

operationProfiling:
  mode: slowOp
  slowOpThresholdMs: 100
  slowOpSampleRate: 1.0

replication:
  oplogSizeMB: 20480
  replSetName: cf-1
sharding:
  clusterRole: configsvr

EOF


# Start the service
mongod -f /opt/app/mongo/28018/conf/mongod.yaml
Replica set initialization
# Log in to any node
mongo --port 28018 --host 172.29.203.99
# Initialize the replica set
rs.initiate()
# Add the other two nodes
rs.add('172.29.203.98:28018')
rs.add('172.29.203.74:28018')
# Check the replica set status
rs.status()
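
As an alternative to rs.initiate() followed by two rs.add() calls, a replica set can be initialized in a single step by passing a full configuration document. A sketch for the config server replica set on this cluster's hosts (configsvr: true is required for config server replica sets):

# One-shot initialization with an explicit member list (run on any one node)
rs.initiate({
  _id: "cf-1",
  configsvr: true,
  members: [
    { _id: 0, host: "172.29.203.99:28018" },
    { _id: 1, host: "172.29.203.98:28018" },
    { _id: 2, host: "172.29.203.74:28018" }
  ]
})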
Mongos service deployment
# bindIp uses the host's first non-loopback IPv4 address
ip=$(ifconfig -a | grep inet | grep -v 127.0.0.1 | grep -v inet6 | awk '{print $2}' | tr -d "addr:" | head -1)


cat > /opt/app/mongo/28017/conf/mongos.yaml << EOF
---
systemLog:
  verbosity: 1
  quiet: false
  path: /opt/app/mongo/28017/logs/mongos.log
  destination: file
  timeStampFormat: iso8601-local

processManagement:
  fork: true
  pidFilePath: /opt/app/mongo/28017/run/mongos.pid

net:
  port: 28017
  bindIp: $ip,/opt/app/mongo/28017/run/mongos.socket
  maxIncomingConnections: 65536
  wireObjectCheck: true
  compression:
    compressors: snappy,zstd,zlib
  serviceExecutor: synchronous

sharding:
  configDB: cf-1/172.29.203.98:28018,172.29.203.74:28018,172.29.203.99:28018

EOF

# Start the service
mongos -f /opt/app/mongo/28017/conf/mongos.yaml
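
A quick non-interactive check that mongos is up and can reach the config servers:

# Query the sharding status through mongos without opening an interactive shell
mongo --port 28017 --host 172.29.203.99 --eval 'sh.status()'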
Adding the shard
# Log in to any mongos node
mongo --port 28017 --host 172.29.203.99
# Add the shard to the cluster
sh.addShard("rs-1/172.29.203.98:28019,172.29.203.99:28019,172.29.203.74:28019")
# Check the sharding status
sh.status()
Sharding configuration
# Enable sharding on the test database
sh.enableSharding("test")
# Create the shard key rule (the collection name must match the one used for test data below)
sh.shardCollection('test.test_shard',{name:1})
# Generate test data in bulk
use test
for (i = 1; i <= 200000; i++) db.test_shard.insert({age:(i%100), name:"user"+i, create_at:new Date()})
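
After the inserts finish, the distribution of documents and chunks across shards can be inspected on the collection itself; with only one shard in this example, everything will reside on rs-1:

# Show per-shard data and document counts for the sharded collection
db.test_shard.getShardDistribution()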
Sharded cluster operations
Removing a shard

Before removing a shard, you must ensure that its data has been migrated to the other shards in the cluster. This section describes how to drain a shard's data and remove it safely.

For the data migration to succeed, the balancer must be enabled; verify this with sh.getBalancerState() in the mongo shell.

# List the shards in the cluster
db.adminCommand( { listShards: 1 } )
# Drain the data off a shard and remove it
use admin
# For example, suppose the cluster has a shard backed by a replica set named rs-3
db.runCommand( { removeShard: "rs-3" } )
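
removeShard does not finish immediately: the first call starts draining, and repeated calls report progress until the returned state is "completed". A polling sketch for the rs-3 example (run in the mongo shell against the admin database):

# Re-run removeShard until draining completes (state: started -> ongoing -> completed)
var res = db.runCommand({ removeShard: "rs-3" })
while (res.state != "completed") {
    if (res.remaining) print("chunks remaining: " + res.remaining.chunks)
    sleep(5000)
    res = db.runCommand({ removeShard: "rs-3" })
}
print("shard rs-3 removed")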

Adding a shard
# On a mongos, add the new shard to the cluster
sh.addShard("rs-3/172.29.203.58:28018")
# Note: the shard being added must not contain any database that already exists in the cluster.

Other commands
# Enable or disable the balancer
sh.setBalancerState(true|false)
# Get the balancer state
sh.getBalancerState()
# Check whether a data migration is currently in progress
sh.isBalancerRunning()
# Set the balancer's active window. Once activeWindow is set, the balancer cannot be started with sh.startBalancer(). The settings collection lives in the config database:
use config
db.settings.update(
   { _id: "balancer" },
   { $set: { activeWindow : { start : "<start-time>", stop : "<stop-time>" } } },
   { upsert: true }
)
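
For example, to restrict balancing to a 01:00-05:00 window (times are HH:MM in 24-hour format, evaluated against the config server's clock; the values here are purely illustrative):

# Example: only balance between 01:00 and 05:00 (illustrative values)
use config
db.settings.update(
   { _id: "balancer" },
   { $set: { activeWindow : { start : "01:00", stop : "05:00" } } },
   { upsert: true }
)
# Confirm the configured window
db.settings.find({ _id: "balancer" })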
# Remove the balancer window
use config
db.settings.update({ _id : "balancer" }, { $unset : { activeWindow : true } })
# Confirm that data migration has completed
use config
while ( sh.isBalancerRunning() ) {
    print("waiting...");
    sleep(1000);
}
