MongoDB Sharded Cluster Deployment


Set the hostname on each node (run the matching command on its respective server):

hostnamectl set-hostname mongodb01
hostnamectl set-hostname mongodb02
hostnamectl set-hostname mongodb03

Run the following on all three servers. First add hostname-to-IP mappings to /etc/hosts (ip1/ip2/ip3 are placeholders for the real addresses):

ip1 mongodb01
ip2 mongodb02
ip3 mongodb03
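
A minimal sketch for adding these entries and verifying name resolution on every node (ip1/ip2/ip3 must be replaced with the actual IP addresses):

# Append the mappings to /etc/hosts (run on all three servers)
cat >> /etc/hosts <<EOF
ip1 mongodb01
ip2 mongodb02
ip3 mongodb03
EOF

# Confirm that each hostname now resolves
for h in mongodb01 mongodb02 mongodb03; do getent hosts "$h"; done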
mkdir -p /u01/app/mongodb/cluster/{config,mongos,shard1,shard2,shard3}/{data,logs}
yum install -y libcurl openssl xz-libs
tar -zxvf /u01/app/mongodb-linux-x86_64-rhel70-4.2.24.tgz -C /u01/app/mongodb --strip=1
echo "PATH=$PATH:/u01/app/mongodb/bin" > /etc/profile.d/mongodb.sh
source /etc/profile.d/mongodb.sh
mongo --version
groupadd mongod
useradd -g mongod mongod
chown -R mongod:mongod /u01/app/mongodb
yum install -y rsync
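
For reference, the brace expansion in the mkdir command above creates a data and logs subdirectory for each of the five components; the layout can be confirmed with a quick listing (sketch):

find /u01/app/mongodb/cluster -maxdepth 2 -type d | sort
# Expected: config, mongos, shard1, shard2 and shard3, each containing data/ and logs/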

All of the following operations are performed on the first node, mongodb01.

Create a base mongod.conf template; it will be copied into each component directory and then overwritten with component-specific settings:

vi /u01/app/mongodb/mongod.conf

systemLog:
  destination: file
  logAppend: true
  path: /var/log/mongodb/mongod.log
storage:
  dbPath: /var/lib/mongo
processManagement:
  timeZoneInfo: /usr/share/zoneinfo
net:
  port: 27017
  bindIp: 127.0.0.1
    
realpath /u01/app/mongodb/cluster/* | xargs -I {} cp /u01/app/mongodb/mongod.conf {}
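
The one-liner above simply copies the template into each of the five component directories; an equivalent, more explicit form (sketch):

for d in config mongos shard1 shard2 shard3; do
  cp /u01/app/mongodb/mongod.conf /u01/app/mongodb/cluster/$d/
done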
cat > /u01/app/mongodb/cluster/config/mongod.conf <<EOF
systemLog:
  destination: file
  logAppend: true
  path: /u01/app/mongodb/cluster/config/logs/mongod.log

# Where and how to store data.
storage:
  dbPath: /u01/app/mongodb/cluster/config/data
  journal:
    enabled: true

# how the process runs
processManagement:
  fork: true  # fork and run in background
  pidFilePath: /u01/app/mongodb/cluster/config/mongod.pid  # location of pidfile
  timeZoneInfo: /usr/share/zoneinfo

# network interfaces
net:
  port: 27018
  bindIp: mongodb01

sharding:
  clusterRole: configsvr

replication:
  replSetName: config
EOF
cat > /u01/app/mongodb/cluster/shard1/mongod.conf <<EOF
systemLog:
  destination: file
  logAppend: true
  path: /u01/app/mongodb/cluster/shard1/logs/mongod.log

# Where and how to store data.
storage:
  dbPath: /u01/app/mongodb/cluster/shard1/data
  journal:
    enabled: true

# how the process runs
processManagement:
  fork: true  # fork and run in background
  pidFilePath: /u01/app/mongodb/cluster/shard1/mongod.pid  # location of pidfile
  timeZoneInfo: /usr/share/zoneinfo

# network interfaces
net:
  port: 27019
  bindIp: mongodb01

sharding:
  clusterRole: shardsvr

replication:
  replSetName: shard1
EOF
cat > /u01/app/mongodb/cluster/shard2/mongod.conf <<EOF
systemLog:
  destination: file
  logAppend: true
  path: /u01/app/mongodb/cluster/shard2/logs/mongod.log

# Where and how to store data.
storage:
  dbPath: /u01/app/mongodb/cluster/shard2/data
  journal:
    enabled: true

# how the process runs
processManagement:
  fork: true  # fork and run in background
  pidFilePath: /u01/app/mongodb/cluster/shard2/mongod.pid  # location of pidfile
  timeZoneInfo: /usr/share/zoneinfo

# network interfaces
net:
  port: 27020
  bindIp: mongodb01

sharding:
  clusterRole: shardsvr

replication:
  replSetName: shard2
EOF

cat > /u01/app/mongodb/cluster/shard3/mongod.conf <<EOF
systemLog:
  destination: file
  logAppend: true
  path: /u01/app/mongodb/cluster/shard3/logs/mongod.log

# Where and how to store data.
storage:
  dbPath: /u01/app/mongodb/cluster/shard3/data
  journal:
    enabled: true

# how the process runs
processManagement:
  fork: true  # fork and run in background
  pidFilePath: /u01/app/mongodb/cluster/shard3/mongod.pid  # location of pidfile
  timeZoneInfo: /usr/share/zoneinfo

# network interfaces
net:
  port: 27021
  bindIp: mongodb01

sharding:
  clusterRole: shardsvr

replication:
  replSetName: shard3
EOF
cat > /u01/app/mongodb/cluster/mongos/mongod.conf <<EOF
systemLog:
  destination: file
  logAppend: true
  path: /u01/app/mongodb/cluster/mongos/logs/mongod.log

# how the process runs
processManagement:
  fork: true  # fork and run in background
  pidFilePath: /u01/app/mongodb/cluster/mongos/mongod.pid  # location of pidfile
  timeZoneInfo: /usr/share/zoneinfo

# network interfaces
net:
  port: 27017
  bindIp: mongodb01

sharding:
  configDB: config/mongodb01:27018,mongodb02:27018,mongodb03:27018
EOF
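
With all five configs written, the port and role assignments can be double-checked before syncing to the other nodes (sketch). Expected: mongos on 27017, the config server on 27018, and shard1/shard2/shard3 on 27019/27020/27021:

grep -H -E "port:|clusterRole:|replSetName:|configDB:" /u01/app/mongodb/cluster/*/mongod.conf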
rsync -avzp /u01/app/mongodb/cluster mongodb02:/u01/app/mongodb/
rsync -avzp /u01/app/mongodb/cluster mongodb03:/u01/app/mongodb/
On mongodb02, rewrite the bind address to the local hostname:

grep -rl bindIp /u01/app/mongodb/cluster/ | xargs sed -i 's#bindIp: mongodb01#bindIp: mongodb02#g'

On mongodb03:

grep -rl bindIp /u01/app/mongodb/cluster/ | xargs sed -i 's#bindIp: mongodb01#bindIp: mongodb03#g'

Verify the result on each node:

grep -rn "bindIp: mongodb0" /u01/app/mongodb/cluster/
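
Because each node's hostname matches its intended bind address, the same edit can also be written as a single command that works on both replica nodes (a sketch, assuming the hostnames were set as above):

grep -rl bindIp /u01/app/mongodb/cluster/ | xargs sed -i "s#bindIp: mongodb01#bindIp: $(hostname)#g"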

Start and initialize the config server replica set

Start the config server on all three nodes (the rs.initiate below requires every listed member to be reachable), then connect to one of them:

mongod -f /u01/app/mongodb/cluster/config/mongod.conf
mongo --host mongodb01 --port 27018
rs.initiate(
  {
    _id: "config",
    configsvr: true,
    members: [
      { _id : 0, host : "mongodb01:27018" },
      { _id : 1, host : "mongodb02:27018" },
      { _id : 2, host : "mongodb03:27018" }
    ]
  }
)
rs.status()
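
The replica set is healthy once rs.status() shows one PRIMARY and two SECONDARY members; a non-interactive spot check from the shell (sketch):

mongo --host mongodb01 --port 27018 --quiet --eval 'rs.status().members.forEach(function(m){ print(m.name + " " + m.stateStr); })'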

Start and initialize the shard replica sets

Start the three shard mongod instances on all three nodes:

mongod -f /u01/app/mongodb/cluster/shard1/mongod.conf
mongod -f /u01/app/mongodb/cluster/shard2/mongod.conf
mongod -f /u01/app/mongodb/cluster/shard3/mongod.conf

Initialize shard1:

[root@mongodb01 ~]# mongo --host mongodb01 --port 27019


rs.initiate(
  {
    _id: "shard1",
    members: [
      { _id : 0, host : "mongodb01:27019" },
      { _id : 1, host : "mongodb02:27019" },
      { _id : 2, host : "mongodb03:27019" }
    ]
  }
)

Initialize shard2:

[root@mongodb01 ~]# mongo --host mongodb01 --port 27020


rs.initiate(
  {
    _id: "shard2",
    members: [
      { _id : 0, host : "mongodb02:27020" },
      { _id : 1, host : "mongodb01:27020" },
      { _id : 2, host : "mongodb03:27020" }
    ]
  }
)

Initialize shard3:

[root@mongodb01 ~]# mongo --host mongodb01 --port 27021


rs.initiate(
  {
    _id: "shard3",
    members: [
      { _id : 0, host : "mongodb03:27021" },
      { _id : 1, host : "mongodb02:27021" },
      { _id : 2, host : "mongodb01:27021" }
    ]
  }
)
rs.status()
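
As with the config server, each shard set can be spot-checked non-interactively; a small loop over the three shard ports (sketch):

for p in 27019 27020 27021; do
  echo "== port $p =="
  mongo --host mongodb01 --port $p --quiet --eval 'print(rs.status().set + " primary: " + rs.isMaster().primary)'
done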

Start mongos and register the shards

mongos -f /u01/app/mongodb/cluster/mongos/mongod.conf
[root@mongodb01 ~]# mongo --host mongodb01 --port 27017
sh.addShard( "shard1/mongodb01:27019,mongodb02:27019,mongodb03:27019")
sh.addShard( "shard2/mongodb01:27020,mongodb02:27020,mongodb03:27020")
sh.addShard( "shard3/mongodb01:27021,mongodb02:27021,mongodb03:27021")
sh.status()
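
To confirm that all three shards registered without reading the full sh.status() report, the listShards admin command gives a compact view (sketch):

mongo --host mongodb01 --port 27017 --quiet --eval 'db.adminCommand({ listShards: 1 }).shards.forEach(function(s){ print(s._id + " -> " + s.host); })'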
[root@mongodb01 ~]# ps -ef | grep mongod | grep -v grep
root      37847      1  5 17:53 ?        00:01:43 mongod -f /u01/app/mongodb/cluster/config/mongod.conf
root      37993      1  3 18:01 ?        00:00:49 mongod -f /u01/app/mongodb/cluster/shard1/mongod.conf
root      38036      1  3 18:01 ?        00:00:48 mongod -f /u01/app/mongodb/cluster/shard2/mongod.conf
root      38079      1  3 18:01 ?        00:00:50 mongod -f /u01/app/mongodb/cluster/shard3/mongod.conf
root      38329      1  0 18:13 ?        00:00:06 mongos -f /u01/app/mongodb/cluster/mongos/mongod.conf

At this point the sharded cluster is fully deployed; next, shard some data.

Enable sharding for a database

Before a collection can be sharded, sharding must be enabled for its database. Connect to the shell of any mongos node; here, the mongos on mongodb01 is used.

mongo --host mongodb01 --port 27017
use testdb;

Shard the collection

sh.enableSharding("testdb")
sh.shardCollection("testdb.order", {"_id": "hashed" })
use testdb

for (i = 1; i <= 1000; i=i+1){
    db.order.insert({'price': 1})
}
db.order.find().count()
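
From the same mongos, the collection's distribution across shards can also be inspected directly, a quick sanity check before visiting each shard (sketch; the exact counts vary because the _id values are hashed):

mongo --host mongodb01 --port 27017 --quiet --eval 'db.getSiblingDB("testdb").order.getShardDistribution()'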
[root@mongodb01 ~]# mongo --host mongodb01 --port 27019

shard1:PRIMARY> use testdb
switched to db testdb
shard1:PRIMARY> db.order.find().count()
324

[root@mongodb01 ~]# mongo --host mongodb01 --port 27020

shard2:PRIMARY> use testdb
switched to db testdb
shard2:PRIMARY> db.order.find().count()
329

[root@mongodb01 ~]# mongo --host mongodb01 --port 27021

shard3:PRIMARY> use testdb
switched to db testdb
shard3:PRIMARY> db.order.find().count()
347
Routine start order (config server first, then the shards, then mongos):

mongod -f /u01/app/mongodb/cluster/config/mongod.conf
mongod -f /u01/app/mongodb/cluster/shard1/mongod.conf
mongod -f /u01/app/mongodb/cluster/shard2/mongod.conf
mongod -f /u01/app/mongodb/cluster/shard3/mongod.conf
mongos -f /u01/app/mongodb/cluster/mongos/mongod.conf

Shut down in the reverse order. mongos keeps no data directory, so stop it through its pid file; the mongod instances support --shutdown:

kill $(cat /u01/app/mongodb/cluster/mongos/mongod.pid)
mongod --shutdown --dbpath /u01/app/mongodb/cluster/shard3/data
mongod --shutdown --dbpath /u01/app/mongodb/cluster/shard2/data
mongod --shutdown --dbpath /u01/app/mongodb/cluster/shard1/data
mongod --shutdown --dbpath /u01/app/mongodb/cluster/config/data

If an instance was stopped uncleanly, its data files can be repaired while it is down, for example for shard1:

mongod --dbpath /u01/app/mongodb/cluster/shard1/data --repair
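
An alternative to --shutdown, useful when only the hostname and port are known, is the shutdownServer helper (a sketch; on a primary it waits for a caught-up secondary before stopping, and the shell reporting a dropped connection afterwards is expected):

mongo --host mongodb01 --port 27019 --quiet --eval 'db.getSiblingDB("admin").shutdownServer()'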

Run the following on the primary node.

// View the status of each replica set member
rs.status()
// Manual primary/secondary switchover
// (view the current member configuration with rs.conf())
var cfg = rs.conf()
cfg.members[2].priority = 10   // member indexes run 0-2 in a three-member set
rs.reconfig(cfg)

Note: rs.stepDown() can be used instead. It demotes the primary to SECONDARY and keeps it ineligible for 60 seconds; if no new primary has been elected within that window, the node may stand for election again.

To remove a member that is a SECONDARY or ARBITER, run rs.remove(); if the member is the PRIMARY, switch the primary over first and then remove it.
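
As a concrete illustration of the two maintenance paths above, a sketch using shard1 from this deployment (adjust the host to whichever member is currently primary):

# Ask the current primary of shard1 to step down and stay ineligible for 120 seconds
mongo --host mongodb01 --port 27019 --quiet --eval 'rs.stepDown(120)'

# Once a new primary has been elected, remove a secondary member
# (run against the new primary, here assumed to be mongodb02)
mongo --host mongodb02 --port 27019 --quiet --eval 'rs.remove("mongodb03:27019")'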