MongoDB 4.4 Sharded Cluster Setup

Environment

CentOS 7, MongoDB 4.4.13

Host             mongos   configsvr   shard1              shard2              shard3
192.168.56.155   27017    27019       29017 (primary)     29018 (arbiter)     29019 (secondary)
192.168.56.156   27017    27019       29017 (secondary)   29018 (primary)     29019 (arbiter)
192.168.56.157   27017    27019       29017 (arbiter)     29018 (secondary)   29019 (primary)

Configuration Files

  1. Create the directories (run the same commands on all three servers):
mkdir -p ./mongodb/{data,logs,apps,run}
mkdir -p ./mongodb/data/shard{1,2,3}
mkdir -p ./mongodb/data/config
mkdir -p ./mongodb/conf
mkdir -p /tmp/mongodb/configsvr
mkdir -p /tmp/mongodb/shard1
mkdir -p /tmp/mongodb/shard2
mkdir -p /tmp/mongodb/shard3
mkdir -p /tmp/mongodb/mongos
  • Keyfile Security
    • Enforce keyfile access control across the cluster. Create a keyfile (the same file must be present on all three servers; a distribution sketch follows the commands below):
openssl rand -base64 756 > /home/bigdata/env/mongodb/conf/mongodb.key
chmod 400 /home/bigdata/env/mongodb/conf/mongodb.key
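
The keyfile must be identical on all three servers. A minimal distribution sketch, assuming the bigdata account and the server2/server3 hostnames used by the cluster script at the end of this guide:

# copy the keyfile to the other two servers and lock down its permissions there
for host in server2 server3; do
  scp /home/bigdata/env/mongodb/conf/mongodb.key bigdata@$host:/home/bigdata/env/mongodb/conf/
  ssh bigdata@$host "chmod 400 /home/bigdata/env/mongodb/conf/mongodb.key"
done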
  2. Create the configuration files (run the same steps on all three servers).
    Create the configsvr.yml config file:
vim ./mongodb/conf/configsvr.yml

Contents:

systemLog:
  destination: "file"
  logAppend: true
  path: "/home/bigdata/env/mongodb/logs/configsvr.log"
storage:
  dbPath: "/home/bigdata/env/mongodb/data/config"
  journal:
    enabled: true
  engine: "wiredTiger"
  wiredTiger:
    engineConfig:
      directoryForIndexes: true
      cacheSizeGB: 2
processManagement:
  fork: true
  pidFilePath: "/home/bigdata/env/mongodb/run/configsvr.pid"
  timeZoneInfo: "/usr/share/zoneinfo"
net:
  port: 27019
  bindIp: "::,0.0.0.0"
  maxIncomingConnections: 5000
  unixDomainSocket:
    enabled: true
    pathPrefix: "/tmp/mongodb/configsvr"
    filePermissions: 0700
setParameter:
  enableLocalhostAuthBypass: true
security:
  clusterAuthMode: "keyFile"
  keyFile: "/home/bigdata/env/mongodb/conf/mongodb.key"
  authorization: "enabled"
replication:
  replSetName: "replCluster"
sharding:
  clusterRole: "configsvr"

Create the shard config files:

vim ./mongodb/conf/shard1.yml

Contents:

systemLog:
  destination: "file"
  logAppend: true
  path: "/home/bigdata/env/mongodb/logs/shard1.log"
storage:
  dbPath: "/home/bigdata/env/mongodb/data/shard1"
  journal:
    enabled: true
  engine: "wiredTiger"
  wiredTiger:
    engineConfig:
      directoryForIndexes: true
      cacheSizeGB: 2
processManagement:
  fork: true
  pidFilePath: "/home/bigdata/env/mongodb/run/shard1.pid"
  timeZoneInfo: "/usr/share/zoneinfo"
net:
  port: 29017
  bindIp: "::,0.0.0.0"
  maxIncomingConnections: 5000
  unixDomainSocket:
    enabled: true
    pathPrefix: "/tmp/mongodb/shard1"
    filePermissions: 0700
setParameter:
  enableLocalhostAuthBypass: true
security:
  clusterAuthMode: "keyFile"
  keyFile: "/home/bigdata/env/mongodb/conf/mongodb.key"
  authorization: "enabled"
replication:
  replSetName: "shard1"
sharding:
  clusterRole: "shardsvr"

vim ./mongodb/conf/shard2.yml

Contents:

systemLog:
  destination: "file"
  logAppend: true
  path: "/home/bigdata/env/mongodb/logs/shard2.log"
storage:
  dbPath: "/home/bigdata/env/mongodb/data/shard2"
  journal:
    enabled: true
  engine: "wiredTiger"
  wiredTiger:
    engineConfig:
      directoryForIndexes: true
      cacheSizeGB: 2
processManagement:
  fork: true
  pidFilePath: "/home/bigdata/env/mongodb/run/shard2.pid"
  timeZoneInfo: "/usr/share/zoneinfo"
net:
  port: 29018
  bindIp: "::,0.0.0.0"
  maxIncomingConnections: 5000
  unixDomainSocket:
    enabled: true
    pathPrefix: "/tmp/mongodb/shard2"
    filePermissions: 0700
setParameter:
  enableLocalhostAuthBypass: true
security:
  clusterAuthMode: "keyFile"
  keyFile: "/home/bigdata/env/mongodb/conf/mongodb.key"
  authorization: "enabled"
replication:
  replSetName: "shard2"
sharding:
  clusterRole: "shardsvr"

vim ./mongodb/conf/shard3.yml

Contents:

systemLog:
  destination: "file"
  logAppend: true
  path: "/home/bigdata/env/mongodb/logs/shard3.log"
storage:
  dbPath: "/home/bigdata/env/mongodb/data/shard3"
  journal:
    enabled: true
  engine: "wiredTiger"
  wiredTiger:
    engineConfig:
      directoryForIndexes: true
      cacheSizeGB: 2
processManagement:
  fork: true
  pidFilePath: "/home/bigdata/env/mongodb/run/shard3.pid"
  timeZoneInfo: "/usr/share/zoneinfo"
net:
  port: 29019
  bindIp: "::,0.0.0.0"
  maxIncomingConnections: 5000
  unixDomainSocket:
    enabled: true
    pathPrefix: "/tmp/mongodb/shard3"
    filePermissions: 0700
setParameter:
  enableLocalhostAuthBypass: true
security:
  clusterAuthMode: "keyFile"
  keyFile: "/home/bigdata/env/mongodb/conf/mongodb.key"
  authorization: "enabled"
replication:
  replSetName: "shard3"
sharding:
  clusterRole: "shardsvr"

Create the mongos.yml config file:

vim ./mongodb/conf/mongos.yml

Contents:

systemLog:
  destination: "file"
  logAppend: true
  path: "/home/bigdata/env/mongodb/logs/mongos.log"
processManagement:
  fork: true
  pidFilePath: "/home/bigdata/env/mongodb/run/mongos.pid"
  timeZoneInfo: "/usr/share/zoneinfo"
net:
  port: 27017
  bindIp: "::,0.0.0.0"
  maxIncomingConnections: 5000
  unixDomainSocket:
    enabled: true
    pathPrefix: "/tmp/mongodb/mongos"
    filePermissions: 0700
setParameter:
  enableLocalhostAuthBypass: true
security:
  clusterAuthMode: "keyFile"
  keyFile: "/home/bigdata/env/mongodb/conf/mongodb.key"
replication:
  localPingThresholdMs: 15
sharding:
  # replCluster here must match the replication.replSetName value configured in configsvr.yml
  configDB: "replCluster/192.168.56.155:27019,192.168.56.156:27019,192.168.56.157:27019"
  3. Start the configsvr service (run on all three servers):
./bin/mongod --config ./conf/configsvr.yml
ps -ef | grep mongod
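
Optionally, confirm that the config server is listening on its port (the same kind of check used for the shards below):

netstat -ntlp | grep 27019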
  4. Connect to one of the servers and initialize the config server replica set:
mongo localhost:27019
rs.initiate({_id: "replCluster", configsvr: true, members: [{_id: 0, host: "192.168.56.155:27019"},{_id: 1,host: "192.168.56.156:27019"},{_id: 2,host: "192.168.56.157:27019"}]})

rs.status()
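
rs.status() is verbose; as a quick sanity check you can print just each member's name and state in the same mongo shell (a convenience snippet, not required by the setup):

// expect one PRIMARY and two SECONDARY members once the election has completed
rs.status().members.forEach(function (m) { print(m.name, m.stateStr); });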
  5. Deploy the shard1 replica set by starting the shard1 instance (run on all three servers):
./bin/mongod --config ./conf/shard1.yml

netstat -ntlp | grep 29017
  6. Connect to one of the servers and initialize shard1:
./bin/mongo localhost:29017

rs.initiate({_id:"shard1",members:[{_id: 0, host: "192.168.56.155:29017"},{_id: 1,host: "192.168.56.156:29017"},{_id: 2,host: "192.168.56.157:29017"}]})

rs.status()
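
Note that the rs.initiate() call above creates three data-bearing members, whereas the table in the Environment section plans one arbiter per shard. If you want the arbiter layout, a hedged sketch for shard1 (arbiter on 192.168.56.157, per the table) looks like this; shard2 and shard3 would follow the same pattern with their arbiters on 192.168.56.155 and 192.168.56.156:

rs.initiate({
  _id: "shard1",
  members: [
    { _id: 0, host: "192.168.56.155:29017" },                    // data-bearing, initial primary candidate
    { _id: 1, host: "192.168.56.156:29017" },                    // data-bearing secondary
    { _id: 2, host: "192.168.56.157:29017", arbiterOnly: true }  // arbiter: votes, holds no data
  ]
})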
  7. Deploy the shard2 replica set by starting the shard2 instance (run on all three servers):
./bin/mongod --config ./conf/shard2.yml

netstat -ntlp | grep 29018
  8. Connect to one of the servers and initialize shard2:
./bin/mongo localhost:29018

rs.initiate({_id:"shard2",members:[{_id: 0, host: "192.168.56.155:29018"},{_id: 1,host: "192.168.56.156:29018"},{_id: 2,host: "192.168.56.157:29018"}]})

rs.status()
  9. Deploy the shard3 replica set by starting the shard3 instance (run on all three servers):
./bin/mongod --config ./conf/shard3.yml

netstat -ntlp | grep 29019
  10. Connect to one of the servers and initialize shard3:
./bin/mongo localhost:29019

rs.initiate({_id:"shard3",members:[{_id: 0, host: "192.168.56.155:29019"},{_id: 1,host: "192.168.56.156:29019"},{_id: 2,host: "192.168.56.157:29019"}]})

rs.status()
  11. Initialize the mongos service:
[bigdata@server1 mongodb]$ ./bin/mongos --config ./conf/mongos.yml

[bigdata@server1 mongodb]$ mongo localhost:27017
MongoDB shell version v4.4.13
connecting to: mongodb://localhost:27017/test?compressors=disabled&gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("cba0b6c8-9d2b-4e92-a003-ce9918223a3e") }
MongoDB server version: 4.4.13

mongos> admin = db.getSiblingDB("admin");
admin

# 1.Create the user administrator.
mongos> admin.createUser({user:"bigdata",pwd:passwordPrompt(),roles:[{role:"userAdminAnyDatabase",db:"admin"}]});
Enter password: 
Successfully added user: {
        "user" : "bigdata",
        "roles" : [
                {
                        "role" : "userAdminAnyDatabase",
                        "db" : "admin"
                }
        ]
}

# 2.Authenticate as the user administrator.
mongos> db.getSiblingDB("admin").auth("bigdata", passwordPrompt());
Enter password: 
1

# 3.Create Administrative User for Cluster Management
mongos> db.getSiblingDB("admin").createUser({"user":"root","pwd":passwordPrompt(),roles:[{"role":"clusterAdmin","db":"admin"}]});
Enter password: 
Successfully added user: {
        "user" : "root",
        "roles" : [
                {
                        "role" : "clusterAdmin",
                        "db" : "admin"
                }
        ]
}

# 4. Create ordinary users according to your needs, for example with the read or readWrite role (using a root-level account for day-to-day database access is not recommended; one is created here only for convenience). An example follows the output below.
mongos> db.getSiblingDB("admin").createUser({"user":"rootR","pwd":passwordPrompt(),roles:[{"role":"root","db":"admin"}]});
Enter password: 
Successfully added user: {
        "user" : "rootR",
        "roles" : [
                {
                        "role" : "root",
                        "db" : "admin"
                }
        ]
}
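
As an example of such an ordinary user, a hedged sketch of an application account with readWrite access limited to the test database (the name appuser is illustrative and not part of the original setup):

mongos> db.getSiblingDB("admin").createUser({
  user: "appuser",                                  // hypothetical application account
  pwd: passwordPrompt(),
  roles: [ { role: "readWrite", db: "test" } ]      // read/write on the test database only
});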

# Note: everything below must be run while logged in as the cluster administrator
# 5. Add the shards to the cluster
[bigdata@server1 mongodb]$ mongo --host 192.168.56.155 --port 27017 -u "root" -p --authenticationDatabase "admin"
MongoDB shell version v4.4.13
Enter password: 
connecting to: mongodb://192.168.56.155:27017/?authSource=admin&compressors=disabled&gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("bd854ccc-f760-4af6-a712-509dd81585ae") }
MongoDB server version: 4.4.13
mongos> sh.addShard("shard1/192.168.56.155:29017,192.168.56.156:29017,192.168.56.157:29017");
{
        "shardAdded" : "shard1",
        "ok" : 1,
        "operationTime" : Timestamp(1650002796, 1),
        "$clusterTime" : {
                "clusterTime" : Timestamp(1650002796, 2),
                "signature" : {
                        "hash" : BinData(0,"6je1jHemj/dE0M2sPxG3xi3PjSY="),
                        "keyId" : NumberLong("7086653264820699158")
                }
        }
}
mongos> sh.addShard("shard2/192.168.56.155:29018,192.168.56.156:29018,192.168.56.157:29018");
{
        "shardAdded" : "shard2",
        "ok" : 1,
        "operationTime" : Timestamp(1650002815, 4),
        "$clusterTime" : {
                "clusterTime" : Timestamp(1650002816, 1),
                "signature" : {
                        "hash" : BinData(0,"OiHUYO1fi84mfeULtyBwoANFWDg="),
                        "keyId" : NumberLong("7086653264820699158")
                }
        }
}
mongos> sh.addShard("shard3/192.168.56.155:29019,192.168.56.156:29019,192.168.56.157:29019");
{
        "shardAdded" : "shard3",
        "ok" : 1,
        "operationTime" : Timestamp(1650002830, 3),
        "$clusterTime" : {
                "clusterTime" : Timestamp(1650002830, 3),
                "signature" : {
                        "hash" : BinData(0,"jNnq1pEVcof6f2I5kWkI1XwiO14="),
                        "keyId" : NumberLong("7086653264820699158")
                }
        }
}
mongos> sh.status();
...
mongos> use test;
switched to db test

# 6.Enable Sharding for a Database
mongos> sh.enableSharding("test");
{
        "ok" : 1,
        "operationTime" : Timestamp(1650003031, 8),
        "$clusterTime" : {
                "clusterTime" : Timestamp(1650003031, 8),
                "signature" : {
                        "hash" : BinData(0,"iRupWfEEf1ZvYRwlXHeJRKydcHI="),
                        "keyId" : NumberLong("7086653264820699158")
                }
        }
}

# 7.Shard a Collection
mongos> sh.shardCollection("test.user",{"id":1});
{
        "collectionsharded" : "test.user",
        "collectionUUID" : UUID("fe314f66-2398-4717-9c87-25c31b03ed30"),
        "ok" : 1,
        "operationTime" : Timestamp(1650003207, 15),
        "$clusterTime" : {
                "clusterTime" : Timestamp(1650003207, 15),
                "signature" : {
                        "hash" : BinData(0,"o022Wlj2gD+KHDc5m+VYv7CnL2U="),
                        "keyId" : NumberLong("7086653264820699158")
                }
        }
}
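
To verify that the collection is really sharded, insert some throwaway documents and inspect their distribution (an illustrative check: with a ranged key like {"id": 1} and little data, everything may still sit in a single chunk on one shard until it is split; a hashed key such as {"id": "hashed"} is a common alternative for monotonically increasing ids):

mongos> for (var i = 0; i < 10000; i++) { db.user.insertOne({ id: i, name: "user" + i }); }
mongos> db.user.getShardDistribution();    // per-shard document and chunk counts for test.user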
  12. Cluster start/stop script cluster.sh:
vim ./mongodb/cluster.sh

#!/bin/bash

hosts=( server1 server2 server3 )

mongo_start(){
  source ~/.bashrc
  pre_mongod=$(ps -ef | grep mongod | grep -v grep)
  pre_mongos=$(ps -ef | grep mongos | grep -v grep)
  # only start if no mongod/mongos processes are currently running on this host
  if [[ -z "$pre_mongod" ]] && [[ -z "$pre_mongos" ]]; then
    for host in ${hosts[@]}
    do
      echo "******** $host configsvr start... ********"
      ssh $host "mongod --config /home/bigdata/env/mongodb/conf/configsvr.yml"
    done
    sleep 2

    for host in ${hosts[@]}
    do
      echo "********** $host shard start... **********"
      ssh $host "mongod --config /home/bigdata/env/mongodb/conf/shard1.yml"
      ssh $host "mongod --config /home/bigdata/env/mongodb/conf/shard2.yml"
      ssh $host "mongod --config /home/bigdata/env/mongodb/conf/shard3.yml"
    done
    sleep 2

    for host in ${hosts[@]}
    do
      echo "********** $host mongos start... **********"
      ssh $host "mongos --config /home/bigdata/env/mongodb/conf/mongos.yml"
    done
  else
    echo "check whether the process has stopped!"
  fi
}

mongo_stop(){
  source ~/.bashrc
  for host in ${hosts[@]}
  do
    echo "************ $host mongos stop... ***********"
    ssh $host "cat /home/bigdata/env/mongodb/run/mongos.pid | xargs kill -15 "
  done
  sleep 3

  for host in ${hosts[@]}
  do
    echo "************ $host shard stop... ************"
    ssh $host "cat /home/bigdata/env/mongodb/run/shard1.pid | xargs kill -15 "
    ssh $host "cat /home/bigdata/env/mongodb/run/shard2.pid | xargs kill -15 "
    ssh $host "cat /home/bigdata/env/mongodb/run/shard3.pid | xargs kill -15 "
  done
  sleep 3

  for host in ${hosts[@]}
  do
    echo "********** $host configsvr stop... **********"
    ssh $host "cat /home/bigdata/env/mongodb/run/configsvr.pid | xargs kill -15 "
  done
}

mongo_status(){
  source ~/.bashrc
  for host in ${hosts[@]}
  do
    echo "*************** $host status ***************"
    ssh $host "ps -ef | grep mongo* | grep -v grep "
  done
}

case $1 in
  start)
    echo "************** start mongodb ***************"
    mongo_start
    echo "************** start finish ****************"
  ;;
  stop)
    echo "************** stop mongodb ****************"
    mongo_stop
    echo "*************** stop finish ****************"
  ;;
  status)
    echo "*********** mongodb-4.4.13 status **********"
    mongo_status
  ;;
  *)
    echo "************* illegal argument *************"
esac
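
Make the script executable and drive the whole cluster with a single argument:

chmod +x ./mongodb/cluster.sh
./mongodb/cluster.sh start     # or: stop / status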

Note: the script requires passwordless SSH login from the host running it to server1, server2, and server3.
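
A minimal sketch for setting that up as the bigdata user on the host that runs the script (hostnames as used in the script; skip ssh-keygen if a key already exists):

ssh-keygen -t rsa -b 4096                  # accept the defaults to create ~/.ssh/id_rsa(.pub)
for host in server1 server2 server3; do
  ssh-copy-id bigdata@$host                # push the public key so ssh stops prompting for a password
done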
