MongoDB安装(Docker、Docker-Compose版)

MongoDB单机、副本集、分片集部署教程(Docker、Docker-Compose版)

一、基础准备:

           1、下载 mongodb server 安装包:https://www.mongodb.com/download-center/community

           

             2、目录结构

           

            3、编写Dockerfile文件

# Base image: CentOS 7 with the MongoDB 4.0.13 server tarball unpacked.
FROM centos:7

# MAINTAINER is deprecated in modern Docker; LABEL is the recommended replacement.
LABEL maintainer="qk"

# ADD auto-extracts the local tarball into /local/mongo
# (binaries end up under /local/mongo/mongodb-linux-x86_64-4.0.13/bin).
ADD mongodb-linux-x86_64-4.0.13.tgz  /local/mongo

# Default data directories: /data/db for mongod, /data/configdb for config servers.
# Combined into one RUN to keep the image to a single extra layer.
RUN mkdir -p /data/db /data/configdb

WORKDIR /local/mongo

# Default MongoDB port.
EXPOSE 27017

           4、生成mongo镜像

# Build the image and tag it "mongodb-qk" (note the space after -t).
docker image build -t mongodb-qk .

二、单机版

          1、编写docker-compose.yml 文件

# docker-compose file: standalone (single-node) MongoDB.
version: '3'

services:

    # One mongod; data persisted to ./db on the host.
    mongodb-single:      
        image: mongodb-qk
        volumes:
           - $PWD/db:/data/db
        ports:
            # host:container — exposes the default MongoDB port directly.
            - "27017:27017"
        # --bind_ip=0.0.0.0 so the server accepts connections from outside the container.
        command: ./mongodb-linux-x86_64-4.0.13/bin/mongod --bind_ip=0.0.0.0

         2、执行 

docker-compose up -d

         3、完成

三、副本集

         1、配置   

                一主两从 

                 集群名称:rs0

         2、编写docker-compose.yml 文件

# docker-compose file: 3-member replica set "rs0" (one primary, two secondaries
# after rs.initiate). All three mongod processes join via --replSet rs0.
# NOTE(review): service names spell "relica" — likely a typo for "replica";
# renaming them would change the generated container names, so left as-is.
version: '3'

services:

  # Member 1 — reachable from the host on port 27018.
  mongodb-relica01:
    image: mongodb-qk
    volumes:
      - $PWD/db1:/data/db
    ports:
      - "27018:27017"
    command: ./mongodb-linux-x86_64-4.0.13/bin/mongod --replSet rs0 --bind_ip=0.0.0.0
  
  # Member 2 — reachable from the host on port 27019.
  mongodb-relica02:
    image: mongodb-qk
    volumes:
      - $PWD/db2:/data/db
    ports:
      - "27019:27017"
    command: ./mongodb-linux-x86_64-4.0.13/bin/mongod --replSet rs0 --bind_ip=0.0.0.0
  
  # Member 3 — reachable from the host on port 27020.
  mongodb-relica03:
    image: mongodb-qk
    volumes:
      - $PWD/db3:/data/db
    ports:
      - "27020:27017"
    command: ./mongodb-linux-x86_64-4.0.13/bin/mongod --replSet rs0 --bind_ip=0.0.0.0

           3、启动

docker-compose up -d  

           4、初始化

                 副本集名称 rs0
                 节点 : ip:27018 、 ip:27019 、ip:27020
    
                  ①登录任意节点

# Connect to one replica-set member; replace "ip" with the host's real IP.
# (Long options take a double dash, matching the session transcript below.)
./mongo --port 27018 --host ip

                  ②输入 (更换你的ip)

// Replica-set configuration document; replace "ip" with the host's real IP.
config = {"_id": "rs0", "members": [
                      {"_id": 0, "host": "ip:27018"}, 
                      {"_id": 1, "host": "ip:27019"}, 
                      {"_id": 2, "host": "ip:27020"}]}

                   ③ 然后初始化

rs.initiate(config)

                   ④完成(操作实例如下)

[root@localhost mongodb-replica]# ls
docker-compose.yml  mongodb-replica搭建.txt
[root@localhost mongodb-replica]# docker-compose up -d
WARNING: The Docker Engine you're using is running in swarm mode.

Compose does not use swarm mode to deploy services to multiple nodes in a swarm. All containers will be scheduled on the current node.

To deploy your application across the swarm, use `docker stack deploy`.

Creating mongodb-replica_mongodb-relica03_1 ... done
Creating mongodb-replica_mongodb-relica01_1 ... done
Creating mongodb-replica_mongodb-relica02_1 ... done
[root@localhost mongodb-replica]# docker ps
CONTAINER ID        IMAGE                          COMMAND                  CREATED             STATUS              PORTS                      NAMES
c5ceaf58936b        mongodb-qk                     "./mongodb-linux-x86…"   26 seconds ago      Up 13 seconds       0.0.0.0:27019->27017/tcp   mongodb-replica_mongodb-relica02_1
71cdc4506f7d        mongodb-qk                     "./mongodb-linux-x86…"   26 seconds ago      Up 13 seconds       0.0.0.0:27018->27017/tcp   mongodb-replica_mongodb-relica01_1
46ed5cc3e3e1        mongodb-qk                     "./mongodb-linux-x86…"   26 seconds ago      Up 13 seconds       0.0.0.0:27020->27017/tcp   mongodb-replica_mongodb-relica03_1
[root@localhost mongodb-replica]# 
[root@localhost bin]# ./mongo --port 27018 --host 10.240.169.148
MongoDB shell version v4.0.13
connecting to: mongodb://10.240.169.148:27018/?gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("b7e6be22-53f6-46c9-a809-f7c0eceb1951") }
MongoDB server version: 4.0.13
Server has startup warnings: 
2019-10-29T07:45:29.401+0000 I CONTROL  [initandlisten] 
2019-10-29T07:45:29.401+0000 I CONTROL  [initandlisten] ** WARNING: Access control is not enabled for the database.
2019-10-29T07:45:29.401+0000 I CONTROL  [initandlisten] **          Read and write access to data and configuration is unrestricted.
2019-10-29T07:45:29.401+0000 I CONTROL  [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2019-10-29T07:45:29.401+0000 I CONTROL  [initandlisten] 
2019-10-29T07:45:29.403+0000 I CONTROL  [initandlisten] 
2019-10-29T07:45:29.403+0000 I CONTROL  [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/enabled is 'always'.
2019-10-29T07:45:29.403+0000 I CONTROL  [initandlisten] **        We suggest setting it to 'never'
2019-10-29T07:45:29.403+0000 I CONTROL  [initandlisten] 
2019-10-29T07:45:29.403+0000 I CONTROL  [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is 'always'.
2019-10-29T07:45:29.403+0000 I CONTROL  [initandlisten] **        We suggest setting it to 'never'
2019-10-29T07:45:29.403+0000 I CONTROL  [initandlisten] 
> config = {"_id": "rs0", "members": [
... {"_id": 0, "host": "10.240.169.148:27018"}, 
... {"_id": 1, "host": "10.240.169.148:27019"}, 
... {"_id": 2, "host": "10.240.169.148:27020"}]}
{
	"_id" : "rs0",
	"members" : [
		{
			"_id" : 0,
			"host" : "10.240.169.148:27018"
		},
		{
			"_id" : 1,
			"host" : "10.240.169.148:27019"
		},
		{
			"_id" : 2,
			"host" : "10.240.169.148:27020"
		}
	]
}
> rs.initiate(config)
{
	"ok" : 1,
	"operationTime" : Timestamp(1572335412, 1),
	"$clusterTime" : {
		"clusterTime" : Timestamp(1572335412, 1),
		"signature" : {
			"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
			"keyId" : NumberLong(0)
		}
	}
}
rs0:PRIMARY> rs.status()
{
	"set" : "rs0",
	"date" : ISODate("2019-10-29T07:50:40.769Z"),
	"myState" : 1,
	"term" : NumberLong(1),
	"syncingTo" : "",
	"syncSourceHost" : "",
	"syncSourceId" : -1,
	"heartbeatIntervalMillis" : NumberLong(2000),
	"optimes" : {
		"lastCommittedOpTime" : {
			"ts" : Timestamp(1572335429, 4),
			"t" : NumberLong(1)
		},
		"readConcernMajorityOpTime" : {
			"ts" : Timestamp(1572335429, 4),
			"t" : NumberLong(1)
		},
		"appliedOpTime" : {
			"ts" : Timestamp(1572335429, 4),
			"t" : NumberLong(1)
		},
		"durableOpTime" : {
			"ts" : Timestamp(1572335429, 4),
			"t" : NumberLong(1)
		}
	},
	"lastStableCheckpointTimestamp" : Timestamp(1572335424, 1),
	"members" : [
		{
			"_id" : 0,
			"name" : "10.240.169.148:27018",
			"health" : 1,
			"state" : 1,
			"stateStr" : "PRIMARY",
			"uptime" : 314,
			"optime" : {
				"ts" : Timestamp(1572335429, 4),
				"t" : NumberLong(1)
			},
			"optimeDate" : ISODate("2019-10-29T07:50:29Z"),
			"syncingTo" : "",
			"syncSourceHost" : "",
			"syncSourceId" : -1,
			"infoMessage" : "could not find member to sync from",
			"electionTime" : Timestamp(1572335423, 1),
			"electionDate" : ISODate("2019-10-29T07:50:23Z"),
			"configVersion" : 1,
			"self" : true,
			"lastHeartbeatMessage" : ""
		},
		{
			"_id" : 1,
			"name" : "10.240.169.148:27019",
			"health" : 1,
			"state" : 2,
			"stateStr" : "SECONDARY",
			"uptime" : 28,
			"optime" : {
				"ts" : Timestamp(1572335429, 4),
				"t" : NumberLong(1)
			},
			"optimeDurable" : {
				"ts" : Timestamp(1572335429, 4),
				"t" : NumberLong(1)
			},
			"optimeDate" : ISODate("2019-10-29T07:50:29Z"),
			"optimeDurableDate" : ISODate("2019-10-29T07:50:29Z"),
			"lastHeartbeat" : ISODate("2019-10-29T07:50:39.212Z"),
			"lastHeartbeatRecv" : ISODate("2019-10-29T07:50:39.933Z"),
			"pingMs" : NumberLong(1),
			"lastHeartbeatMessage" : "",
			"syncingTo" : "10.240.169.148:27018",
			"syncSourceHost" : "10.240.169.148:27018",
			"syncSourceId" : 0,
			"infoMessage" : "",
			"configVersion" : 1
		},
		{
			"_id" : 2,
			"name" : "10.240.169.148:27020",
			"health" : 1,
			"state" : 2,
			"stateStr" : "SECONDARY",
			"uptime" : 28,
			"optime" : {
				"ts" : Timestamp(1572335429, 4),
				"t" : NumberLong(1)
			},
			"optimeDurable" : {
				"ts" : Timestamp(1572335429, 4),
				"t" : NumberLong(1)
			},
			"optimeDate" : ISODate("2019-10-29T07:50:29Z"),
			"optimeDurableDate" : ISODate("2019-10-29T07:50:29Z"),
			"lastHeartbeat" : ISODate("2019-10-29T07:50:39.209Z"),
			"lastHeartbeatRecv" : ISODate("2019-10-29T07:50:39.972Z"),
			"pingMs" : NumberLong(1),
			"lastHeartbeatMessage" : "",
			"syncingTo" : "10.240.169.148:27018",
			"syncSourceHost" : "10.240.169.148:27018",
			"syncSourceId" : 0,
			"infoMessage" : "",
			"configVersion" : 1
		}
	],
	"ok" : 1,
	"operationTime" : Timestamp(1572335429, 4),
	"$clusterTime" : {
		"clusterTime" : Timestamp(1572335429, 4),
		"signature" : {
			"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
			"keyId" : NumberLong(0)
		}
	}
}
rs0:PRIMARY> 

四、分片集

         1、分片配置:

                  config:一主一从   (副本集名称csrs  {ip:27030,ip:27031})

                  shard:两主两从    

                                                 (副本集名称 shrs01  {ip:27040,ip:27041})

                                                 (副本集名称 shrs02  {ip:27042,ip:27043})

                 router: 单节点      ({ip:27050})

         2、编写docker-compose.yml 文件

# docker-compose file: sharded cluster.
#   config servers : replica set "csrs"   (host ports 27030-27031)
#   shard 1        : replica set "shrs01" (host ports 27040-27041)
#   shard 2        : replica set "shrs02" (host ports 27042-27043)
#   mongos router  : host port 27050
# Container-side ports follow mongod defaults: --configsvr listens on 27019,
# --shardsvr on 27018, and mongos on 27017 — hence the host:container mappings.
version: '3'

services:

#config
  mongodb-config01:
    image: mongodb-qk
    volumes:
      # Config servers store metadata under /data/configdb.
      - $PWD/db/mongodb-sharding/config01:/data/configdb
    ports:
      - "27030:27019"
    command: ./mongodb-linux-x86_64-4.0.13/bin/mongod --bind_ip=0.0.0.0 --replSet csrs --configsvr
  
  mongodb-config02:
    image: mongodb-qk
    volumes:
      - $PWD/db/mongodb-sharding/config02:/data/configdb
    ports:
      - "27031:27019"
    command: ./mongodb-linux-x86_64-4.0.13/bin/mongod --bind_ip=0.0.0.0 --replSet csrs --configsvr  

#shard
  # Shard replica set shrs01, member 1.
  mongodb-shard01-01:
    image: mongodb-qk
    volumes:
      - $PWD/db/mongodb-sharding/shard01-01:/data/db
    ports:
      - "27040:27018"
    command: ./mongodb-linux-x86_64-4.0.13/bin/mongod --bind_ip=0.0.0.0 --shardsvr --replSet shrs01
  
  # Shard replica set shrs01, member 2.
  mongodb-shard01-02:
    image: mongodb-qk
    volumes:
      - $PWD/db/mongodb-sharding/shard01-02:/data/db
    ports:
      - "27041:27018"
    command: ./mongodb-linux-x86_64-4.0.13/bin/mongod --bind_ip=0.0.0.0 --shardsvr --replSet shrs01
    
  # Shard replica set shrs02, member 1.
  mongodb-shard02-01:
    image: mongodb-qk
    volumes:
      - $PWD/db/mongodb-sharding/shard02-01:/data/db
    ports:
      - "27042:27018"
    command: ./mongodb-linux-x86_64-4.0.13/bin/mongod --bind_ip=0.0.0.0 --shardsvr --replSet shrs02
   
  # Shard replica set shrs02, member 2.
  mongodb-shard02-02:
    image: mongodb-qk
    volumes:
      - $PWD/db/mongodb-sharding/shard02-02:/data/db
    ports:
      - "27043:27018"
    command: ./mongodb-linux-x86_64-4.0.13/bin/mongod --bind_ip=0.0.0.0 --shardsvr --replSet shrs02 

#router
  # mongos router; --configdb points at the config-server replica set
  # (hard-coded host IP — adjust for your environment).
  mongodb-router:
    image: mongodb-qk
    volumes:
      - $PWD/db/mongodb-sharding/router:/data/db
    ports:
      - "27050:27017"
    command: ./mongodb-linux-x86_64-4.0.13/bin/mongos --configdb csrs/10.240.169.148:27030,10.240.169.148:27031 --bind_ip=0.0.0.0

         3、启动 

docker-compose up -d  

         4、初始化:

                  分片集
                       config   名称  csrs(27030、27031)                
                       shard   名称  shrs01(27040 、27041)     shrs02(27042、27043 )
                       router   27050

------------初始化config-----------------------------------------------------------------------------------------
               

                ①登录config任意节点 

./mongo --port 27030 --host 10.240.169.148

                 ②初始化 

 rs.initiate({ _id: "csrs",version: 1,members: [{ _id: 0, host : "10.240.169.148:27030" }]});

                 ③ 添加从节点    

rs.add("10.240.169.148:27031");

                 ④完成

------------初始化shard01----------------------------------------------------------------------------------------
             

                 ①登录shrs01 任意节点

  ./mongo --port 27040 --host 10.240.169.148

                ②初始化

rs.initiate({ _id: "shrs01",version: 1,members: [{ _id: 0, host : "10.240.169.148:27040" }]});

                ③添加从节点     

 rs.add("10.240.169.148:27041");

                ④完成
------------初始化shard02----------------------------------------------------------------------------------------
           

              偷个懒,同上

------------初始化router-------------------------------------------------------------------------------------------
           

             ① 登录router 节点   连接mongos

./mongo --port 27050 --host 10.240.169.148

             ② 增加分片

sh.addShard("shrs01/10.240.169.148:27040,10.240.169.148:27041");
sh.addShard("shrs02/10.240.169.148:27042,10.240.169.148:27043");

             ③ 对数据库 集合进行分片
                  例如:对数据库shard中的集合user进行分片,片键userID
                            use admin;

                             #对shard这个库启用分片
                            sh.enableSharding("shard")       

                             # 对集合user分片,片键userID(hash散列策略)
                            sh.shardCollection("shard.user",{"userID":"hashed"}); 

                            注:分片策略

                                   1:  for ranged based sharding

                                   hashed: to specify a hashed shard key.

             ④完成(操作实例如下)

[root@localhost mongodb-sharding]# ls
docker-compose.yml  mongodb-shard搭建.txt
[root@localhost mongodb-sharding]# docker-compose up -d
WARNING: The Docker Engine you're using is running in swarm mode.

Compose does not use swarm mode to deploy services to multiple nodes in a swarm. All containers will be scheduled on the current node.

To deploy your application across the swarm, use `docker stack deploy`.

Creating mongodb-sharding_mongodb-shard02-01_1 ... done
Creating mongodb-sharding_mongodb-shard02-02_1 ... done
Creating mongodb-sharding_mongodb-config02_1   ... done
Creating mongodb-sharding_mongodb-router_1     ... done
Creating mongodb-sharding_mongodb-shard01-02_1 ... done
Creating mongodb-sharding_mongodb-config01_1   ... done
Creating mongodb-sharding_mongodb-shard01-01_1 ... done
[root@localhost mongodb-sharding]# docker ps
CONTAINER ID        IMAGE                          COMMAND                  CREATED              STATUS              PORTS                                 NAMES
af98a3ff6d33        mongodb-qk                     "./mongodb-linux-x86…"   About a minute ago   Up About a minute   27017/tcp, 0.0.0.0:27040->27018/tcp   mongodb-sharding_mongodb-shard01-01_1
c4130a61b9a1        mongodb-qk                     "./mongodb-linux-x86…"   About a minute ago   Up About a minute   0.0.0.0:27050->27017/tcp              mongodb-sharding_mongodb-router_1
eed25f877ca1        mongodb-qk                     "./mongodb-linux-x86…"   About a minute ago   Up About a minute   27017/tcp, 0.0.0.0:27030->27019/tcp   mongodb-sharding_mongodb-config01_1
cdca2c941123        mongodb-qk                     "./mongodb-linux-x86…"   About a minute ago   Up About a minute   27017/tcp, 0.0.0.0:27041->27018/tcp   mongodb-sharding_mongodb-shard01-02_1
f32a591959d7        mongodb-qk                     "./mongodb-linux-x86…"   About a minute ago   Up About a minute   27017/tcp, 0.0.0.0:27043->27018/tcp   mongodb-sharding_mongodb-shard02-02_1
d365d86ae6fe        mongodb-qk                     "./mongodb-linux-x86…"   About a minute ago   Up About a minute   27017/tcp, 0.0.0.0:27031->27019/tcp   mongodb-sharding_mongodb-config02_1
e58aa8b54263        mongodb-qk                     "./mongodb-linux-x86…"   About a minute ago   Up About a minute   27017/tcp, 0.0.0.0:27042->27018/tcp   mongodb-sharding_mongodb-shard02-01_1
[root@localhost mongodb-sharding]# 

初始化config:

[root@localhost bin]# ./mongo --port 27030 --host 10.240.169.148
MongoDB shell version v4.0.13
connecting to: mongodb://10.240.169.148:27030/?gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("895fa070-48eb-443c-92f9-bdb3c6f8cbd3") }
MongoDB server version: 4.0.13
Server has startup warnings: 
2019-10-29T07:57:31.341+0000 I CONTROL  [initandlisten] 
2019-10-29T07:57:31.341+0000 I CONTROL  [initandlisten] ** WARNING: Access control is not enabled for the database.
2019-10-29T07:57:31.341+0000 I CONTROL  [initandlisten] **          Read and write access to data and configuration is unrestricted.
2019-10-29T07:57:31.341+0000 I CONTROL  [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2019-10-29T07:57:31.342+0000 I CONTROL  [initandlisten] 
2019-10-29T07:57:31.409+0000 I CONTROL  [initandlisten] 
2019-10-29T07:57:31.409+0000 I CONTROL  [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/enabled is 'always'.
2019-10-29T07:57:31.409+0000 I CONTROL  [initandlisten] **        We suggest setting it to 'never'
2019-10-29T07:57:31.409+0000 I CONTROL  [initandlisten] 
2019-10-29T07:57:31.409+0000 I CONTROL  [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is 'always'.
2019-10-29T07:57:31.409+0000 I CONTROL  [initandlisten] **        We suggest setting it to 'never'
2019-10-29T07:57:31.409+0000 I CONTROL  [initandlisten] 
> rs.initiate({ _id: "csrs",       version: 1,       members: [{ _id: 0, host : "10.240.169.148:27030" }]});
{
	"ok" : 1,
	"operationTime" : Timestamp(1572336136, 1),
	"$gleStats" : {
		"lastOpTime" : Timestamp(1572336136, 1),
		"electionId" : ObjectId("000000000000000000000000")
	},
	"lastCommittedOpTime" : Timestamp(0, 0),
	"$clusterTime" : {
		"clusterTime" : Timestamp(1572336136, 1),
		"signature" : {
			"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
			"keyId" : NumberLong(0)
		}
	}
}
csrs:SECONDARY> rs.add("10.240.169.148:27031");
{
	"ok" : 1,
	"operationTime" : Timestamp(1572336147, 3),
	"$gleStats" : {
		"lastOpTime" : {
			"ts" : Timestamp(1572336147, 3),
			"t" : NumberLong(1)
		},
		"electionId" : ObjectId("7fffffff0000000000000001")
	},
	"lastCommittedOpTime" : Timestamp(1572336147, 2),
	"$clusterTime" : {
		"clusterTime" : Timestamp(1572336147, 3),
		"signature" : {
			"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
			"keyId" : NumberLong(0)
		}
	}
}
csrs:PRIMARY> rs.status()
{
	"set" : "csrs",
	"date" : ISODate("2019-10-29T08:02:36.781Z"),
	"myState" : 1,
	"term" : NumberLong(1),
	"syncingTo" : "",
	"syncSourceHost" : "",
	"syncSourceId" : -1,
	"configsvr" : true,
	"heartbeatIntervalMillis" : NumberLong(2000),
	"optimes" : {
		"lastCommittedOpTime" : {
			"ts" : Timestamp(1572336154, 1),
			"t" : NumberLong(1)
		},
		"readConcernMajorityOpTime" : {
			"ts" : Timestamp(1572336154, 1),
			"t" : NumberLong(1)
		},
		"appliedOpTime" : {
			"ts" : Timestamp(1572336154, 1),
			"t" : NumberLong(1)
		},
		"durableOpTime" : {
			"ts" : Timestamp(1572336154, 1),
			"t" : NumberLong(1)
		}
	},
	"lastStableCheckpointTimestamp" : Timestamp(1572336138, 27),
	"members" : [
		{
			"_id" : 0,
			"name" : "10.240.169.148:27030",
			"health" : 1,
			"state" : 1,
			"stateStr" : "PRIMARY",
			"uptime" : 312,
			"optime" : {
				"ts" : Timestamp(1572336154, 1),
				"t" : NumberLong(1)
			},
			"optimeDate" : ISODate("2019-10-29T08:02:34Z"),
			"syncingTo" : "",
			"syncSourceHost" : "",
			"syncSourceId" : -1,
			"infoMessage" : "could not find member to sync from",
			"electionTime" : Timestamp(1572336136, 2),
			"electionDate" : ISODate("2019-10-29T08:02:16Z"),
			"configVersion" : 2,
			"self" : true,
			"lastHeartbeatMessage" : ""
		},
		{
			"_id" : 1,
			"name" : "10.240.169.148:27031",
			"health" : 1,
			"state" : 2,
			"stateStr" : "SECONDARY",
			"uptime" : 8,
			"optime" : {
				"ts" : Timestamp(1572336154, 1),
				"t" : NumberLong(1)
			},
			"optimeDurable" : {
				"ts" : Timestamp(1572336154, 1),
				"t" : NumberLong(1)
			},
			"optimeDate" : ISODate("2019-10-29T08:02:34Z"),
			"optimeDurableDate" : ISODate("2019-10-29T08:02:34Z"),
			"lastHeartbeat" : ISODate("2019-10-29T08:02:35.955Z"),
			"lastHeartbeatRecv" : ISODate("2019-10-29T08:02:35.717Z"),
			"pingMs" : NumberLong(1),
			"lastHeartbeatMessage" : "",
			"syncingTo" : "10.240.169.148:27030",
			"syncSourceHost" : "10.240.169.148:27030",
			"syncSourceId" : 0,
			"infoMessage" : "",
			"configVersion" : 2
		}
	],
	"ok" : 1,
	"operationTime" : Timestamp(1572336154, 1),
	"$gleStats" : {
		"lastOpTime" : {
			"ts" : Timestamp(1572336147, 3),
			"t" : NumberLong(1)
		},
		"electionId" : ObjectId("7fffffff0000000000000001")
	},
	"lastCommittedOpTime" : Timestamp(1572336154, 1),
	"$clusterTime" : {
		"clusterTime" : Timestamp(1572336154, 1),
		"signature" : {
			"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
			"keyId" : NumberLong(0)
		}
	}
}
csrs:PRIMARY> 

初始化shard:

[root@localhost bin]# ./mongo --port 27040 --host 10.240.169.148
MongoDB shell version v4.0.13
connecting to: mongodb://10.240.169.148:27040/?gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("30371969-3b12-4b0a-8316-a1dd2690389c") }
MongoDB server version: 4.0.13
Server has startup warnings: 
2019-10-29T07:57:31.341+0000 I CONTROL  [initandlisten] 
2019-10-29T07:57:31.341+0000 I CONTROL  [initandlisten] ** WARNING: Access control is not enabled for the database.
2019-10-29T07:57:31.341+0000 I CONTROL  [initandlisten] **          Read and write access to data and configuration is unrestricted.
2019-10-29T07:57:31.341+0000 I CONTROL  [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2019-10-29T07:57:31.341+0000 I CONTROL  [initandlisten] 
2019-10-29T07:57:31.410+0000 I CONTROL  [initandlisten] 
2019-10-29T07:57:31.410+0000 I CONTROL  [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/enabled is 'always'.
2019-10-29T07:57:31.410+0000 I CONTROL  [initandlisten] **        We suggest setting it to 'never'
2019-10-29T07:57:31.410+0000 I CONTROL  [initandlisten] 
2019-10-29T07:57:31.410+0000 I CONTROL  [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is 'always'.
2019-10-29T07:57:31.410+0000 I CONTROL  [initandlisten] **        We suggest setting it to 'never'
2019-10-29T07:57:31.410+0000 I CONTROL  [initandlisten] 
> rs.initiate({ _id: "shrs01",       version: 1,       members: [{ _id: 0, host : "10.240.169.148:27040" }]});
{
	"ok" : 1,
	"operationTime" : Timestamp(1572336393, 1),
	"$clusterTime" : {
		"clusterTime" : Timestamp(1572336393, 1),
		"signature" : {
			"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
			"keyId" : NumberLong(0)
		}
	}
}
shrs01:SECONDARY> 
shrs01:SECONDARY>  rs.add("10.240.169.148:27041");
{
	"ok" : 1,
	"operationTime" : Timestamp(1572336411, 1),
	"$clusterTime" : {
		"clusterTime" : Timestamp(1572336411, 1),
		"signature" : {
			"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
			"keyId" : NumberLong(0)
		}
	}
}
shrs01:PRIMARY> rs.status()
{
	"set" : "shrs01",
	"date" : ISODate("2019-10-29T08:07:00.305Z"),
	"myState" : 1,
	"term" : NumberLong(1),
	"syncingTo" : "",
	"syncSourceHost" : "",
	"syncSourceId" : -1,
	"heartbeatIntervalMillis" : NumberLong(2000),
	"optimes" : {
		"lastCommittedOpTime" : {
			"ts" : Timestamp(1572336411, 1),
			"t" : NumberLong(1)
		},
		"readConcernMajorityOpTime" : {
			"ts" : Timestamp(1572336411, 1),
			"t" : NumberLong(1)
		},
		"appliedOpTime" : {
			"ts" : Timestamp(1572336411, 1),
			"t" : NumberLong(1)
		},
		"durableOpTime" : {
			"ts" : Timestamp(1572336411, 1),
			"t" : NumberLong(1)
		}
	},
	"lastStableCheckpointTimestamp" : Timestamp(1572336395, 1),
	"members" : [
		{
			"_id" : 0,
			"name" : "10.240.169.148:27040",
			"health" : 1,
			"state" : 1,
			"stateStr" : "PRIMARY",
			"uptime" : 575,
			"optime" : {
				"ts" : Timestamp(1572336411, 1),
				"t" : NumberLong(1)
			},
			"optimeDate" : ISODate("2019-10-29T08:06:51Z"),
			"syncingTo" : "",
			"syncSourceHost" : "",
			"syncSourceId" : -1,
			"infoMessage" : "could not find member to sync from",
			"electionTime" : Timestamp(1572336393, 2),
			"electionDate" : ISODate("2019-10-29T08:06:33Z"),
			"configVersion" : 2,
			"self" : true,
			"lastHeartbeatMessage" : ""
		},
		{
			"_id" : 1,
			"name" : "10.240.169.148:27041",
			"health" : 1,
			"state" : 2,
			"stateStr" : "SECONDARY",
			"uptime" : 8,
			"optime" : {
				"ts" : Timestamp(1572336411, 1),
				"t" : NumberLong(1)
			},
			"optimeDurable" : {
				"ts" : Timestamp(1572336411, 1),
				"t" : NumberLong(1)
			},
			"optimeDate" : ISODate("2019-10-29T08:06:51Z"),
			"optimeDurableDate" : ISODate("2019-10-29T08:06:51Z"),
			"lastHeartbeat" : ISODate("2019-10-29T08:06:59.638Z"),
			"lastHeartbeatRecv" : ISODate("2019-10-29T08:07:00.052Z"),
			"pingMs" : NumberLong(1),
			"lastHeartbeatMessage" : "",
			"syncingTo" : "",
			"syncSourceHost" : "",
			"syncSourceId" : -1,
			"infoMessage" : "",
			"configVersion" : 2
		}
	],
	"ok" : 1,
	"operationTime" : Timestamp(1572336411, 1),
	"$clusterTime" : {
		"clusterTime" : Timestamp(1572336411, 1),
		"signature" : {
			"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
			"keyId" : NumberLong(0)
		}
	}
}
shrs01:PRIMARY> 

初始化mongos:

[root@localhost bin]# ./mongo --port 27050 --host 10.240.169.148
MongoDB shell version v4.0.13
connecting to: mongodb://10.240.169.148:27050/?gssapiServiceName=mongodb
Implicit session: session { "id" : UUID("1aa3dd55-c4df-447e-b62e-e922d2cde4ed") }
MongoDB server version: 4.0.13
Server has startup warnings: 
2019-10-29T07:57:23.712+0000 I CONTROL  [main] 
2019-10-29T07:57:23.712+0000 I CONTROL  [main] ** WARNING: Access control is not enabled for the database.
2019-10-29T07:57:23.712+0000 I CONTROL  [main] **          Read and write access to data and configuration is unrestricted.
2019-10-29T07:57:23.712+0000 I CONTROL  [main] ** WARNING: You are running this process as the root user, which is not recommended.
2019-10-29T07:57:23.712+0000 I CONTROL  [main] 
mongos> sh.addShard("shrs01/10.240.169.148:27040,10.240.169.148:27041");
{
	"shardAdded" : "shrs01",
	"ok" : 1,
	"operationTime" : Timestamp(1572336542, 6),
	"$clusterTime" : {
		"clusterTime" : Timestamp(1572336542, 6),
		"signature" : {
			"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
			"keyId" : NumberLong(0)
		}
	}
}
mongos> sh.addShard("shrs02/10.240.169.148:27042,10.240.169.148:27043");
{
	"shardAdded" : "shrs02",
	"ok" : 1,
	"operationTime" : Timestamp(1572336684, 5),
	"$clusterTime" : {
		"clusterTime" : Timestamp(1572336684, 5),
		"signature" : {
			"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
			"keyId" : NumberLong(0)
		}
	}
} 
mongos> use admin;
switched to db admin
mongos> sh.enableSharding("shard")
{
	"ok" : 1,
	"operationTime" : Timestamp(1572336610, 5),
	"$clusterTime" : {
		"clusterTime" : Timestamp(1572336610, 5),
		"signature" : {
			"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
			"keyId" : NumberLong(0)
		}
	}
}
mongos>  sh.shardCollection("shard.user",{"userID":1,"_id":1})
{
	"collectionsharded" : "shard.user",
	"collectionUUID" : UUID("bd6b0349-089c-40a8-8fa9-3136536add6f"),
	"ok" : 1,
	"operationTime" : Timestamp(1572336624, 13),
	"$clusterTime" : {
		"clusterTime" : Timestamp(1572336624, 13),
		"signature" : {
			"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
			"keyId" : NumberLong(0)
		}
	}
}
mongos> sh.status()
--- Sharding Status --- 
  sharding version: {
  	"_id" : 1,
  	"minCompatibleVersion" : 5,
  	"currentVersion" : 6,
  	"clusterId" : ObjectId("5db7f20addf5dc2b692b8a1b")
  }
  shards:
        {  "_id" : "shrs01",  "host" : "shrs01/10.240.169.148:27040,10.240.169.148:27041",  "state" : 1 }
        {  "_id" : "shrs02",  "host" : "shrs02/10.240.169.148:27042,10.240.169.148:27043",  "state" : 1 }
  active mongoses:
        "4.0.13" : 1
  autosplit:
        Currently enabled: yes
  balancer:
        Currently enabled:  yes
        Currently running:  no
        Failed balancer rounds in last 5 attempts:  0
        Migration Results for the last 24 hours: 
                No recent migrations
  databases:
        {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
        {  "_id" : "shard",  "primary" : "shrs01",  "partitioned" : true,  "version" : {  "uuid" : UUID("518f1557-5021-46f0-8bf0-04d2a150ef47"),  "lastMod" : 1 } }
                shard.user
                        shard key: { "userID" : 1, "_id" : 1 }
                        unique: false
                        balancing: true
                        chunks:
                                shrs01	1
                        { "userID" : { "$minKey" : 1 }, "_id" : { "$minKey" : 1 } } -->> { "userID" : { "$maxKey" : 1 }, "_id" : { "$maxKey" : 1 } } on : shrs01 Timestamp(1, 0) 

mongos> 

 

  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值