环境描述:
1.物理服务器3台
2.shards为2片,每片shard为一个Replica Set,冗余度为3(主、从、延迟从)
3.mongos节点数量为3个
4.config server节点数量为3个
5.arbiter server节点数量为6个
部署步骤:
1.配置hosts文件(三台主机都需操作)
[root@hadoop3 ~]# more /etc/hosts
10.1.245.72 hadoop1
10.1.245.73 hadoop2
10.1.245.74 hadoop3
2.配置并启动mongod和mongos服务
2.1 主机hadoop1上部署
2.1.1 配置参数文件
[root@hadoop1 ~]# more deploy_mongoDB_1.sh
# Generate data directories and config files for the MongoDB processes
# hosted on hadoop1: data-bearing shard members shard11/shard21, config
# server 1, one arbiter per shard, and mongos router 1.
# The base directory defaults to /work/mongodb and can be overridden:
#   MONGODB_BASE=/other/dir sh deploy_mongoDB_1.sh
set -u

base="${MONGODB_BASE:-/work/mongodb}"

# write_mongod_conf NAME REPLSET PORT OPLOG_MB
# Create the dbpath directory and a mongod config file for one replica-set
# member (data node or arbiter) under ${base}.
write_mongod_conf() {
  name=$1; replset=$2; port=$3; oplog=$4
  mkdir -p "${base}/${name}"
  cat > "${base}/${name}.conf" <<EOF
shardsvr=true
replSet=${replset}
port=${port}
dbpath=${base}/${name}
oplogSize=${oplog}
logpath=${base}/${name}.log
logappend=true
fork=true
nojournal=true
EOF
}

# Data nodes (this host's member of each shard's replica set)
write_mongod_conf shard11 shard1 28017 2048
write_mongod_conf shard21 shard2 28018 2048

# Config server
mkdir -p "${base}/config"
cat > "${base}/config1.conf" <<EOF
configsvr=true
dbpath=${base}/config
port=20000
logpath=${base}/config1.log
logappend=true
fork=true
nojournal=true
EOF

# Arbiter nodes (small oplog — arbiters hold no data)
write_mongod_conf arbiter1 shard1 28031 100
write_mongod_conf arbiter2 shard2 28032 100

# mongos router
mkdir -p "${base}/mongos1"
cat > "${base}/mongos1.conf" <<EOF
configdb=hadoop1:20000,hadoop2:20000,hadoop3:20000
port=28885
chunkSize=100
logpath=${base}/mongos1.log
logappend=true
fork=true
EOF
[root@hadoop1 ~]# sh deploy_mongoDB_1.sh
2.1.2 启动服务
[root@hadoop1 ~]# mongod --config /work/mongodb/shard11.conf
[root@hadoop1 ~]# mongod --config /work/mongodb/shard21.conf
[root@hadoop1 ~]# mongod --config /work/mongodb/arbiter1.conf
[root@hadoop1 ~]# mongod --config /work/mongodb/arbiter2.conf
[root@hadoop1 ~]# mongod --config /work/mongodb/config1.conf
[root@hadoop1 ~]# mongos --config /work/mongodb/mongos1.conf
about to fork child process, waiting until server is ready for connections.
forked process: 14953
ERROR: child process failed, exited with error number 1
注:这里启动mongos进程失败,查看mongos1.log得知失败原因是所配置的config server尚未全部启动,这里暂时不启动mongos服务。
[root@hadoop1 ~]# ps -ef|grep mongo|grep -v grep
root 14829 1 0 17:21 ? 00:00:00 mongod --config /work/mongodb/shard11.conf
root 14845 1 0 17:22 ? 00:00:00 mongod --config /work/mongodb/shard21.conf
root 14861 1 0 17:22 ? 00:00:00 mongod --config /work/mongodb/arbiter1.conf
root 14877 1 0 17:22 ? 00:00:00 mongod --config /work/mongodb/arbiter2.conf
root 14893 1 0 17:22 ? 00:00:00 mongod --config /work/mongodb/config1.conf
2.2 主机hadoop2上部署
2.2.1 配置参数文件
[root@hadoop2 ~]# more deploy_mongoDB_2.sh
# Generate data directories and config files for the MongoDB processes
# hosted on hadoop2: data-bearing shard members shard12/shard22, config
# server 2, one arbiter per shard, and mongos router 2.
# The base directory defaults to /work/mongodb and can be overridden:
#   MONGODB_BASE=/other/dir sh deploy_mongoDB_2.sh
set -u

base="${MONGODB_BASE:-/work/mongodb}"

# write_mongod_conf NAME REPLSET PORT OPLOG_MB
# Create the dbpath directory and a mongod config file for one replica-set
# member (data node or arbiter) under ${base}.
write_mongod_conf() {
  name=$1; replset=$2; port=$3; oplog=$4
  mkdir -p "${base}/${name}"
  cat > "${base}/${name}.conf" <<EOF
shardsvr=true
replSet=${replset}
port=${port}
dbpath=${base}/${name}
oplogSize=${oplog}
logpath=${base}/${name}.log
logappend=true
fork=true
nojournal=true
EOF
}

# Data nodes (this host's member of each shard's replica set)
write_mongod_conf shard12 shard1 28017 2048
write_mongod_conf shard22 shard2 28018 2048

# Config server
mkdir -p "${base}/config"
cat > "${base}/config2.conf" <<EOF
configsvr=true
dbpath=${base}/config
port=20000
logpath=${base}/config2.log
logappend=true
fork=true
nojournal=true
EOF

# Arbiter nodes (small oplog — arbiters hold no data)
write_mongod_conf arbiter1 shard1 28031 100
write_mongod_conf arbiter2 shard2 28032 100

# mongos router
mkdir -p "${base}/mongos2"
cat > "${base}/mongos2.conf" <<EOF
configdb=hadoop1:20000,hadoop2:20000,hadoop3:20000
port=28885
chunkSize=100
logpath=${base}/mongos2.log
logappend=true
fork=true
EOF
[root@hadoop2 ~]# sh deploy_mongoDB_2.sh
2.2.2 启动服务
[root@hadoop2 ~]# mongod --config /work/mongodb/shard12.conf
[root@hadoop2 ~]# mongod --config /work/mongodb/shard22.conf
[root@hadoop2 ~]# mongod --config /work/mongodb/arbiter1.conf
[root@hadoop2 ~]# mongod --config /work/mongodb/arbiter2.conf
[root@hadoop2 ~]# mongod --config /work/mongodb/config2.conf
[root@hadoop2 ~]# mongos --config /work/mongodb/mongos2.conf
about to fork child process, waiting until server is ready for connections.
forked process: 29632
ERROR: child process failed, exited with error number 1
注:这里启动mongos进程失败,查看mongos2.log得知失败原因是所配置的config server尚未全部启动,这里暂时不启动mongos服务。
[root@hadoop2 ~]# ps -ef|grep mongo|grep -v grep
root 29556 1 0 17:35 ? 00:00:00 mongod --config /work/mongodb/shard12.conf
root 29572 1 0 17:35 ? 00:00:00 mongod --config /work/mongodb/shard22.conf
root 29588 1 0 17:35 ? 00:00:00 mongod --config /work/mongodb/arbiter1.conf
root 29604 1 0 17:35 ? 00:00:00 mongod --config /work/mongodb/arbiter2.conf
root 29620 1 0 17:35 ? 00:00:00 mongod --config /work/mongodb/config2.conf
2.3 主机hadoop3上部署
2.3.1 配置参数文件
[root@hadoop3 ~]# more deploy_mongoDB_3.sh
# Generate data directories and config files for the MongoDB processes
# hosted on hadoop3: data-bearing shard members shard13/shard23, config
# server 3, one arbiter per shard, and mongos router 3.
# The base directory defaults to /work/mongodb and can be overridden:
#   MONGODB_BASE=/other/dir sh deploy_mongoDB_3.sh
set -u

base="${MONGODB_BASE:-/work/mongodb}"

# write_mongod_conf NAME REPLSET PORT OPLOG_MB
# Create the dbpath directory and a mongod config file for one replica-set
# member (data node or arbiter) under ${base}.
write_mongod_conf() {
  name=$1; replset=$2; port=$3; oplog=$4
  mkdir -p "${base}/${name}"
  cat > "${base}/${name}.conf" <<EOF
shardsvr=true
replSet=${replset}
port=${port}
dbpath=${base}/${name}
oplogSize=${oplog}
logpath=${base}/${name}.log
logappend=true
fork=true
nojournal=true
EOF
}

# Data nodes (this host's member of each shard's replica set)
write_mongod_conf shard13 shard1 28017 2048
write_mongod_conf shard23 shard2 28018 2048

# Config server
mkdir -p "${base}/config"
cat > "${base}/config3.conf" <<EOF
configsvr=true
dbpath=${base}/config
port=20000
logpath=${base}/config3.log
logappend=true
fork=true
nojournal=true
EOF

# Arbiter nodes (small oplog — arbiters hold no data)
write_mongod_conf arbiter1 shard1 28031 100
write_mongod_conf arbiter2 shard2 28032 100

# mongos router
mkdir -p "${base}/mongos3"
cat > "${base}/mongos3.conf" <<EOF
configdb=hadoop1:20000,hadoop2:20000,hadoop3:20000
port=28885
chunkSize=100
logpath=${base}/mongos3.log
logappend=true
fork=true
EOF
[root@hadoop3 ~]# sh deploy_mongoDB_3.sh
2.3.2 启动服务
[root@hadoop3 ~]# mongod --config /work/mongodb/shard13.conf
[root@hadoop3 ~]# mongod --config /work/mongodb/shard23.conf
[root@hadoop3 ~]# mongod --config /work/mongodb/arbiter1.conf
[root@hadoop3 ~]# mongod --config /work/mongodb/arbiter2.conf
[root@hadoop3 ~]# mongod --config /work/mongodb/config3.conf
[root@hadoop3 ~]# mongos --config /work/mongodb/mongos3.conf
[root@hadoop3 ~]# ps -ef|grep mongo|grep -v grep
root 3089 1 0 17:41 ? 00:00:00 mongod --config /work/mongodb/shard13.conf
root 3105 1 0 17:41 ? 00:00:00 mongod --config /work/mongodb/shard23.conf
root 3121 1 0 17:41 ? 00:00:00 mongod --config /work/mongodb/arbiter1.conf
root 3137 1 0 17:41 ? 00:00:00 mongod --config /work/mongodb/arbiter2.conf
root 3153 1 0 17:41 ? 00:00:00 mongod --config /work/mongodb/config3.conf
root 3165 1 0 17:41 ? 00:00:00 mongos --config /work/mongodb/mongos3.conf
注:这个时候config server已经都启动了,所以在主机hadoop3上的mongos服务启动正常,下面把主机hadoop1~2上的mongos服务都启动
[root@hadoop1 ~]# mongos --config /work/mongodb/mongos1.conf
about to fork child process, waiting until server is ready for connections.
forked process: 15078
child process started successfully, parent exiting
[root@hadoop2 mongodb]# mongos --config /work/mongodb/mongos2.conf
about to fork child process, waiting until server is ready for connections.
forked process: 29778
child process started successfully, parent exiting
3.配置分片
3.1 在主机hadoop1上配置
[root@hadoop1 ~]# mongo hadoop1:28017/admin
>config = {_id:'shard1', members: [{_id: 0, host: 'hadoop1:28017'},
{_id: 1, host: 'hadoop2:28017'},
{_id :2, host: 'hadoop3:28017',slaveDelay : 7200 ,priority:0},
{_id: 3, host: 'hadoop1:28031', arbiterOnly : true},
{_id: 4, host: 'hadoop2:28031', arbiterOnly : true},
{_id: 5, host: 'hadoop3:28031', arbiterOnly : true}]};
> rs.initiate(config);
{ "ok" : 1 }
shard1:OTHER> rs.status();
{
"set" : "shard1",
"date" : ISODate("2015-12-30T09:55:43.409Z"),
"myState" : 1,
"members" : [
{
"_id" : 0,
"name" : "hadoop1:28017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 2025,
"optime" : Timestamp(1451469320, 1),
"optimeDate" : ISODate("2015-12-30T09:55:20Z"),
"electionTime" : Timestamp(1451469324, 1),
"electionDate" : ISODate("2015-12-30T09:55:24Z"),
"configVersion" : 1,
"self" : true
},
{
"_id" : 1,
"name" : "hadoop2:28017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 22,
"optime" : Timestamp(1451469320, 1),
"optimeDate" : ISODate("2015-12-30T09:55:20Z"),
"lastHeartbeat" : ISODate("2015-12-30T09:55:42.529Z"),
"lastHeartbeatRecv" : ISODate("2015-12-30T09:55:42.534Z"),
"pingMs" : 0,
"configVersion" : 1
},
{
"_id" : 2,
"name" : "hadoop3:28017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 22,
"optime" : Timestamp(1451469320, 1),
"optimeDate" : ISODate("2015-12-30T09:55:20Z"),
"lastHeartbeat" : ISODate("2015-12-30T09:55:42.529Z"),
"lastHeartbeatRecv" : ISODate("2015-12-30T09:55:42.537Z"),
"pingMs" : 0,
"configVersion" : 1
},
{
"_id" : 3,
"name" : "hadoop1:28031",
"health" : 1,
"state" : 7,
"stateStr" : "ARBITER",
"uptime" : 22,
"lastHeartbeat" : ISODate("2015-12-30T09:55:42.528Z"),
"lastHeartbeatRecv" : ISODate("2015-12-30T09:55:42.533Z"),
"pingMs" : 0,
"configVersion" : 1
},
{
"_id" : 4,
"name" : "hadoop2:28031",
"health" : 1,
"state" : 7,
"stateStr" : "ARBITER",
"uptime" : 22,
"lastHeartbeat" : ISODate("2015-12-30T09:55:42.529Z"),
"lastHeartbeatRecv" : ISODate("2015-12-30T09:55:42.535Z"),
"pingMs" : 0,
"configVersion" : 1
},
{
"_id" : 5,
"name" : "hadoop3:28031",
"health" : 1,
"state" : 7,
"stateStr" : "ARBITER",
"uptime" : 22,
"lastHeartbeat" : ISODate("2015-12-30T09:55:42.533Z"),
"lastHeartbeatRecv" : ISODate("2015-12-30T09:55:42.539Z"),
"pingMs" : 0,
"configVersion" : 1
}
],
"ok" : 1
}
shard1:PRIMARY>
注:副本集状态正常,hadoop1主机上的mongod进程(端口28017)已被选举成primary节点了
3.2 在主机hadoop2上配置
[root@hadoop2 ~]# mongo hadoop2:28018/admin
> config = {_id:'shard2', members: [{_id: 0, host: 'hadoop2:28018'},
{_id: 1, host: 'hadoop1:28018'},
{_id :2, host: 'hadoop3:28018',slaveDelay : 7200 ,priority:0},
{_id: 3, host: 'hadoop1:28032', arbiterOnly : true},
{_id: 4, host: 'hadoop2:28032', arbiterOnly : true},
{_id: 5, host: 'hadoop3:28032', arbiterOnly : true}]};
> rs.initiate(config);
shard2:SECONDARY> rs.status();
{
"set" : "shard2",
"date" : ISODate("2015-12-30T10:02:22.153Z"),
"myState" : 1,
"members" : [
{
"_id" : 0,
"name" : "hadoop2:28018",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 1621,
"optime" : Timestamp(1451469736, 1),
"optimeDate" : ISODate("2015-12-30T10:02:16Z"),
"electionTime" : Timestamp(1451469739, 1),
"electionDate" : ISODate("2015-12-30T10:02:19Z"),
"configVersion" : 1,
"self" : true
},
{
"_id" : 1,
"name" : "hadoop1:28018",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 6,
"optime" : Timestamp(1451469736, 1),
"optimeDate" : ISODate("2015-12-30T10:02:16Z"),
"lastHeartbeat" : ISODate("2015-12-30T10:02:21.692Z"),
"lastHeartbeatRecv" : ISODate("2015-12-30T10:02:21.695Z"),
"pingMs" : 0,
"syncingTo" : "hadoop2:28018",
"configVersion" : 1
},
{
"_id" : 2,
"name" : "hadoop3:28018",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 6,
"optime" : Timestamp(1451469736, 1),
"optimeDate" : ISODate("2015-12-30T10:02:16Z"),
"lastHeartbeat" : ISODate("2015-12-30T10:02:21.692Z"),
"lastHeartbeatRecv" : ISODate("2015-12-30T10:02:21.701Z"),
"pingMs" : 0,
"syncingTo" : "hadoop2:28018",
"configVersion" : 1
},
{
"_id" : 3,
"name" : "hadoop1:28032",
"health" : 1,
"state" : 7,
"stateStr" : "ARBITER",
"uptime" : 6,
"lastHeartbeat" : ISODate("2015-12-30T10:02:21.692Z"),
"lastHeartbeatRecv" : ISODate("2015-12-30T10:02:21.698Z"),
"pingMs" : 0,
"configVersion" : 1
},
{
"_id" : 4,
"name" : "hadoop2:28032",
"health" : 1,
"state" : 7,
"stateStr" : "ARBITER",
"uptime" : 6,
"lastHeartbeat" : ISODate("2015-12-30T10:02:21.689Z"),
"lastHeartbeatRecv" : ISODate("2015-12-30T10:02:21.696Z"),
"pingMs" : 0,
"configVersion" : 1
},
{
"_id" : 5,
"name" : "hadoop3:28032",
"health" : 1,
"state" : 7,
"stateStr" : "ARBITER",
"uptime" : 6,
"lastHeartbeat" : ISODate("2015-12-30T10:02:21.692Z"),
"lastHeartbeatRecv" : ISODate("2015-12-30T10:02:21.695Z"),
"pingMs" : 0,
"configVersion" : 1
}
],
"ok" : 1
}
shard2:PRIMARY>
注:副本集状态正常,hadoop2主机上的mongod进程(端口28018)已被选举成primary节点了
4.添加分片
4.1 在主机hadoop3上操作
4.1.1 添加副本集分片
[root@hadoop3 ~]# mongo hadoop3:28885/admin
mongos> db.runCommand({"addShard" : "shard1/hadoop1:28017"})
{ "shardAdded" : "shard1", "ok" : 1 }
mongos> db.runCommand({"addShard" : "shard2/hadoop2:28018"})
{ "shardAdded" : "shard2", "ok" : 1 }
注:添加shard的操作一定要切换到admin数据库,不然会报如下错误:
2015-12-31T11:20:44.228+0800 E QUERY Error: error: {
"$err" : "error creating initial database config information :: caused by :: can't find a shard to put new db on",
"code" : 10185
}
4.1.2 启用数据库sharding
mongos> db.runCommand( { "enableSharding" : "shardingdb"});
{ "ok" : 1 }
4.1.3 对collection添加shard key
mongos> db.runCommand( { shardCollection : "shardingdb.food",key : {_id: 1} } )
{ "collectionsharded" : "shardingdb.food", "ok" : 1 }
4.1.4 查看sharding状态
mongos> sh.status();
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("5683a6de968054cb43c6ee0d")
}
shards:
{ "_id" : "shard1", "host" : "shard1/hadoop1:28017,hadoop2:28017" }
{ "_id" : "shard2", "host" : "shard2/hadoop1:28018,hadoop2:28018" }
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
{ "_id" : "admin", "partitioned" : false, "primary" : "config" }
{ "_id" : "shardingdb", "partitioned" : true, "primary" : "shard1" }
shardingdb.food
shard key: { "_id" : 1 }
chunks:
shard1 1
{ "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard1 Timestamp(1, 0)
注:至此整个Sharded Cluster已经部署完毕!
来自 “ ITPUB博客 ” ,链接:http://blog.itpub.net/20801486/viewspace-1969202/,如需转载,请注明出处,否则将追究法律责任。
转载于:http://blog.itpub.net/20801486/viewspace-1969202/