MongoDB Sharding Setup
Since this is a simple test build, a single VM configured with multiple VIPs is used.
Each shard is a replica set, and the first node of each replica set is an arbiter.
Node overview
route0 1.1.1.100:4000
route1 1.1.1.101:4001
conf0 1.1.1.110:4010
conf1 1.1.1.111:4011
conf2 1.1.1.112:4012
rs0-0 1.1.1.120:4020
rs0-1 1.1.1.121:4021
rs0-2 1.1.1.122:4022
rs1-0 1.1.1.130:4030
rs1-1 1.1.1.131:4031
rs1-2 1.1.1.132:4032
rs2-0 1.1.1.140:4040
rs2-1 1.1.1.141:4041
rs2-2 1.1.1.142:4042
rs3-0 1.1.1.150:4050
rs3-1 1.1.1.151:4051
rs3-2 1.1.1.152:4052
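The connectivity check later resolves these names through /etc/hosts; a minimal sketch of the assumed entries (hypothetical, derived from the overview above):
1.1.1.10   db10
1.1.1.100  route0
1.1.1.101  route1
1.1.1.110  conf0
1.1.1.111  conf1
1.1.1.112  conf2
1.1.1.120  rs0-0
1.1.1.121  rs0-1
1.1.1.122  rs0-2
1.1.1.130  rs1-0
1.1.1.131  rs1-1
1.1.1.132  rs1-2
1.1.1.140  rs2-0
1.1.1.141  rs2-1
1.1.1.142  rs2-2
1.1.1.150  rs3-0
1.1.1.151  rs3-1
1.1.1.152  rs3-2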
VIP script:
[root@db10 mongo]# cat vip.sh
ifconfig | grep eth0: | awk '{print $1}' | xargs -l -i ifconfig {} down
ifconfig eth0:1 1.1.1.100 netmask 255.255.255.0 up
ifconfig eth0:2 1.1.1.101 netmask 255.255.255.0 up
ifconfig eth0:3 1.1.1.110 netmask 255.255.255.0 up
ifconfig eth0:4 1.1.1.111 netmask 255.255.255.0 up
ifconfig eth0:5 1.1.1.112 netmask 255.255.255.0 up
ifconfig eth0:6 1.1.1.120 netmask 255.255.255.0 up
ifconfig eth0:7 1.1.1.121 netmask 255.255.255.0 up
ifconfig eth0:8 1.1.1.122 netmask 255.255.255.0 up
ifconfig eth0:9 1.1.1.130 netmask 255.255.255.0 up
ifconfig eth0:10 1.1.1.131 netmask 255.255.255.0 up
ifconfig eth0:11 1.1.1.132 netmask 255.255.255.0 up
ifconfig eth0:12 1.1.1.140 netmask 255.255.255.0 up
ifconfig eth0:13 1.1.1.141 netmask 255.255.255.0 up
ifconfig eth0:14 1.1.1.142 netmask 255.255.255.0 up
ifconfig eth0:15 1.1.1.150 netmask 255.255.255.0 up
ifconfig eth0:16 1.1.1.151 netmask 255.255.255.0 up
ifconfig eth0:17 1.1.1.152 netmask 255.255.255.0 up
ifconfig | grep -E "inet addr" | grep -v -E "127|2\.2\.2"
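On newer systems where the legacy ifconfig alias syntax is unavailable, a rough iproute2 equivalent (a sketch only, not used in this setup) would be:
for vip in 1.1.1.100 1.1.1.101 1.1.1.110 1.1.1.111 1.1.1.112 1.1.1.120 1.1.1.121 1.1.1.122 1.1.1.130 1.1.1.131 1.1.1.132 1.1.1.140 1.1.1.141 1.1.1.142 1.1.1.150 1.1.1.151 1.1.1.152; do
    ip addr add ${vip}/24 dev eth0   # add each VIP as a secondary address
done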
Run the script
[root@db10 mongo]# ./vip.sh
inet addr:1.1.1.10 Bcast:1.1.1.255 Mask:255.255.255.0
inet addr:1.1.1.100 Bcast:1.1.1.255 Mask:255.255.255.0
inet addr:1.1.1.101 Bcast:1.1.1.255 Mask:255.255.255.0
inet addr:1.1.1.110 Bcast:1.1.1.255 Mask:255.255.255.0
inet addr:1.1.1.111 Bcast:1.1.1.255 Mask:255.255.255.0
inet addr:1.1.1.112 Bcast:1.1.1.255 Mask:255.255.255.0
inet addr:1.1.1.120 Bcast:1.1.1.255 Mask:255.255.255.0
inet addr:1.1.1.121 Bcast:1.1.1.255 Mask:255.255.255.0
inet addr:1.1.1.122 Bcast:1.1.1.255 Mask:255.255.255.0
inet addr:1.1.1.130 Bcast:1.1.1.255 Mask:255.255.255.0
inet addr:1.1.1.131 Bcast:1.1.1.255 Mask:255.255.255.0
inet addr:1.1.1.132 Bcast:1.1.1.255 Mask:255.255.255.0
inet addr:1.1.1.140 Bcast:1.1.1.255 Mask:255.255.255.0
inet addr:1.1.1.141 Bcast:1.1.1.255 Mask:255.255.255.0
inet addr:1.1.1.142 Bcast:1.1.1.255 Mask:255.255.255.0
inet addr:1.1.1.150 Bcast:1.1.1.255 Mask:255.255.255.0
inet addr:1.1.1.151 Bcast:1.1.1.255 Mask:255.255.255.0
inet addr:1.1.1.152 Bcast:1.1.1.255 Mask:255.255.255.0
Check connectivity
[root@db10 mongo]# cat /etc/hosts | grep "1.1.1" | awk '{print $2}' | xargs -l ping -c 1 | grep from
64 bytes from db10 (1.1.1.10): icmp_seq=1 ttl=64 time=0.090 ms
64 bytes from route0 (1.1.1.100): icmp_seq=1 ttl=64 time=0.076 ms
64 bytes from route1 (1.1.1.101): icmp_seq=1 ttl=64 time=0.040 ms
64 bytes from conf0 (1.1.1.110): icmp_seq=1 ttl=64 time=0.020 ms
64 bytes from conf1 (1.1.1.111): icmp_seq=1 ttl=64 time=0.015 ms
64 bytes from conf2 (1.1.1.112): icmp_seq=1 ttl=64 time=0.014 ms
64 bytes from rs0-0 (1.1.1.120): icmp_seq=1 ttl=64 time=0.018 ms
64 bytes from rs0-1 (1.1.1.121): icmp_seq=1 ttl=64 time=0.014 ms
64 bytes from rs0-2 (1.1.1.122): icmp_seq=1 ttl=64 time=0.014 ms
64 bytes from rs1-0 (1.1.1.130): icmp_seq=1 ttl=64 time=0.034 ms
64 bytes from rs1-1 (1.1.1.131): icmp_seq=1 ttl=64 time=0.017 ms
64 bytes from rs1-2 (1.1.1.132): icmp_seq=1 ttl=64 time=0.018 ms
64 bytes from rs2-0 (1.1.1.140): icmp_seq=1 ttl=64 time=0.015 ms
64 bytes from rs2-1 (1.1.1.141): icmp_seq=1 ttl=64 time=0.012 ms
64 bytes from rs2-2 (1.1.1.142): icmp_seq=1 ttl=64 time=0.000 ms
64 bytes from rs3-0 (1.1.1.150): icmp_seq=1 ttl=64 time=0.017 ms
64 bytes from rs3-1 (1.1.1.151): icmp_seq=1 ttl=64 time=0.013 ms
64 bytes from rs3-2 (1.1.1.152): icmp_seq=1 ttl=64 time=0.020 ms
Create the data directories
[root@db10 mongo]# mkdir conf0 conf1 conf2 rs0-0 rs0-1 rs0-2 rs1-0 rs1-1 rs1-2 rs2-0 rs2-1 rs2-2 rs3-0 rs3-1 rs3-2
Config server parameter file
[root@db10 mongo]# cat conf0.conf
port = 4010
logpath = /opt/mongo/conf0.log
logappend = true
dbpath = /opt/mongo/conf0/
smallfiles = true
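conf1.conf and conf2.conf are assumed to be identical apart from port, dbpath and logpath, e.g. conf1.conf:
port = 4011
logpath = /opt/mongo/conf1.log
logappend = true
dbpath = /opt/mongo/conf1/
smallfiles = true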
Start the config servers
nohup mongod --configsvr -f /opt/mongo/conf0.conf &
nohup mongod --configsvr -f /opt/mongo/conf1.conf &
nohup mongod --configsvr -f /opt/mongo/conf2.conf &
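To confirm each config server is answering, a quick ping through the mongo shell can be used (assumes the mongo client is on the PATH), e.g.:
mongo --host conf0 --port 4010 --eval 'db.runCommand({ping: 1})'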
Router (mongos) parameter file
[root@db10 mongo]# cat route0.conf
port = 4000
logpath = /opt/mongo/route0.log
logappend = true
configdb = conf0:4010,conf1:4011,conf2:4012
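route1.conf is assumed to differ only in port and logpath:
port = 4001
logpath = /opt/mongo/route1.log
logappend = true
configdb = conf0:4010,conf1:4011,conf2:4012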
Start the routers
nohup mongos -f route0.conf &
nohup mongos -f route1.conf &
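A quick reachability check for each mongos (same sketch style as the config server check above):
mongo --host route0 --port 4000 --eval 'db.adminCommand({ping: 1})'
mongo --host route1 --port 4001 --eval 'db.adminCommand({ping: 1})'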
Shard replica set parameter file
(There are 4 replica sets; the first node of each replica set is an arbiter, and the remaining nodes hold the data.)
[root@db10 mongo]# cat rs0-0.conf
smallfiles = true
logappend = true
replSet = rs0
port = 4020
dbpath = /opt/mongo/rs0-0/
logpath = /opt/mongo/rs0-0.log
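The other members' files (rs0-1.conf, rs0-2.conf, and the rs1/rs2/rs3 files) follow the same pattern with their own ports and paths. Each member is then started from its file, exactly as in the one-click script at the end; for rs0:
nohup mongod -f /opt/mongo/rs0-0.conf &
nohup mongod -f /opt/mongo/rs0-1.conf &
nohup mongod -f /opt/mongo/rs0-2.conf &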
Initialize each replica set
(When testing with a single machine + VIPs + hostnames configured in /etc/hosts, the replica sets must be initialized by explicitly specifying members[n].host as below; otherwise mongo sets each node's host to the machine's hostname rather than the rs0-n names we assigned.)
> var config = {_id: 'rs0', members: [
... {_id: 0, host: 'rs0-0:4020', arbiterOnly: true},
... {_id: 1, host: 'rs0-1:4021'},
... {_id: 2, host: 'rs0-2:4022'}
... ]};
> rs.initiate(config);
{ "ok" : 1 }
rs0:OTHER> rs.status();
{
"set" : "rs0",
"date" : ISODate("2015-03-30T06:09:28.166Z"),
"myState" : 1,
"members" : [
{
"_id" : 0,
"name" : "rs0-0:4020",
"health" : 1,
"state" : 7,
"stateStr" : "ARBITER",
"uptime" : 7,
"lastHeartbeat" : ISODate("2015-03-30T06:09:26.825Z"),
"lastHeartbeatRecv" : ISODate("2015-03-30T06:09:26.834Z"),
"pingMs" : 1,
"configVersion" : 1
},
{
"_id" : 1,
"name" : "rs0-1:4021",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 112,
"optime" : Timestamp(1427695760, 1),
"optimeDate" : ISODate("2015-03-30T06:09:20Z"),
"electionTime" : Timestamp(1427695762, 1),
"electionDate" : ISODate("2015-03-30T06:09:22Z"),
"configVersion" : 1,
"self" : true
},
{
"_id" : 2,
"name" : "rs0-2:4022",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 7,
"optime" : Timestamp(1427695760, 1),
"optimeDate" : ISODate("2015-03-30T06:09:20Z"),
"lastHeartbeat" : ISODate("2015-03-30T06:09:26.825Z"),
"lastHeartbeatRecv" : ISODate("2015-03-30T06:09:26.834Z"),
"pingMs" : 1,
"configVersion" : 1
}
],
"ok" : 1
}
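rs1, rs2 and rs3 are initialized the same way, adjusting the set name, hostnames and ports; e.g. for rs1 (a sketch following the same pattern):
> var config = {_id: 'rs1', members: [
... {_id: 0, host: 'rs1-0:4030', arbiterOnly: true},
... {_id: 1, host: 'rs1-1:4031'},
... {_id: 2, host: 'rs1-2:4032'}
... ]};
> rs.initiate(config);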
Add the shards
mongos> sh.addShard("rs0/rs0-0:4020,rs0-1:4021,rs0-2:4022");
{ "shardAdded" : "rs0", "ok" : 1 }
mongos> sh.addShard("rs1/rs1-0:4030,rs1-1:4031,rs1-2:4032");
{ "shardAdded" : "rs1", "ok" : 1 }
mongos> sh.addShard("rs2/rs2-0:4040,rs2-1:4041,rs2-2:4042");
{ "shardAdded" : "rs2", "ok" : 1 }
mongos> sh.addShard("rs3/rs3-0:4050,rs3-1:4051,rs3-2:4052");
{ "shardAdded" : "rs3", "ok" : 1 }
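The shard list can be verified from mongos (output omitted here); both commands are standard:
mongos> sh.status()
mongos> db.adminCommand({ listShards: 1 })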
The msg "isdbgrid" indicates that the current connection is to a sharded cluster:
mongos> rs.isMaster()
{
"ismaster" : true,
"msg" : "isdbgrid",
"maxBsonObjectSize" : 16777216,
"maxMessageSizeBytes" : 48000000,
"maxWriteBatchSize" : 1000,
"localTime" : ISODate("2015-03-30T06:33:04.209Z"),
"maxWireVersion" : 3,
"minWireVersion" : 0,
"ok" : 1
}
Create test data
(To make chunk splitting easy to observe with a small amount of data, it is best to set the chunk size to 1 MB first:
use config
db.settings.save( { _id:"chunksize", value: 1 } )
)
use test
sh.enableSharding("test");
db.createCollection("doc0");
db.createCollection("doc1");
db.createCollection("doc2");
sh.shardCollection("test.doc0", {"int1": 1, "int2": 1});
sh.shardCollection("test.doc1", {"datee": 1});
sh.shardCollection("test.doc2", {"_id": "hashed"});
for(var i = 0; i < 100; i++) {
for(var j = 0; j < 100; j++) {
db.doc0.insert({int1:i, int2:j});
}
}
for(var i = 0; i < 100; i++) {
for(var j = 0; j < 100; j++) {
db.doc1.insert({datee:ISODate("2015-0"+(i%9+1)+"-0"+(i%9+1)), int1:i, int2:j});
db.doc1.insert({datee:ISODate("2015-0"+(i%9+1)+"-"+(i%19+10)), int1:i, int2:j});
db.doc1.insert({datee:ISODate("2015-1"+(i%3)+"-0"+(i%9+1)), int1:i, int2:j});
db.doc1.insert({datee:ISODate("2015-1"+(i%3)+"-"+(i%19+10)), int1:i, int2:j});
}
}
for(var i = 0; i < 100; i++) {
for(var j = 0; j < 100; j++) {
db.doc2.insert({int1:i*j});
}
}
mongos> db.doc0.find().count()
10000
mongos> db.doc1.find().count()
40000
mongos> db.doc2.find().count()
10000
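To see how the documents were spread across the four shards, the standard shell helper getShardDistribution() can be run on each collection (output omitted):
mongos> db.doc0.getShardDistribution()
mongos> db.doc1.getShardDistribution()
mongos> db.doc2.getShardDistribution()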
One-click start
[root@db10 mongo]# cat startall
nohup mongod --configsvr -f /opt/mongo/conf0.conf &
nohup mongod --configsvr -f /opt/mongo/conf1.conf &
nohup mongod --configsvr -f /opt/mongo/conf2.conf &
nohup mongod -f /opt/mongo/rs0-0.conf &
nohup mongod -f /opt/mongo/rs0-1.conf &
nohup mongod -f /opt/mongo/rs0-2.conf &
nohup mongod -f /opt/mongo/rs1-0.conf &
nohup mongod -f /opt/mongo/rs1-1.conf &
nohup mongod -f /opt/mongo/rs1-2.conf &
nohup mongod -f /opt/mongo/rs2-0.conf &
nohup mongod -f /opt/mongo/rs2-1.conf &
nohup mongod -f /opt/mongo/rs2-2.conf &
nohup mongod -f /opt/mongo/rs3-0.conf &
nohup mongod -f /opt/mongo/rs3-1.conf &
nohup mongod -f /opt/mongo/rs3-2.conf &
sleep 4
nohup mongos -f route0.conf &
nohup mongos -f route1.conf &
sleep 2
ps -ef| grep -E mongo[sd]
One-click shutdown
[root@db10 mongo]# cat killall
find . -name mongod.lock -exec cat {} \; | xargs kill
ps -ef| grep mongos | grep -v grep | awk '{print $2}' |xargs kill
sleep 4
ps -ef| grep -E mongo[sd]
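The killall script works because mongod.lock holds the PID of each running mongod. A cleaner per-node alternative is mongod's own shutdown option (Linux only; a sketch for one node):
mongod --shutdown --dbpath /opt/mongo/rs0-0/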
Check processes
[root@db10 mongo]# cat check
ps -ef| grep -E mongo[sd]
Source: ITPUB Blog, link: http://blog.itpub.net/26239116/viewspace-1485434/. Please credit the source when reproducing.