基本安装
yum安装(rpm)
vi /etc/yum.repos.d/mongodb-org-3.4.repo
[mongodb-org-3.4]
name=MongoDB Repository
# $releasever 会被 yum 自动替换为系统主版本号(如 6、7),也可手动填写
baseurl=https://repo.mongodb.org/yum/redhat/$releasever/mongodb-org/3.4/x86_64/
gpgcheck=1
enabled=1
gpgkey=https://www.mongodb.org/static/pgp/server-3.4.asc
yum install -y mongodb-org
解压安装(推荐)
MongoDB 归档:https://www.mongodb.org/dl/linux/x86_64
http://downloads.mongodb.org/linux/mongodb-linux-x86_64-rhel62-3.4.20.tgz
解压至任意目录即可
Shell启动
启动指令
./mongod --port 27017 --dbpath=/usr/mongodb/data
shell连接
./mongo # 连接本地默认端口(27017)
-------------------------------------
./mongo --shell 192.168.134.99:27017/user # 连接shell到192.168.134.99 27017数据库下user
-------------------------------------
./mongo --shell --port 27017 --host 192.168.134.99
基本操作
基础指令
1)展示所有数据库 show dbs
2)展示当前使用的数据库 db
3)切换数据库 use databasename
切换到指定的数据库(数据库不存在时并不会立即创建,首次写入数据时才真正创建;若始终没有写入数据则不会出现在 show dbs 中)
4)展示当前库下所有的集合 show collections
插入操作
1)插入单个文档 db.user.insertOne({id:1,name:"zzj",age:18})
2)插入多个文档 db.user.insertMany([{id:2,name:"zjz",age:18},{id:3,name:"jzz",age:20}])
3)通过JS进行插入 for(var i = 1;i<10;i++){db.user.insertOne({name:"name"+i,id:i,age:18+i})}
查询
1)查询所有 db.user.find()
2)简单条件查询 db.user.find({name:"zzj"})
3)使用运算符进行条件查询
> db.user.find({age:{$in:[18,20]}})
> db.user.find({id:1,name:"zzj"})
> db.user.find({$or:[{id:2},{name:"zzj"}]})
> db.user.find({age:18,$or:[{id:2},{name:"zzj"}]})
4)正则匹配查询 db.user.find({name:{$regex:"name"}})
5)嵌套文档查询
准备数据:
db.user.insertMany([
{id:5,name:"zs",age:18,info:{h:180,w:130}},
{id:6,name:"ls",age:20,info:{h:185,w:120}},
{id:7,name:"ww",age:19,info:{h:190,w:200}}
])
例如查询 身高为180 体重为130的:
> db.user.find({info:{h:180,w:130}})
{ "_id" : ObjectId("5c8f30bb574ffc7f00356807"), "id" : 5, "name" : "zs", "age" : 18, "info" : { "h" : 180, "w" : 130 } }
6)查询嵌套字段
查询身高
> db.user.find({"info.h":180})
{ "_id" : ObjectId("5c8f30bb574ffc7f00356807"), "id" : 5, "name" : "zs", "age" : 18, "info" : { "h" : 180, "w" : 130 } }
7)查询数组
准备数据
db.user.insertMany([
{id:9,name:"feng",age:21,info:{h:180,w:130},
hobby:["smoke","drink"],lucknum:[1,2,4]},
{id:10,name:"yuxi",age:22,info:{h:185,w:120},
hobby:["hothead","smoke"],lucknum:[2,3,4]},
{id:11,name:"wenchao",age:23,info:{h:190,w:200},
hobby:["eat","smoke"],lucknum:[6,7,8]}
])
查询喜欢抽烟和喝酒的人(注意:直接用数组作条件是整体精确匹配,要求元素与顺序完全一致;若不关心顺序可用 {hobby:{$all:["smoke","drink"]}})
> db.user.find({hobby:["smoke","drink"]})
{ "_id" : ObjectId("5c8f37fd574ffc7f0035680d"), "id" : 9, "name" : "feng", "age" : 21, "info" : { "h" : 180, "w" : 130 }, "hobby" : [ "smoke", "drink" ], "lucknum" : [ 1, 2, 4 ] }
查询喜欢抽烟的人
> db.user.find({hobby:"smoke"})
{ "_id" : ObjectId("5c8f37fd574ffc7f0035680d"), "id" : 9, "name" : "feng", "age" : 21, "info" : { "h" : 180, "w" : 130 }, "hobby" : [ "smoke", "drink" ], "lucknum" : [ 1, 2, 4 ] }
{ "_id" : ObjectId("5c8f37fd574ffc7f0035680e"), "id" : 10, "name" : "yuxi", "age" : 22, "info" : { "h" : 185, "w" : 120 }, "hobby" : [ "hothead", "smoke" ], "lucknum" : [ 2, 3, 4 ] }
{ "_id" : ObjectId("5c8f37fd574ffc7f0035680f"), "id" : 11, "name" : "wenchao", "age" : 23, "info" : { "h" : 190, "w" : 200 }, "hobby" : [ "eat", "smoke" ], "lucknum" : [ 6, 7, 8 ] }
查询幸运数大于0且小于6
> db.user.find({lucknum:{$gt:0,$lt:6}})
{ "_id" : ObjectId("5cd12f4c315b584c7c5cfe22"), "id" : 9, "name" : "feng", "age" : 21, "info" : { "h" : 180, "w" : 130 }, "hobby" : [ "smoke", "drink" ], "lucknum" : [ 1, 2, 4 ] }
{ "_id" : ObjectId("5cd12f4c315b584c7c5cfe23"), "id" : 10, "name" : "yuxi", "age" : 22, "info" : { "h" : 185, "w" : 120 }, "hobby" : [ "hothead", "smoke" ], "lucknum" : [ 2, 3, 4 ] }
查询数组下标为0的数它要大于5
> db.user.find({"lucknum.0":{$gt:5}})
{ "_id" : ObjectId("5c8f372f574ffc7f0035680c"), "id" : 11, "name" : "wenchao", "age" : 23, "info" : { "h" : 190, "w" : 200 }, "lucknum" : [ 6, 7, 8 ] }
{ "_id" : ObjectId("5c8f37fd574ffc7f0035680f"), "id" : 11, "name" : "wenchao", "age" : 23, "info" : { "h" : 190, "w" : 200 }, "hobby" : [ "eat", "smoke" ], "lucknum" : [ 6, 7, 8 ] }
按照数组长度查询数据
> db.user.find({"lucknum":{$size:3}})
{ "_id" : ObjectId("5c8f372f574ffc7f0035680a"), "id" : 9, "name" : "feng", "age" : 21, "info" : { "h" : 180, "w" : 130 }, "lucknum" : [ 1, 2, 4 ] }
{ "_id" : ObjectId("5c8f372f574ffc7f0035680b"), "id" : 10, "name" : "yuxi", "age" : 22, "info" : { "h" : 185, "w" : 120 }, "lucknum" : [ 5, 6, 7 ] }
{ "_id" : ObjectId("5c8f372f574ffc7f0035680c"), "id" : 11, "name" : "wenchao", "age" : 23, "info" : { "h" : 190, "w" : 200 }, "lucknum" : [ 6, 7, 8 ] }
更新
db.collection.updateOne(<filter>, <update>, <options>)
db.collection.updateMany(<filter>, <update>, <options>)
db.collection.replaceOne(<filter>, <replacement>, <options>)
准备数据
db.inventory.insertMany( [
{ item: "canvas", qty: 100, size: { h: 28, w: 35.5, uom: "cm" }, status: "A" },
{ item: "journal", qty: 25, size: { h: 14, w: 21, uom: "cm" }, status: "A" },
{ item: "mat", qty: 85, size: { h: 27.9, w: 35.5, uom: "cm" }, status: "A" },
{ item: "mousepad", qty: 25, size: { h: 19, w: 22.85, uom: "cm" }, status: "P" },
{ item: "notebook", qty: 50, size: { h: 8.5, w: 11, uom: "in" }, status: "P" },
{ item: "paper", qty: 100, size: { h: 8.5, w: 11, uom: "in" }, status: "D" },
{ item: "planner", qty: 75, size: { h: 22.85, w: 30, uom: "cm" }, status: "D" },
{ item: "postcard", qty: 45, size: { h: 10, w: 15.25, uom: "cm" }, status: "A" },
{ item: "sketchbook", qty: 80, size: { h: 14, w: 21, uom: "cm" }, status: "A" },
{ item: "sketch pad", qty: 95, size: { h: 22.85, w: 30.5, uom: "cm" }, status: "A" }
]);
更新单个文档 – 查找item为paper的文档,size.uom更新为cm,status更新为p,lastModified更新为当前时间
db.inventory.updateOne(
{ item: "paper" },
{
$set: { "size.uom": "cm", status: "P" },
$currentDate: { lastModified: true }
}
)
更新多个文档 – 查找qty小于50 的文档数据,设置size.uom为in status为p,修改最后修改时间为当前时间
db.inventory.updateMany(
{ "qty": { $lt: 50 } },
{
$set: { "size.uom": "in", status: "P" },
$currentDate: { lastModified: true }
}
)
替换文档
db.inventory.replaceOne(
{ item: "paper" },
{ item: "user", instock: [ { warehouse: "A", qty: 60 }, { warehouse: "B", qty: 40 } ] }
)
删除
db.collection.deleteMany()
db.collection.deleteOne()
准备数据
db.inventory.insertMany( [
{ item: "journal", qty: 25, size: { h: 14, w: 21, uom: "cm" }, status: "A" },
{ item: "notebook", qty: 50, size: { h: 8.5, w: 11, uom: "in" }, status: "P" },
{ item: "paper", qty: 100, size: { h: 8.5, w: 11, uom: "in" }, status: "D" },
{ item: "planner", qty: 75, size: { h: 22.85, w: 30, uom: "cm" }, status: "D" },
{ item: "postcard", qty: 45, size: { h: 10, w: 15.25, uom: "cm" }, status: "A" },
] );
删除全部文档
db.inventory.deleteMany({})
删除集合中所有状态为A的文档数据
db.inventory.deleteMany({ status : "A" })
根据条件删除一个 – 即使有很多文档数据匹配当前条件,也只会删除第一个根据条件找到的文档数据
db.inventory.deleteOne( { status: "D" } )
索引
1)单Field索引: db.records.createIndex( { score: 1 } )
2)复合索引 : db.collection.createIndex({ score: 1,name:-1 } )
3)唯一索引 : db.members.createIndex({"user_id":1},{unique:true})
4)稀疏索引 : db.t_user.createIndex({name:1},{sparse:true});
5)部分索引 :
db.t_user.createIndex({name:1},{partialFilterExpression:{age:{$gt:18}},unique:true})
聚合
准备数据
[
{name:"张三",sex:true,class:"软开1班",age:28,salary:18000},
{name:"李四",sex:true,class:"软开2班",age:25,salary:15000},
{name:"王五",sex:false,class:"软开1班",age:35,salary:10000},
{name:"赵六",sex:true,class:"软开2班",age:20,salary:15000}
]
1.统计班级人员的平均薪资
db.t_user.aggregate([{$group:{_id:'$class',avgSalary:{$avg:'$salary'}}}]);
2.分析班级中薪资大于10000学生的平均年龄
db.t_user.aggregate([{$match:{salary:{$gt:10000}}},{$group:{_id:'$class',avgAge:{$avg:'$age'}}}]);
3.求每一个班中薪资最高薪资
db.t_user.aggregate([{$group:{_id:'$class',maxSalary:{$max:'$salary'}}}]);
4.求每一个班中薪资最低薪资
db.t_user.aggregate([{$group:{_id:'$class',minSalary:{$min:'$salary'}}}]);
5.求每一个班中薪资最低薪资班级降序
db.t_user.aggregate([{$group:{_id:'$class',minSalary:{$min:'$salary'}}},{$sort:{_id:-1}}]);
6.按照班级平均薪资降序
db.t_user.aggregate([{$group:{_id:'$class',avgSalary:{$avg:'$salary'}}},{$sort:{avgSalary:-1}}]);
7.求班级平均薪资最高的班级信息
db.t_user.aggregate([{$group:{_id:'$class',avgSalary:{$avg:'$salary'}}},{$sort:{avgSalary:-1}},{$limit:1}]);
Java API
依赖
<dependency>
<groupId>org.mongodb</groupId>
<artifactId>mongodb-driver</artifactId>
<version>3.4.3</version>
</dependency>
获取数据库连接
MongoClient mongoClient = new MongoClient();
MongoClient mongoClient = new MongoClient( "host1" );
MongoClient mongoClient = new MongoClient( "host1" , 27017 );
MongoClient mongoClient = new MongoClient(new MongoClientURI("mongodb://host1:27017"));
基本操作
获得数据库对象
MongoDatabase database = mongoClient.getDatabase("test");
获得集合对象
MongoCollection<Document> coll = database.getCollection("myTestCollection");
创建上限集合 – 例如 创建一个大小为1M字节的上限集合(0x100000 为十六进制,即 1048576 字节 = 1MB)
database.createCollection("cappedCollection",
new CreateCollectionOptions().capped(true).sizeInBytes(0x100000));
获得当前数据中所有的集合
for (String listCollectionName : database.listCollectionNames()) {
System.out.println(listCollectionName);
}
删除集合
MongoCollection<Document> collection = database.getCollection("contacts");
collection.drop();
获取集合中所有的数据
database.getCollection("user").find().forEach((Block<? super Document>) document->{
System.out.println(document.toJson());
});
条件查询(查询过滤器)
//1.空过滤器(Empty Filter)
database.getCollection("user").find(new Document()).forEach((Block<? super Document>) document->{
System.out.println(document.toJson());
});
//2.根据条件查询
database.getCollection("user").find(new Document("name","zzj")).forEach((Block<? super Document>) document -> {
System.out.println(document.toJson());
});
写操作
Document document = new Document("name", "Café Con Leche")
.append("contact", new Document("phone", "228-555-0149")
.append("email", "cafeconleche@example.com")
.append("location",Arrays.asList(-73.92502, 40.8279556)))
.append("stars", 3)
.append("categories", Arrays.asList("Bakery", "Coffee", "Pastries"));
collection.insertOne(document);
SpringData
依赖
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-data-mongodb</artifactId>
</dependency>
配置文件
spring.data.mongodb.host=192.168.134.141
spring.data.mongodb.port=27017
spring.data.mongodb.database=zzj
具体操作
1)实体类
使用注解(@Document)将其映射为mongo中的表
// Entity mapped to the MongoDB collection "user" via Spring Data's @Document.
@Document(collection = "user")
public class User {
// NOTE(review): by Spring Data convention a field named "id" is presumably
// mapped to the Mongo "_id" — confirm against the stored documents.
private String id;
private String name;
private Integer age;
private Boolean sex;
// getters and setters omitted; the tests below also use an
// all-args constructor (id, name, age, sex) that is not shown here
}
2)Dao及实现
// DAO contract for CRUD operations on User documents.
public interface UserDao {
// Insert a single user document.
void insertOne(User user);
// Batch-insert a list of users.
// NOTE(review): "inserManny" is a typo for "insertMany"; kept as-is because
// the implementation and the tests reference this exact name.
void inserManny(List<User> list);
// Remove one user document.
void deleteOne(User user);
// Return every document in the collection.
List<User> findAll();
// Return all documents whose "name" equals the given user's name.
List<User> findByName(User user);
}
// MongoTemplate-backed implementation of UserDao.
@Repository
public class UserDaoImpl implements UserDao {
@Autowired
private MongoTemplate mongoTemplate;
@Override
public void insertOne(User user) {
mongoTemplate.insert(user);
}
@Override
public void inserManny(List<User> list) {
// insertAll performs a batch insert of the whole list.
mongoTemplate.insertAll(list);
}
@Override
public void deleteOne(User user) {
// NOTE(review): remove(Object) presumably derives the delete query from the
// entity's id field — confirm which fields actually drive the match.
mongoTemplate.remove(user);
}
@Override
public List<User> findAll() {
List<User> userList = mongoTemplate.findAll(User.class);
return userList;
}
@Override
public List<User> findByName(User user) {
// Only the name field of the passed-in User is used to build the query.
List<User> userList = mongoTemplate.find(Query.query(Criteria.where("name").is(user.getName())), User.class);
return userList;
}
}
3)测试
// Spring Boot integration tests exercising UserDao against a live MongoDB
// (connection settings come from the spring.data.mongodb.* properties above).
@RunWith(SpringRunner.class)
@SpringBootTest
public class MongoSpringDataTest {
@Autowired
private UserDao userDao;
// Batch-inserts three users with random string ids.
@Test
public void operationInsert(){
User user1 = new User(UUID.randomUUID().toString(),"zzj1",17,true);
User user2 = new User(UUID.randomUUID().toString(),"zzj2",18,false);
User user3 = new User(UUID.randomUUID().toString(),"zzj3",19,true);
ArrayList<User> users = new ArrayList<>();
users.add(user1);
users.add(user2);
users.add(user3);
userDao.inserManny(users);
}
// Prints every stored user.
@Test
public void operationFind(){
List<User> userList = userDao.findAll();
userList.forEach((user)->{
System.out.println(user.toString());
});
}
// Looks up users by name; only the "name" field of the probe object is used.
@Test
public void operationFindByName(){
List<User> userList = userDao.findByName(new User("", "zzj", null, null));
userList.forEach((user)->{
System.out.println(user.toString());
});
}
// Deletes the user with the given id (hard-coded ObjectId string from a prior run).
@Test
public void operationDel(){
userDao.deleteOne(new User("5c8f21d96d7739f7bddc7d86", "zzj", 18, null));
}
}
GridFS
1)简介
GridFS 是用于存储和检索超过 BSON 文档大小限制(16 MB)的文件的规范。
GridFS不是将文件存储在单个文档中,而是将文件分成多个部分或块[1],并将每个块存储为单独的文档。默认情况下,GridFS使用默认的块大小255 kB; 也就是说,GridFS将文件分成255 kB的块,但最后一个块除外。最后一个块只有必要的大小。类似地,不大于块大小的文件只有最终块,只使用所需的空间和一些额外的元数据。
GridFS使用两个集合来存储文件。一个集合存储文件块,另一个存储文件元数据。GridFS集合部分 详细描述了每个集合。
当您查询GridFS文件时,驱动程序将根据需要重新组装块。您可以对通过GridFS存储的文件执行范围查询。您还可以从文件的任意部分访问信息,例如“跳过”到视频或音频文件的中间。
GridFS不仅可用于存储超过16 MB的文件,还可用于存储您想要访问的任何文件,而无需将整个文件加载到内存中。另请参见 何时使用GridFS。
2)何时使用?
当使用MongoDB时文件的大小超过16MB
在某些情况下,在MongoDB数据库中存储大文件可能比在系统级文件系统上更高效。
- 如果文件系统限制目录中的文件数,则可以使用GridFS根据需要存储任意数量的文件。
- 如果要从大型文件的各个部分访问信息而无需将整个文件加载到内存中,可以使用GridFS调用文件的各个部分,而无需将整个文件读入内存。
如果需要以原子方式更新整个文件的内容,请不要使用GridFS。作为替代方案,您可以存储每个文件的多个版本,并在元数据中指定文件的当前版本。您可以在上载新版本的文件后更新在原子更新中指示“最新”状态的元数据字段,并在以后删除以前版本
此外,如果您的文件都小于16 MB 限制,请考虑将每个文件存储在单个文档中,而不是使用GridFS。您可以使用BinData数据类型来存储二进制数据。
3)使用方式
shell
(1)上传文件
[root@TestVM bin]# ./mongofiles --port 27017 --host 127.0.0.1 -d user put /root/mongod.png
2019-05-06T20:51:39.375+0800 connected to: 127.0.0.1:27017
added file: /root/mongod.png
(2)查看所有文件
[root@TestVM bin]# ./mongofiles --port 27017 --host 127.0.0.1 -d user list
2019-05-06T20:52:26.590+0800 connected to: 127.0.0.1:27017
/root/mongod.png 36846
(3)删除文件
[root@TestVM bin]# ./mongofiles --port 27017 --host 127.0.0.1 -d user delete /root/mongodb.png
2019-05-06T20:57:31.496+0800 connected to: 127.0.0.1:27017
successfully deleted all instances of '/root/mongodb.png' from GridFS
(4)搜索文件
[root@TestVM bin]# ./mongofiles --port 27017 --host 127.0.0.1 -d user search mon
2019-05-06T20:59:49.772+0800 connected to: 127.0.0.1:27017
/root/mongod.png 36846
(5)下载文件
[root@TestVM bin]# ./mongofiles --port 27017 --host 127.0.0.1 -d user get /root/mongod.png --local /root/1.png
2019-05-06T21:16:15.262+0800 connected to: 127.0.0.1:27017
finished writing to /root/1.png
Java
依赖
<dependency>
<groupId>org.mongodb</groupId>
<artifactId>mongodb-driver</artifactId>
<version>3.6.4</version>
</dependency>
代码
// Upload: streams a local file into GridFS in a single call.
@Test
public void gridFSUpload() throws Exception {
MongoDatabase myDatabase = mongoClient.getDatabase("user");
GridFSBucket gridFSBucket = GridFSBuckets.create(myDatabase);
// NOTE(review): this stream is never closed — resource leak in this sample.
InputStream streamToUploadFrom = new FileInputStream(new File("E:\\A.docx"));
// chunkSizeBytes overrides the default 255 kB chunk size; metadata is stored
// on the corresponding files-collection document.
GridFSUploadOptions options = new GridFSUploadOptions()
.chunkSizeBytes(358400)
.metadata(new Document("type", "docx"));
// "A" is the filename stored in GridFS; returns the _id of the new file.
ObjectId fileId = gridFSBucket.uploadFromStream("A", streamToUploadFrom, options);
System.out.println(fileId);
}
// Buffered upload via an explicit upload stream.
@Test
public void gridFSOpenUploadStream() throws Exception {
/*
* GridFSUploadStream buffers data until it reaches chunkSizeBytes,
* then inserts the chunk into the chunks collection. When the
* GridFSUploadStream is closed, the final chunk is written and the
* file metadata is inserted into the files collection.
* */
MongoDatabase myDatabase = mongoClient.getDatabase("user");
GridFSBucket gridFSBucket = GridFSBuckets.create(myDatabase);
GridFSUploadOptions options = new GridFSUploadOptions()
.chunkSizeBytes(358400)
.metadata(new Document("type", "presentation"));
GridFSUploadStream uploadStream = gridFSBucket.openUploadStream("A.docx", options);
// Reads the whole file into memory — fine for samples, not for large files.
byte[] data = Files.readAllBytes(new File("E:\\A.docx").toPath());
uploadStream.write(data);
uploadStream.close();
System.out.println("The fileId of the uploaded file is: " + uploadStream.getObjectId().toHexString());
}
// Lists the filename of every file stored in the GridFS bucket.
@Test
public void gridFsFindall(){
/*
* import com.mongodb.client.gridfs.model.GridFSFile;
* */
MongoDatabase myDatabase = mongoClient.getDatabase("user");
GridFSBucket gridFSBucket = GridFSBuckets.create(myDatabase);
// find() with no filter iterates all files-collection documents.
gridFSBucket.find().forEach(
new Block<GridFSFile>() {
public void apply(final GridFSFile gridFSFile) {
System.out.println(gridFSFile.getFilename());
}
});
}
// Filtered lookup: matches files whose metadata subdocument has user == "zzj".
@Test
public void gridFsFind(){
MongoDatabase myDatabase = mongoClient.getDatabase("user");
GridFSBucket gridFSBucket = GridFSBuckets.create(myDatabase);
// "metadata.user" uses dot notation into the metadata set at upload time.
gridFSBucket.find(new Document("metadata.user", "zzj")).forEach(
new Block<GridFSFile>() {
public void apply(final GridFSFile gridFSFile) {
System.out.println(gridFSFile.getFilename());
}
});
}
// Download: writes a stored file straight to a local OutputStream.
// NOTE(review): method name "gridFSDonloadFile" is a typo for "Download";
// kept as-is (it is only a sample test method).
@Test
public void gridFSDonloadFile() throws Exception {
MongoDatabase myDatabase = mongoClient.getDatabase("user");
GridFSBucket gridFSBucket = GridFSBuckets.create(myDatabase);
// Local destination path.
FileOutputStream streamToDownloadTo = new FileOutputStream("E:\\Aa.png");
// First argument is the filename stored in GridFS (an ObjectId overload also exists).
gridFSBucket.downloadToStream("/root/mongod.png", streamToDownloadTo);
streamToDownloadTo.close();
System.out.println(streamToDownloadTo.toString());
}
// Buffered download: reads the stored file into a byte array, then writes it
// out to a local file.
@Test
public void gridFSOpenDownloadStream() throws Exception {
MongoDatabase myDatabase = mongoClient.getDatabase("user");
GridFSBucket gridFSBucket = GridFSBuckets.create(myDatabase);
GridFSDownloadStream downloadStream = gridFSBucket.openDownloadStream("/root/mongod.png");
int fileLength = (int) downloadStream.getGridFSFile().getLength();
byte[] bytesToWriteTo = new byte[fileLength];
// NOTE(review): a single read() is not guaranteed to fill the buffer;
// production code should loop until fileLength bytes have been read.
downloadStream.read(bytesToWriteTo);
downloadStream.close();
FileOutputStream fileOutputStream = new FileOutputStream(new File("E://aaaa.png"));
fileOutputStream.write(bytesToWriteTo);
fileOutputStream.close();
}
// Rename a stored file by its _id.
/**
*
* Note:
rename takes an ObjectId rather than a filename, to ensure the correct
file is renamed.
To rename multiple revisions that share the same filename, first retrieve
the full list of files, then call rename with the _id of each file that
should be renamed.
* */
@Test
public void gridFSRename() {
// Hard-coded _id from a previous upload run.
ObjectId objectId = new ObjectId("5cd02ddbf4b1ab695e2ea7d6");
MongoDatabase myDatabase = mongoClient.getDatabase("user");
GridFSBucket gridFSBucket = GridFSBuckets.create(myDatabase);
gridFSBucket.rename(objectId, "1.png");
}
// Delete a stored file by its _id.
@Test
public void gridFSdelFile() throws Exception {
MongoDatabase myDatabase = mongoClient.getDatabase("user");
GridFSBucket gridFSBucket = GridFSBuckets.create(myDatabase);
// Hard-coded _id from a previous upload run.
ObjectId objectId = null;
objectId = new ObjectId("5cd03ddd682ecf2838695c75");
gridFSBucket.delete(objectId);
}
MongoDB 架构
分片&副本集
ConfigServer
:存储集群的元数据信息,MongoDB3.4
版本配置服务器必须部署成副本集
ShardServer
:存储实际的物理数据,存储集合中的部分信息,每一个shardserver也必须部署成副本集
router
:路由服务器,作为集群的访问代理,将用户的请求转发给配置服务器或者shard服务器。可以将一个庞大的MongoDB的集群伪装成单个mongod服务实例。
[root@centos ~]# mkdir cf1 cf2 cf3 shard1_1 shard1_2 shard1_3 shard2_1 shard2_2 shard2_3
#搭建副本集
[root@centos ~]# mongod --dbpath /root/shard1_1/ --port 27017 --fork --syslog --replSet rs1 --shardsvr
[root@centos ~]# mongod --dbpath /root/shard1_2/ --port 27018 --fork --syslog --replSet rs1 --shardsvr
[root@centos ~]# mongod --dbpath /root/shard1_3/ --port 27019 --fork --syslog --replSet rs1 --shardsvr
[root@centos ~]# mongo --port 27017
> var conf={
_id : "rs1",
members: [
{ _id: 0, host: "localhost:27017" },
{ _id: 1, host: "localhost:27018" },
{ _id: 2, host: "localhost:27019" }
]
}
> rs.initiate(conf)
[root@centos ~]# mongod --dbpath /root/shard2_1/ --port 37017 --fork --syslog --replSet rs2 --shardsvr
[root@centos ~]# mongod --dbpath /root/shard2_2/ --port 37018 --fork --syslog --replSet rs2 --shardsvr
[root@centos ~]# mongod --dbpath /root/shard2_3/ --port 37019 --fork --syslog --replSet rs2 --shardsvr
[root@centos ~]# mongo --port 37017
> var conf={
_id : "rs2",
members: [
{ _id: 0, host: "localhost:37017" },
{ _id: 1, host: "localhost:37018" },
{ _id: 2, host: "localhost:37019" }
]
}
> rs.initiate(conf)
[root@centos ~]# mongod --dbpath /root/cf1/ --port 47017 --fork --syslog --replSet cf --configsvr
[root@centos ~]# mongod --dbpath /root/cf2/ --port 47018 --fork --syslog --replSet cf --configsvr
[root@centos ~]# mongod --dbpath /root/cf3/ --port 47019 --fork --syslog --replSet cf --configsvr
[root@centos ~]# mongo --port 47017
> var conf={
_id : "cf",
members: [
{ _id: 0, host: "localhost:47017" },
{ _id: 1, host: "localhost:47018" },
{ _id: 2, host: "localhost:47019" }
]
}
> rs.initiate(conf)
#启动路由服务,用户可以通过连接该路由服务操作mongodb集群
[root@centos ~]# mongos --configdb cf/localhost:47017,localhost:47018,localhost:47019 --fork --syslog --port 8000
#配置分片
[root@centos ~]# mongo --host localhost --port 8000
mongos> sh.addShard("rs1/localhost:27017,localhost:27018,localhost:27019")
mongos> sh.addShard("rs2/localhost:37017,localhost:37018,localhost:37019")
mongos> sh.enableSharding('user')# 对该库下的表做分片
mongos> sh.shardCollection("user.t_user",{_id:"hashed"})#对t_user做hashshard让数据均匀分布
mongos> sh.shardCollection( "user.t_order", {_id:1,name:1})#做range方式,写性能不高,有利于区间检索
mongos> use user
mongos> for(var i=0;i<1000;i++){
db.t_user.insert({name:"user"+i})
db.t_order.insert({name:"user"+i})
}
> db.t_user.stats()  # 查看 t_user 集合的分片统计信息
> db.t_order.stats() # 查看 t_order 集合的分片统计信息