一.引入pom.xml
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-data-mongodb</artifactId>
</dependency>
<!-- mongodb连接池 -->
<dependency>
    <groupId>com.spring4all</groupId>
    <artifactId>mongodb-plus-spring-boot-starter</artifactId>
    <version>1.0.0.RELEASE</version>
</dependency>
二.application.yml添加配置
spring:
  data:
    mongodb:
      # 注意:authSource 指的是认证用户所在的数据库(通常是 admin),而不是集合名
      uri: mongodb://username:password@localhost:27017/dbname?authSource=admin
三.测试
1.测试添加:将20万条数据按每批2000条拆分为100个任务提交到线程池,通过mongoTemplate分批并行存储。
@Test
public void testAdd() {
    Stopwatch stopwatch = Stopwatch.createStarted();
    DateTime date = DateUtil.date();
    // Partition the total row count into fixed-size batches; one task per batch.
    int batchNum = 2000;
    int totalNum = 200000;
    // Ceiling division: number of batch tasks to submit.
    int pageNum = totalNum % batchNum == 0 ? totalNum / batchNum : totalNum / batchNum + 1;
    // BUG FIX: the original sized the pool at pageNum (100 threads here), i.e. one
    // thread per batch, which defeats the purpose of a pool. Cap it at a small
    // fixed size; remaining tasks queue behind the workers.
    int poolSize = Math.min(pageNum, 10);
    ExecutorService executorService = Executors.newFixedThreadPool(poolSize);
    try {
        CountDownLatch countDownLatch = new CountDownLatch(pageNum);
        for (int i = 0; i < pageNum; i++) {
            int fromIndex = i * batchNum;
            int toIndex = Math.min(fromIndex + batchNum, totalNum);
            executorService.execute(new HandleTask(countDownLatch, fromIndex, toIndex, date));
        }
        // Block until every task has counted down, or give up after 10 minutes.
        // BUG FIX: the original ignored the boolean result, so a timeout was
        // silently reported as success.
        boolean finished = countDownLatch.await(10, TimeUnit.MINUTES);
        stopwatch.stop();
        if (!finished) {
            log.warn("批量插入在 10 分钟内未全部完成");
        }
        // BUG FIX: original message hard-coded "10个线程,存储10W条数据",
        // contradicting totalNum/pageNum above.
        log.info("{}个批次任务,存储{}条数据耗时:{}", pageNum, totalNum, stopwatch);
        log.info("********************** 数据操作完成,进入主线程执行代码 ***************************");
    } catch (InterruptedException e) {
        // Restore the interrupt flag instead of swallowing the exception.
        Thread.currentThread().interrupt();
        log.error("等待批量插入任务时被中断", e);
    } finally {
        // BUG FIX: the original never shut the pool down, leaking worker threads.
        executorService.shutdown();
    }
}
@AllArgsConstructor
@NoArgsConstructor
@Data
public class HandleTask implements Runnable {

    /** Number of documents per mongoTemplate.insert(...) call. */
    private static final int INSERT_BATCH_SIZE = 100;

    private CountDownLatch countDownLatch;  // signalled exactly once when this task ends
    private int fromIndex;                  // inclusive start of this task's index range
    private int toIndex;                    // exclusive end of this task's index range
    private DateTime date;                  // shared base timestamp; hutool offset() mutates in place

    /**
     * Builds one document per index in [fromIndex, toIndex) and bulk-inserts
     * them into the "mytest" collection in batches of {@link #INSERT_BATCH_SIZE}.
     */
    @Override
    public void run() {
        try {
            // NOTE(review): `date` is shared across all tasks and offset() mutates
            // it in place — acceptable for generating test data, but confirm the
            // resulting timestamps don't need to be per-task deterministic.
            date.offset(DateField.SECOND, 1);
            List<String> columnList = make100Field();
            List<Map> mapList = new ArrayList<>(INSERT_BATCH_SIZE);
            for (int i = fromIndex; i < toIndex; i++) {
                // A plain HashMap suffices: the map is confined to this thread.
                Map<String, Object> doc = new HashMap<>();
                for (String fieldName : columnList) {
                    if ("c1".equals(fieldName)) {
                        doc.put(fieldName, date.offset(DateField.SECOND, 1).toString());
                    } else {
                        doc.put(fieldName, i);
                    }
                }
                mapList.add(doc);
                if (mapList.size() == INSERT_BATCH_SIZE) {
                    mongoTemplate.insert(mapList, "mytest");
                    mapList.clear();
                }
            }
            // BUG FIX: flush the trailing partial batch — the original dropped up
            // to 99 documents whenever (toIndex - fromIndex) % 100 != 0.
            if (!mapList.isEmpty()) {
                mongoTemplate.insert(mapList, "mytest");
            }
        } finally {
            // BUG FIX: count down in finally so an exception during insert cannot
            // leave testAdd() blocked until the await() timeout expires.
            countDownLatch.countDown();
        }
    }
}
/**
 * Builds the list of column names used for each generated test document.
 *
 * <p>NOTE(review): despite the name, this produces 50 fields ("c1".."c50"),
 * not 100 — the loop bound is 51. Left unchanged to preserve the stored
 * schema; rename the method or fix the bound if 100 columns are intended.
 *
 * @return field names "c1" through "c50", in ascending order
 */
public List<String> make100Field() {
    String fieldPre = "c";
    // ArrayList beats LinkedList for append-then-iterate/indexed access;
    // simple concatenation is clearer than String.format here.
    List<String> fieldNameList = new ArrayList<>(50);
    for (int i = 1; i < 51; i++) {
        fieldNameList.add(fieldPre + i);
    }
    return fieldNameList;
}
2.测试检索
/**
 * Times a range query over the "c1" field of the "mytest" collection.
 *
 * <p>NOTE(review): c1 is stored as a "yyyy-MM-dd HH:mm:ss" string, so
 * gte/lte compare lexicographically — correct only because this fixed-width
 * format sorts identically to chronological order.
 */
@Test
public void testSearch() {
    Stopwatch stopwatch = Stopwatch.createStarted();
    Query query = new Query().addCriteria(
            Criteria.where("c1").gte("2021-10-16 15:28:42").lte("2021-10-16 15:58:46"));
    List<Map> mytest = mongoTemplate.find(query, Map.class, "mytest");
    stopwatch.stop();
    // IMPROVEMENT: the original never used the result; report the hit count,
    // and use the logger (consistent with testAdd) instead of System.out.
    log.info("检索耗时:{},命中 {} 条", stopwatch, mytest.size());
    log.info("############# end ###########");
}