目录
摘要
- 解决批量导出文档耗时过长导致nginx报超时问题(一般来说,nginx配置超时时间60s)
- 使用CountDownLatch+Runnable去优化批量下载问题,优化批量下载word打包zip并通过浏览器下载的操作
- 提供实际项目模板案例、修改对应参数即可
- 考虑到测试项目、应该是一键启动、就不集成mybatis-plus(虽然代码有用到)这里自行集成
- 注意:提升的是下载文档速度,如果压缩包过大,输出到浏览器的速度还是会慢
一、简单介绍【countdownlatch】
1、【CountDownLatch】可以使一个或多个线程等待其他线程各自执行完毕后再执行。
2、【CountDownLatch】定义了一个计数器,和一个阻塞队列, 当计数器的值递减为0之前,阻塞队列里面的线程处于挂起状态,当计数器递减到0时会唤醒阻塞队列所有线程,这里的计数器是一个标志,可以表示一个任务一个线程,也可以表示一个倒计时器。
二、对比效果
结果对比
修改application.yml参数
单线程处理1000份文档
结果为大概在30秒上下
多线程线程处理1000份文档
2条线程同时处理1000份文档 大概在20秒上下
3条线程同时处理1000份文档 大概在16秒上下
4条线程同时处理1000份文档 大概在15秒上下(博主的笔记本极限了)
测试用例地址
三、代码实现
1、Controller层
/**
 * Batch-download endpoint. Delegates to the service layer, which renders the
 * documents, packs them into a zip, and streams the archive back through
 * {@code response}.
 *
 * @param response servlet response the zip archive is written to
 * @param down     request body describing what to export — schema not visible here
 */
@PostMapping("/downloadBatch")
public void downloadBatch(HttpServletResponse response, @RequestBody Down down) {
downLoadService.downLoadBatch(response, down);
}
2、Service层
/**
 * Benchmark for the batch-download flow: renders documents concurrently,
 * waits for all worker tasks on a {@link CountDownLatch}, zips the output
 * folder and streams it to the browser, then deletes the temp directory.
 *
 * @param response servlet response the zip archive is written to
 */
private void downLoadBatchTest(HttpServletResponse response) {
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    // Shared output directory for this run; a timestamp keeps runs apart.
    // NOTE(review): a snowflake id / UUID would be collision-proof under load.
    String filePath = FileUtil.getCaseInfoPath() + System.currentTimeMillis() + File.separator;
    File file = new File(filePath);
    if (!file.exists() && !file.mkdirs()) {
        // Fail fast: every worker thread would otherwise fail on its first write.
        throw new IllegalStateException("Could not create download directory: " + filePath);
    }
    try {
        Executor executor = executorConfig.asyncServiceExecutor();
        // One latch count per submitted task; await() blocks until every
        // worker has called countDown().
        CountDownLatch countDownLatch = new CountDownLatch(pageNum);
        for (int i = 1; i <= pageNum; i++) {
            executor.execute(new DownLoadThreadTest(countDownLatch, filePath, number));
        }
        countDownLatch.await();
        stopWatch.stop();
        System.out.println("下载共耗时:" + stopWatch.getTotalTimeSeconds() + "秒");
        // Archive name carries a timestamp so repeated exports don't collide.
        // SimpleDateFormat is safe here because the instance is method-local.
        String zipName = "导出记录" + new SimpleDateFormat("yyyyMMddHHmmss").format(new Date()) + ".zip";
        // Stream the zipped folder to the browser.
        FileUtil.downLoadZip(filePath, zipName, response);
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers up the stack can observe it
        // (the original swallowed the interrupt status).
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    } finally {
        // Always remove the temp files, even when zipping or awaiting failed.
        FileUtil.delAllFile(file);
    }
}
3、FileUtil工具类
// 文件写出大小
private static final int BUFFER_SIZE = 2 * 1024;
/**
 * Recursively deletes a file or an entire directory tree.
 *
 * @param directory the file or directory to delete
 */
public static void delAllFile(File directory) {
    if (!directory.isDirectory()) {
        // Plain file (or nonexistent path): a single delete is enough.
        directory.delete();
        return;
    }
    File[] files = directory.listFiles();
    // listFiles() returns null on an I/O error or if the directory vanished;
    // the original dereferenced files.length unconditionally (NPE risk).
    if (files != null) {
        // Delete children first (recursing into subdirectories).
        for (File file : files) {
            if (file.isDirectory()) {
                delAllFile(file);
            } else {
                file.delete();
            }
        }
    }
    // Delete the (now empty) directory itself.
    directory.delete();
}
/**
 * Zips every file directly under {@code fileList} and streams the archive
 * to the browser as an attachment.
 *
 * @param fileList    path of the directory whose files are zipped
 * @param zipFileName file name the browser saves the archive under
 * @param response    servlet response the zip bytes are written to
 */
public static void downLoadZip(String fileList, String zipFileName, HttpServletResponse response) {
    // Reset and set headers BEFORE touching the output stream: reset()
    // throws IllegalStateException once the response is committed, so the
    // original's getOutputStream()-then-reset() ordering was fragile.
    response.reset();
    response.setContentType("application/octet-stream; charset=utf-8");
    response.setHeader("Content-Disposition", "attachment; filename=" + Encodes.urlEncode(zipFileName));
    try (BufferedOutputStream bos = new BufferedOutputStream(response.getOutputStream());
         ZipOutputStream zous = new ZipOutputStream(bos)) {
        File filePath = new File(fileList);
        File[] files = filePath.listFiles();
        if (files != null && files.length != 0) {
            byte[] buf = new byte[BUFFER_SIZE];
            int len;
            for (File file : files) {
                try (FileInputStream in = new FileInputStream(file)) {
                    // One zip entry per file, named after the source file.
                    zous.putNextEntry(new ZipEntry(file.getName()));
                    while ((len = in.read(buf)) != -1) {
                        zous.write(buf, 0, len);
                    }
                    zous.closeEntry();
                }
            }
            zous.finish();
        }
    } catch (Exception e) {
        // Surface the failure instead of silently sending a truncated or
        // empty archive (the original only printed the stack trace).
        throw new RuntimeException("Failed to stream zip: " + zipFileName, e);
    }
}
4、ExecutorConfig 线程池配置
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.EnableAsync;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
import java.util.concurrent.Executor;
import java.util.concurrent.ThreadPoolExecutor;
@Configuration
@EnableAsync
public class ExecutorConfig {

    /**
     * Thread pool used by the batch-download workers. Core and max sizes are
     * both twice the CPU count; overflow tasks wait in the queue, and once the
     * queue is full the submitting thread runs the task itself.
     */
    @Bean
    public Executor asyncServiceExecutor() {
        int poolSize = Runtime.getRuntime().availableProcessors() * 2;
        ThreadPoolTaskExecutor pool = new ThreadPoolTaskExecutor();
        // Fixed-size pool: core == max.
        pool.setCorePoolSize(poolSize);
        pool.setMaxPoolSize(poolSize);
        // Generous backlog before the rejection policy kicks in.
        pool.setQueueCapacity(100000);
        pool.setThreadNamePrefix("thread-");
        // CALLER_RUNS: when pool and queue are saturated, execute the task on
        // the caller's thread instead of rejecting it.
        pool.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
        pool.initialize();
        return pool;
    }
}
5、Runnable实现类
package com.muyangren.thread;
import com.deepoove.poi.XWPFTemplate;
import org.springframework.core.io.ClassPathResource;
import org.springframework.core.io.Resource;
import java.io.File;
import java.io.FileOutputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.UUID;
import java.util.concurrent.CountDownLatch;
/**
 * Worker task: renders {@code number} Word documents from a classpath
 * template into {@code filePath}, then signals completion on the shared latch.
 *
 * @author muyangren
 * @since 2023/4/3
 */
public class DownLoadThreadTest implements Runnable {
    private final CountDownLatch countDownLatch;
    private final String filePath;
    private final Integer number;

    public DownLoadThreadTest(CountDownLatch countDownLatch, String filePath, Integer number) {
        this.countDownLatch = countDownLatch;
        this.filePath = filePath;
        this.number = number;
    }

    @Override
    public void run() {
        try {
            // Classpath resource paths are always '/'-separated; the original
            // used File.separator, which breaks on Windows ('\').
            Resource resource = new ClassPathResource("document/word/测试模板.docx");
            for (int i = 0; i < this.number; i++) {
                // UUID prefix prevents file-name clashes across threads.
                String fileName = UUID.randomUUID() + "《" + i + "》家人们.docx";
                try (OutputStream os = Files.newOutputStream(Paths.get(filePath + fileName))) {
                    // Template placeholders for this document.
                    HashMap<String, Object> data = new HashMap<>(3);
                    data.put("one", "数据数据数据数据数据数据one");
                    data.put("two", "数据数据数据数据数据数据数据数据two");
                    data.put("three", "数据数据数据数据数据数据数据数据数据three");
                    XWPFTemplate template = XWPFTemplate.compile(resource.getInputStream()).render(data);
                    template.writeAndClose(os);
                } catch (Exception e) {
                    // Best-effort per document: one failed render must not
                    // abort the remaining documents.
                    e.printStackTrace();
                }
            }
        } finally {
            // BUG FIX: countDown() must run even if an unchecked exception
            // escapes the loop (e.g. number == null), otherwise the
            // coordinator's await() blocks forever.
            countDownLatch.countDown();
        }
    }
}