前端代码:
#安装依赖,打开项目目录命令行,分别安装。如果安装的时候出错,使用命令清理一下npm缓存
npm install --save vue-simple-uploader
npm install --save spark-md5
#创建上传组件,组件名称自定义FileFragmentationUpload:
<template>
<div>
<!-- vue-simple-uploader root. autoStart is disabled on purpose: files are
     paused until their MD5 has been computed (see computeMD5), then resumed
     so the server-side chunk check can key off the content hash. -->
<uploader
:autoStart="false"
:options="options"
:file-status-text="statusText"
class="uploader-example"
@file-complete="fileComplete"
@complete="complete"
@file-success="fileSuccess"
@files-added="filesAdded"
>
<!-- Fallback shown when the browser cannot support the uploader. -->
<uploader-unsupport></uploader-unsupport>
<uploader-drop>
<p>将文件拖放到此处以上传</p>
<uploader-btn>选择文件</uploader-btn>
<!-- attrs restricts the picker to images (accept="image/*"). -->
<uploader-btn :attrs="attrs">选择图片</uploader-btn>
<uploader-btn :directory="true">选择文件夹</uploader-btn>
</uploader-drop>
<!-- <uploader-list></uploader-list> -->
<uploader-files> </uploader-files>
</uploader>
<br />
<!-- Bulk controls backed by fileList tracked in data(); "全部开始" is
     disabled until at least one file has been added (see watch). -->
<el-button @click="allStart()" :disabled="disabled">全部开始</el-button>
<el-button @click="allStop()" style="margin-left: 4px">全部暂停</el-button>
<el-button @click="allRemove()" style="margin-left: 4px">全部移除</el-button>
</div>
</template>
<script>
import axios from "axios";
import SparkMD5 from "spark-md5";
import {mergeChunks} from "@/api/inter/upload";
import { getToken } from "@/utils/auth";
// import storage from "store";
// import { ACCESS_TOKEN } from '@/store/mutation-types'
export default {
name: "Home",
data() {
return {
skip: false,
options: {
target: process.env.VUE_APP_BASE_API +"/inter/upload/chunk",
// 开启服务端分片校验功能
testChunks: true,
parseTimeRemaining: function (timeRemaining, parsedTimeRemaining) {
return parsedTimeRemaining
.replace(/\syears?/, "年")
.replace(/\days?/, "天")
.replace(/\shours?/, "小时")
.replace(/\sminutes?/, "分钟")
.replace(/\sseconds?/, "秒");
},
// 服务器分片校验函数
checkChunkUploadedByResponse: (chunk, message) => {
const result = JSON.parse(message);
if (result.data.skipUpload) {
this.skip = true;
return true;
}
console.log("check skip:"+this.skip);
return (result.data.uploaded || []).indexOf(chunk.offset + 1) >= 0;
},
headers: {
Authorization: "Bearer " + getToken(),
},
},
attrs: {
accept: "image/*",
},
statusText: {
success: "上传成功",
error: "上传出错了",
uploading: "上传中...",
paused: "暂停中...",
waiting: "等待中...",
cmd5: "计算文件MD5中...",
},
fileList: [],
disabled: true,
};
},
watch: {
fileList(o, n) {
this.disabled = false;
},
},
methods: {
// fileSuccess(rootFile, file, response, chunk) {
// // console.log(rootFile);
// // console.log(file);
// // console.log(message);
// // console.log(chunk);
// const result = JSON.parse(response);
// console.log(result.success, this.skip);
//
// if (result.success && !this.skip) {
// axios
// .post(
// "http://127.0.0.1:9999/upload/merge",
// {
// identifier: file.uniqueIdentifier,
// filename: file.name,
// totalChunks: chunk.offset,
// },
// // {
// // headers: { "Access-Token": storage.get(ACCESS_TOKEN) }
// // }
// )
// .then((res) => {
// if (res.data.success) {
// console.log("上传成功");
// } else {
// console.log(res);
// }
// })
// .catch(function (error) {
// console.log(error);
// });
// } else {
// console.log("上传成功,不需要合并");
// }
// if (this.skip) {
// this.skip = false;
// }
// },
fileSuccess(rootFile, file, response, chunk) {
// console.log(rootFile);
// console.log(file);
console.log(response);
// console.log(chunk);
const result = JSON.parse(response);
console.log(result.code, this.skip);
const user = {
identifier: file.uniqueIdentifier,
filename: file.name,
totalChunks: chunk.offset,
}
if (result.code == 200 && !this.skip) {
mergeChunks(user).then((res) => {
console.log("res:"+res.code)
console.log("res data:"+res.data)
if (res.code == 200) {
if(res.data.result){
console.log("filePath:"+res.data.filePath)
console.log("上传成功");
}else{
console.log("上传失败");
console.log("filePath:"+res.data.filePath)
}
} else {
console.log(res);
}
})
.catch(function (error) {
console.log(error);
});
} else {
console.log("上传成功,不需要合并");
}
if (this.skip) {
this.skip = false;
}
},
// A root file (or folder) finished uploading successfully.
// Intentionally empty; kept so the @file-complete binding stays valid.
fileComplete(rootFile) {
// console.log("fileComplete", rootFile);
},
// All uploads finished. Intentionally empty; kept for the @complete binding.
complete() {
// console.log("complete");
},
// New files were added by the user. NOTE: despite its name, the first
// argument is an ARRAY of uploader file objects.
filesAdded(file, fileList, event) {
// console.log(file);
file.forEach((e) => {
// Track the file for the bulk buttons and start hashing; the actual
// upload begins when computeMD5 finishes and resumes the file.
this.fileList.push(e);
this.computeMD5(e);
});
},
computeMD5(file) {
let fileReader = new FileReader();
let time = new Date().getTime();
let blobSlice =
File.prototype.slice ||
File.prototype.mozSlice ||
File.prototype.webkitSlice;
let currentChunk = 0;
const chunkSize = 1024 * 1024;
let chunks = Math.ceil(file.size / chunkSize);
let spark = new SparkMD5.ArrayBuffer();
// 文件状态设为"计算MD5"
file.cmd5 = true; //文件状态为“计算md5...”
file.pause();
loadNext();
fileReader.onload = (e) => {
spark.append(e.target.result);
if (currentChunk < chunks) {
currentChunk++;
loadNext();
// 实时展示MD5的计算进度
console.log(
`第${currentChunk}分片解析完成, 开始第${
currentChunk + 1
} / ${chunks}分片解析`
);
} else {
let md5 = spark.end();
console.log(
`MD5计算完毕:${file.name} \nMD5:${md5} \n分片:${chunks} 大小:${
file.size
} 用时:${new Date().getTime() - time} ms`
);
spark.destroy(); //释放缓存
file.uniqueIdentifier = md5; //将文件md5赋值给文件唯一标识
file.cmd5 = false; //取消计算md5状态
file.resume(); //开始上传
}
};
fileReader.onerror = function () {
this.error(`文件${file.name}读取出错,请检查该文件`);
file.cancel();
};
function loadNext() {
let start = currentChunk * chunkSize;
let end =
start + chunkSize >= file.size ? file.size : start + chunkSize;
fileReader.readAsArrayBuffer(blobSlice.call(file.file, start, end));
}
},
allStart() {
console.log(this.fileList);
this.fileList.map((e) => {
if (e.paused) {
e.resume();
}
});
},
allStop() {
console.log(this.fileList);
this.fileList.map((e) => {
if (!e.paused) {
e.pause();
}
});
},
allRemove() {
this.fileList.map((e) => {
e.cancel();
});
this.fileList = [];
},
},
};
</script>
<style>
/* Container panel around the uploader widget. */
.uploader-example {
  width: 100%;
  padding: 15px;
  margin: 0px auto 0;
  font-size: 12px;
  box-shadow: 0 0 10px rgba(0, 0, 0, 0.4);
}
.uploader-example .uploader-btn {
  margin-right: 4px;
}
/* Scroll the file list vertically only. The original shorthand
   `overflow: auto` was redundant: both axes were immediately overridden
   by the per-axis declarations, so the computed style is unchanged. */
.uploader-example .uploader-list {
  max-height: 440px;
  overflow-x: hidden;
  overflow-y: auto;
}
</style>
#main.js引用组件
import uploader from 'vue-simple-uploader'
Vue.use(uploader)
#其他页面引用以上组件:
<file-fragmentation-upload v-model="form.filePath"/>
#请求后端方法的upload.js
import request from '@/utils/request'
/**
 * Ask the backend to merge all uploaded chunks into the final file.
 * @param {Object} data - { identifier, filename, totalChunks }
 * @returns {Promise} the axios request promise
 */
export function mergeChunks(data) {
  return request({
    url: '/inter/upload/mergeChunks',
    method: 'post',
    data,
  });
}
后端代码:
#表现层
package com.xx.web.controller.inter.background;
import com.xx.common.core.domain.AjaxResult;
import com.xx.inter.dto.FileChunkDTO;
import com.xx.inter.dto.FileChunkResultDTO;
import com.xx.inter.service.IUploadService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
import java.util.Map;
/**
 * REST endpoints for chunked (resumable) attachment upload.
 * Works with the vue-simple-uploader front end: GET /chunk performs the
 * pre-upload existence check, POST /chunk stores a single chunk, and
 * POST /mergeChunks assembles the final file.
 */
@RestController
@RequestMapping("/inter/upload")
public class UploaderController {
    @Autowired
    private IUploadService uploadService;

    /**
     * Check whether the file (or some of its chunks) already exists.
     *
     * @param chunkDTO upload metadata; identifier is the file's MD5
     * @return skipUpload flag plus the set of chunk numbers already stored
     */
    @GetMapping("/chunk")
    public AjaxResult checkChunkExist(FileChunkDTO chunkDTO){
        FileChunkResultDTO fileChunkResultDTO;
        try{
            fileChunkResultDTO = uploadService.checkChunkExist(chunkDTO);
            return AjaxResult.success(fileChunkResultDTO);
        }catch (Exception e){
            // NOTE(review): prefer a logger over printStackTrace.
            e.printStackTrace();
            return AjaxResult.error(e.getMessage());
        }
    }

    /**
     * Store one uploaded chunk on disk and record it in Redis.
     *
     * @param chunkDTO chunk payload (multipart file) and metadata
     * @return the file identifier on success
     */
    @PostMapping("/chunk")
    public AjaxResult uploadChunk(FileChunkDTO chunkDTO){
        try{
            uploadService.uploadChunk(chunkDTO);
            return AjaxResult.success(chunkDTO.getIdentifier());
        }catch (Exception e){
            e.printStackTrace();
            return AjaxResult.error(e.getMessage());
        }
    }

    /**
     * Merge all uploaded chunks of one file into the final file.
     * Body is JSON ({@code @RequestBody}), unlike the form-bound
     * chunk endpoints above.
     */
    @PostMapping("/mergeChunks")
    public AjaxResult mergeChunks(@RequestBody FileChunkDTO chunkDTO){
        try{
            Map<String,Object> success = uploadService.mergeChunk(chunkDTO.getIdentifier(),
                    chunkDTO.getFilename(),chunkDTO.getTotalChunks());
            return AjaxResult.success(success);
        }catch (Exception e){
            e.printStackTrace();
            return AjaxResult.error(e.getMessage());
        }
    }
}
#service
package com.hqa.inter.service;
import com.hqa.inter.dto.FileChunkDTO;
import com.hqa.inter.dto.FileChunkResultDTO;
import java.io.IOException;
import java.util.Map;
/**
 * Service contract for chunked (resumable) attachment upload.
 */
public interface IUploadService {
    /**
     * Check whether the file already exists. If it does, its upload can be
     * skipped; otherwise return the set of chunks already uploaded so the
     * client can resume.
     *
     * @param chunkDTO upload metadata (identifier = file MD5)
     * @return skipUpload flag plus already-uploaded chunk numbers
     */
    FileChunkResultDTO checkChunkExist(FileChunkDTO chunkDTO);

    /**
     * Persist one uploaded chunk.
     *
     * @param chunkDTO chunk payload and metadata
     * @throws IOException if writing the chunk fails
     */
    void uploadChunk(FileChunkDTO chunkDTO) throws IOException;

    /**
     * Merge all chunks of a file into the final file.
     *
     * @param identifier file MD5
     * @param fileName original file name
     * @param totalChunks chunk count as reported by the client
     * @return map with keys "result" (Boolean) and "filePath" (String or null)
     * @throws IOException if the merge fails
     */
    Map<String,Object> mergeChunk(String identifier, String fileName, Integer totalChunks)throws IOException;
}
#service impl
package com.xx.inter.service.impl;
import com.xx.common.config.HqaConfig;
import com.xx.common.core.redis.RedisCache;
import com.xx.common.utils.DateUtils;
import com.xx.common.utils.StringUtils;
import com.xx.common.utils.file.FileUploadUtils;
import com.xx.common.utils.uuid.Seq;
import com.xx.inter.dto.FileChunkDTO;
import com.xx.inter.dto.FileChunkResultDTO;
import com.xx.inter.service.IUploadService;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.FilenameUtils;
import org.apache.poi.util.IOUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.io.*;
import java.util.*;
@Service
@SuppressWarnings("all")
@Slf4j
public class UploadServiceImpl implements IUploadService {
    @Autowired
    private RedisCache redisCache;

    /**
     * Check whether the file already exists; if so the upload is skipped,
     * otherwise return the chunks that were already uploaded (resume).
     *
     * @param chunkDTO upload metadata (identifier = file MD5)
     * @return skipUpload flag plus already-uploaded chunk numbers
     */
    @Override
    public FileChunkResultDTO checkChunkExist(FileChunkDTO chunkDTO) {
        // 1. Check whether the whole file was uploaded before.
        // 1.1 Does the merged file exist on disk?
        String fileFolderPath = getFileFolderPath(chunkDTO.getIdentifier());
        log.info("fileFolderPath--->{}",fileFolderPath);
        // NOTE(review): getFilePath() embeds a fresh Seq.getId() on every
        // call, so this path is new each time and `exists` is presumably
        // always false — which would make skipUpload unreachable. Verify.
        String filePath = getFilePath(chunkDTO.getIdentifier(),chunkDTO.getFilename());
        File file = new File(filePath);
        boolean exists = file.exists();
        // 1.2 Are all chunks recorded as uploaded in Redis?
        Set<Integer> uploaded = (Set<Integer>) redisCache.getCacheMapValue(chunkDTO.getIdentifier(),"uploaded");
        if (uploaded != null && uploaded.size() == chunkDTO.getTotalChunks() && exists){
            return new FileChunkResultDTO(true);
        }
        // Prepare the per-identifier folder for the chunks to come.
        File fileFolder = new File(fileFolderPath);
        if (!fileFolder.exists()){
            boolean mkdirs = fileFolder.mkdirs();
            log.info("准备工作,创建文件夹,fileFolderPath:{},mkdirs:{}",fileFolderPath,mkdirs);
        }
        // Resumable upload: hand back the chunk numbers uploaded so far.
        return new FileChunkResultDTO(false,uploaded);
    }

    /**
     * Build the absolute path of the merged target file, shaped as
     * uploadPath/datePath/baseName_seq.ext.
     *
     * @param identifier file MD5 (not used in the generated name)
     * @param filename original file name (extension is taken from it)
     * @return absolute path, or null if resolving the upload dir failed
     */
    private String getFilePath(String identifier, String filename) {
        String ext = filename.substring(filename.lastIndexOf("."));
        // return getFileFolderPath(identifier) + identifier + ext;
        String fileName = StringUtils.format("{}/{}_{}{}", DateUtils.datePath(),
                FilenameUtils.getBaseName(filename), Seq.getId(Seq.uploadSeqType), ext);
        String absPath = null;
        try {
            absPath = FileUploadUtils.getAbsoluteFile(HqaConfig.getUploadPath()+"/", fileName).getAbsolutePath();
        } catch (IOException e) {
            // NOTE(review): swallowed; callers receive null and fail later.
            e.printStackTrace();
        }
        return absPath;
        // return uploadFolder+filename;
    }

    /**
     * Folder grouping everything that belongs to one identifier:
     * uploadPath/datePath/c0/c1/identifier/ where c0/c1 are the first two
     * characters of the identifier (directory fan-out).
     * NOTE(review): mixes "/" and File.separator; also depends on today's
     * datePath(), so an upload spanning midnight changes folders — verify.
     */
    private String getFileFolderPath(String identifier){
        String fileName = StringUtils.format("{}", DateUtils.datePath());
        return HqaConfig.getUploadPath()+"/"+fileName+"/"+identifier.substring(0,1)+File.separator+identifier.substring(1,2)+File.separator
                +identifier+File.separator;
    }

    /**
     * Write one chunk to disk (file name = chunk number) and record it in
     * Redis.
     *
     * @param chunkDTO chunk payload and metadata
     * @throws IOException declared, but failures are caught and printed
     */
    @Override
    public void uploadChunk(FileChunkDTO chunkDTO) throws IOException {
        // Folder holding this file's chunks.
        String chunkFileFolderPath = getChunkFileFolderPath(chunkDTO.getIdentifier());
        log.info("分块的目录--->{}",chunkFileFolderPath);
        File chunkFileFolder = new File(chunkFileFolderPath);
        if (!chunkFileFolder.exists()){
            boolean mkdirs = chunkFileFolder.mkdirs();
            log.info("创建分片文件夹:{}",mkdirs);
        }
        // Stream the chunk to disk; try-with-resources closes both ends.
        try(InputStream inputStream = chunkDTO.getFile().getInputStream();
            FileOutputStream outputStream = new FileOutputStream(chunkFileFolderPath+chunkDTO.getChunkNumber());){
            IOUtils.copy(inputStream,outputStream);
            log.info("文件标识:{},chunkNumber:{}",chunkDTO.getIdentifier(),chunkDTO.getChunkNumber());
            // Record this chunk in Redis for the resume/skip checks.
            long size = saveToRedis(chunkDTO);
        }catch (Exception e){
            // NOTE(review): error is swallowed — the controller still
            // reports success for this chunk. Confirm that is intended.
            e.printStackTrace();
        }
    }

    /**
     * Record an uploaded chunk number in the Redis hash keyed by the file
     * identifier; the hash (with totals and file name) is created on the
     * first chunk. synchronized guards the read-modify-write within this
     * JVM only — NOTE(review): not safe across multiple instances.
     *
     * @return number of chunks recorded so far
     */
    private synchronized long saveToRedis(FileChunkDTO chunkDTO) {
        Set<Integer> uploaded = (Set<Integer>) redisCache.getCacheMapValue(chunkDTO.getIdentifier(),"uploaded");
        if (uploaded == null){
            uploaded = new HashSet<>(Arrays.asList(chunkDTO.getChunkNumber()));
            HashMap<String,Object> objectHashMap = new HashMap<>();
            objectHashMap.put("uploaded",uploaded);
            objectHashMap.put("totalChunks",chunkDTO.getTotalChunks());
            objectHashMap.put("totalSize",chunkDTO.getTotalSize());
            objectHashMap.put("path",chunkDTO.getFilename());
            redisCache.setCacheMap(chunkDTO.getIdentifier(),objectHashMap);
        }else{
            uploaded.add(chunkDTO.getChunkNumber());
            redisCache.setCacheMapValue(chunkDTO.getIdentifier(),"uploaded",uploaded);
        }
        return uploaded.size();
    }

    /**
     * Folder that stores the chunk files of one identifier.
     */
    private String getChunkFileFolderPath(String identifier) {
        return getFileFolderPath(identifier)+"chunks"+ File.separator;
    }

    @Override
    public Map<String,Object> mergeChunk(String identifier, String fileName, Integer totalChunks) throws IOException {
        return mergeChunks(identifier,fileName,totalChunks);
    }

    /**
     * Merge all chunk files into the final file.
     *
     * @return map with "result" (Boolean) and "filePath" (web path or null)
     */
    private Map<String,Object> mergeChunks(String identifier, String fileName, Integer totalChunks){
        Map<String,Object> result = new HashMap<>();
        String chunkFileFolderPath = getChunkFileFolderPath(identifier);
        String filePath = getFilePath(identifier,fileName);
        // Only merge when every expected chunk file is present.
        if (checkChunks(chunkFileFolderPath,totalChunks)){
            File chunkFileFolder = new File(chunkFileFolderPath);
            File mergeFile = new File(filePath);
            File[] chunks = chunkFileFolder.listFiles();
            // Sort chunks numerically by file name (1,2,3,...). The list
            // returned by Arrays.asList is backed by the array, so sorting
            // it also sorts `chunks`, which the loop below iterates.
            List fileList = Arrays.asList(chunks);
            Collections.sort(fileList,(Comparator<File>)(o1,o2)->{
                return Integer.parseInt(o1.getName()) - (Integer.parseInt(o2.getName()));
            } );
            try{
                // NOTE(review): not try-with-resources — a failure mid-copy
                // leaks the open RandomAccessFile handles.
                RandomAccessFile randomAccessFileWriter = new RandomAccessFile(mergeFile,"rw");
                byte[] bytes = new byte[1024];
                for (File chunk : chunks){
                    RandomAccessFile randomAccessFileReader = new RandomAccessFile(chunk,"r");
                    int len;
                    while ((len = randomAccessFileReader.read(bytes)) != -1){
                        randomAccessFileWriter.write(bytes,0,len);
                    }
                    randomAccessFileReader.close();
                }
                randomAccessFileWriter.close();
            }catch (Exception e){
                e.printStackTrace();
                result.put("result",false);
                result.put("filePath",null);
                return result;
            }
            result.put("result",true);
            // Derive the web-visible /profile/... path from the absolute path.
            // NOTE(review): hard-coded backslashes ("upload\\") are
            // Windows-only — this breaks on Linux. Confirm deployment target.
            String nfilePath = "\\profile\\"+filePath.substring(filePath.indexOf("upload\\"));
            result.put("filePath",nfilePath);
            return result;
        }
        result.put("result",false);
        result.put("filePath",null);
        return result;
    }

    /**
     * Verify chunk files 1..totalChunks+1 all exist on disk.
     * NOTE(review): the +1 upper bound compensates for the front end
     * sending chunk.offset (the LAST chunk's zero-based index, i.e.
     * actual count - 1) as totalChunks. The two sides cancel out, but the
     * coupling is fragile — confirm before changing either side.
     */
    private boolean checkChunks(String chunkFileFolderPath, Integer totalChunks) {
        try{
            for (int i = 1;i <= totalChunks+1;i++){
                File file = new File(chunkFileFolderPath+File.separator+i);
                if (file.exists()){
                    continue;
                }else{
                    return false;
                }
            }
        }catch (Exception e){
            return false;
        }
        return true;
    }
}
#dto类
package com.xx.inter.dto;
import lombok.Data;
import org.springframework.web.multipart.MultipartFile;
/**
 * Request DTO for chunked attachment upload. Bound from the uploader's
 * chunk requests (form data) and from the JSON merge request.
 */
@Data
public class FileChunkDTO{
    /**
     * File MD5 computed by the front end; the upload's unique identifier.
     */
    private String identifier;
    /**
     * Binary payload of the current chunk.
     * NOTE(review): field is package-private unlike its siblings; Lombok
     * generates the accessors either way, but `private` would be consistent.
     */
    MultipartFile file;
    /**
     * 1-based number of the current chunk.
     */
    private Integer chunkNumber;
    /**
     * Configured chunk size in bytes.
     */
    private Long chunkSize;
    /**
     * Actual size of the current chunk (last chunk may be smaller).
     */
    private Long currentChunkSize;
    /**
     * Total file size in bytes.
     */
    private Long totalSize;
    /**
     * Total number of chunks. NOTE(review): on the merge request the front
     * end sends chunk.offset here, i.e. actual count - 1 (see checkChunks).
     */
    private Integer totalChunks;
    /**
     * Original file name.
     */
    private String filename;
}
package com.xx.inter.dto;
import lombok.Data;
import java.util.Set;
/**
 * Response DTO for the pre-upload chunk existence check.
 */
@Data
public class FileChunkResultDTO {
    /**
     * True when the whole file already exists and the upload can be skipped.
     */
    private Boolean skipUpload;
    /**
     * 1-based numbers of chunks already uploaded (null/empty when none).
     */
    private Set<Integer> uploaded;

    public FileChunkResultDTO(Boolean skipUpload,Set<Integer> uploaded){
        this.skipUpload = skipUpload;
        this.uploaded = uploaded;
    }

    public FileChunkResultDTO(Boolean skipUpload){
        this.skipUpload = skipUpload;
    }
}
#redis,若依自带的
package com.xx.common.core.redis;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.BoundSetOperations;
import org.springframework.data.redis.core.HashOperations;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.core.ValueOperations;
import org.springframework.stereotype.Component;
/**
 * Spring Redis utility wrapper (RuoYi framework stock class) around a raw
 * RedisTemplate, covering values, lists, sets, hashes, expiry and key scans.
 *
 * @author hqa
 **/
@SuppressWarnings(value = { "unchecked", "rawtypes" })
@Component
public class RedisCache
{
    @Autowired
    public RedisTemplate redisTemplate;

    /**
     * Cache a basic object (Integer, String, entity, ...).
     *
     * @param key cache key
     * @param value cached value
     */
    public <T> void setCacheObject(final String key, final T value)
    {
        redisTemplate.opsForValue().set(key, value);
    }

    /**
     * Cache a basic object with a time-to-live.
     *
     * @param key cache key
     * @param value cached value
     * @param timeout time-to-live
     * @param timeUnit unit of the time-to-live
     */
    public <T> void setCacheObject(final String key, final T value, final Integer timeout, final TimeUnit timeUnit)
    {
        redisTemplate.opsForValue().set(key, value, timeout, timeUnit);
    }

    /**
     * Set a key's expiry (seconds).
     *
     * @param key Redis key
     * @param timeout timeout in seconds
     * @return true if set successfully
     */
    public boolean expire(final String key, final long timeout)
    {
        return expire(key, timeout, TimeUnit.SECONDS);
    }

    /**
     * Set a key's expiry.
     *
     * @param key Redis key
     * @param timeout timeout
     * @param unit time unit
     * @return true if set successfully
     */
    public boolean expire(final String key, final long timeout, final TimeUnit unit)
    {
        return redisTemplate.expire(key, timeout, unit);
    }

    /**
     * Get a key's remaining time-to-live.
     *
     * @param key Redis key
     * @return remaining time-to-live
     */
    public long getExpire(final String key)
    {
        return redisTemplate.getExpire(key);
    }

    /**
     * Check whether a key exists.
     *
     * @param key Redis key
     * @return true if it exists
     */
    public Boolean hasKey(String key)
    {
        return redisTemplate.hasKey(key);
    }

    /**
     * Get a cached basic object.
     *
     * @param key cache key
     * @return the cached value for the key
     */
    public <T> T getCacheObject(final String key)
    {
        ValueOperations<String, T> operation = redisTemplate.opsForValue();
        return operation.get(key);
    }

    /**
     * Delete a single key.
     *
     * @param key Redis key
     */
    public boolean deleteObject(final String key)
    {
        return redisTemplate.delete(key);
    }

    /**
     * Delete a collection of keys.
     *
     * @param collection keys to delete
     * @return true if at least one key was deleted
     */
    public boolean deleteObject(final Collection collection)
    {
        return redisTemplate.delete(collection) > 0;
    }

    /**
     * Cache a List.
     *
     * @param key cache key
     * @param dataList list to cache
     * @return number of elements pushed
     */
    public <T> long setCacheList(final String key, final List<T> dataList)
    {
        Long count = redisTemplate.opsForList().rightPushAll(key, dataList);
        return count == null ? 0 : count;
    }

    /**
     * Get a cached List.
     *
     * @param key cache key
     * @return the cached list
     */
    public <T> List<T> getCacheList(final String key)
    {
        return redisTemplate.opsForList().range(key, 0, -1);
    }

    /**
     * Cache a Set.
     *
     * @param key cache key
     * @param dataSet set to cache
     * @return the bound set operations handle
     */
    public <T> BoundSetOperations<String, T> setCacheSet(final String key, final Set<T> dataSet)
    {
        BoundSetOperations<String, T> setOperation = redisTemplate.boundSetOps(key);
        Iterator<T> it = dataSet.iterator();
        while (it.hasNext())
        {
            setOperation.add(it.next());
        }
        return setOperation;
    }

    /**
     * Get a cached Set.
     *
     * @param key cache key
     * @return the cached set
     */
    public <T> Set<T> getCacheSet(final String key)
    {
        return redisTemplate.opsForSet().members(key);
    }

    /**
     * Cache a Map as a Redis hash.
     *
     * @param key cache key
     * @param dataMap map to cache
     */
    public <T> void setCacheMap(final String key, final Map<String, T> dataMap)
    {
        if (dataMap != null) {
            redisTemplate.opsForHash().putAll(key, dataMap);
        }
    }

    /**
     * Get a cached Map (Redis hash).
     *
     * @param key cache key
     * @return the cached map
     */
    public <T> Map<String, T> getCacheMap(final String key)
    {
        return redisTemplate.opsForHash().entries(key);
    }

    /**
     * Put one entry into a Redis hash.
     *
     * @param key Redis key
     * @param hKey hash field
     * @param value value to store
     */
    public <T> void setCacheMapValue(final String key, final String hKey, final T value)
    {
        redisTemplate.opsForHash().put(key, hKey, value);
    }

    /**
     * Get one entry from a Redis hash.
     *
     * @param key Redis key
     * @param hKey hash field
     * @return the stored value
     */
    public <T> T getCacheMapValue(final String key, final String hKey)
    {
        HashOperations<String, String, T> opsForHash = redisTemplate.opsForHash();
        return opsForHash.get(key, hKey);
    }

    /**
     * Get multiple entries from a Redis hash.
     *
     * @param key Redis key
     * @param hKeys hash fields
     * @return stored values, in field order
     */
    public <T> List<T> getMultiCacheMapValue(final String key, final Collection<Object> hKeys)
    {
        return redisTemplate.opsForHash().multiGet(key, hKeys);
    }

    /**
     * Delete one entry from a Redis hash.
     *
     * @param key Redis key
     * @param hKey hash field
     * @return true if the field was deleted
     */
    public boolean deleteCacheMapValue(final String key, final String hKey)
    {
        return redisTemplate.opsForHash().delete(key, hKey) > 0;
    }

    /**
     * List keys matching a pattern.
     *
     * @param pattern key pattern (prefix)
     * @return matching keys
     */
    public Collection<String> keys(final String pattern)
    {
        return redisTemplate.keys(pattern);
    }
}