Weekly Summary Series, Week 4 (2020.5.5)
File Upload
Determining the File Type
- Determine the file type from its binary content (magic numbers) rather than trusting the file extension, so that a renamed or tampered suffix cannot fool the check.
// Read a Blob and return its bytes as space-separated uppercase hex, e.g. '89 50 4E 47'
blobToString(blob){
  return new Promise(resolve=>{
    const reader = new FileReader()
    reader.onload = function(){
      const ret = reader.result
        .split('')
        .map(v=>v.charCodeAt())
        .map(v=>v.toString(16).toUpperCase().padStart(2, '0'))
        .join(' ')
      resolve(ret)
    }
    reader.readAsBinaryString(blob)
  })
},
Checking a few common image types against their magic numbers:
// GIF: the first 6 bytes are 'GIF89a' or 'GIF87a'
const gifHead = await this.blobToString(file.slice(0, 6))
const isGif = (gifHead === '47 49 46 38 39 61') || (gifHead === '47 49 46 38 37 61')
// PNG: the first 8 bytes are the fixed PNG signature
const pngHead = await this.blobToString(file.slice(0, 8))
const isPng = (pngHead === '89 50 4E 47 0D 0A 1A 0A')
// JPG: starts with FF D8 and ends with FF D9
const start = await this.blobToString(file.slice(0, 2))
const end = await this.blobToString(file.slice(-2))
const isJpg = (start === 'FF D8') && (end === 'FF D9')
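For context, a minimal sketch of how these checks might gate the upload before any chunking happens (the isImage wrapper and this.$message are assumptions, not part of the original code):
// Hypothetical pre-upload gate; assumes the three checks above have been
// wrapped in an isImage(file) method on the same component.
async handleUpload () {
  if (!this.file) return
  if (!(await this.isImage(this.file))) {
    this.$message.error('Unsupported file type')
    return
  }
  // ...continue with chunking, hashing and uploading
}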
File Chunking
Large files are split into fixed-size chunks so they can be uploaded, retried and resumed independently.
const CHUNK_SIZE = 1 * 1024 * 1024 // 1 MB per chunk

// Slice this.file into fixed-size chunks; each entry records its starting byte offset.
createFileChunk(size = CHUNK_SIZE){
  const chunks = []
  let cur = 0
  while(cur < this.file.size){
    chunks.push({ index: cur, file: this.file.slice(cur, cur + size) })
    cur += size
  }
  return chunks
}
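A quick usage sketch (assuming a 5 MB this.file and the default CHUNK_SIZE; note that index here is the starting byte offset of each slice, not its ordinal):
const chunks = this.createFileChunk()
// => [{ index: 0, file: Blob(1 MB) }, { index: 1048576, file: Blob(1 MB) }, ...]
console.log(chunks.length) // 5 for a 5 MB file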
Calculating the File Hash
The content hash identifies the file on the server and is what makes instant upload and resumable upload possible. Three strategies are shown below: hashing in a Web Worker, hashing during browser idle time, and sampled hashing.
Option 1: compute the MD5 hash in a Web Worker so the main thread stays responsive.
async calculateHashWorker(){
  return new Promise(resolve=>{
    // hash.js must be served as a static asset (e.g. from the public directory).
    this.worker = new Worker('/hash.js')
    this.worker.postMessage({ chunks: this.chunks })
    this.worker.onmessage = e=>{
      const {progress, hash} = e.data
      this.hashProgress = Number(progress.toFixed(2))
      // The worker only includes `hash` in its final message.
      if(hash){
        resolve(hash)
      }
    }
  })
}
The worker script (hash.js) pulls in spark-md5 and appends the chunks to an incremental MD5 one by one:
self.importScripts('spark-md5.min.js')
self.onmessage = e=>{
  const {chunks} = e.data
  const spark = new self.SparkMD5.ArrayBuffer()
  let progress = 0
  let count = 0
  const loadNext = index=>{
    const reader = new FileReader()
    reader.readAsArrayBuffer(chunks[index].file)
    reader.onload = e=>{
      count++
      spark.append(e.target.result)
      if(count == chunks.length){
        // All chunks appended: report 100% and the final hash.
        self.postMessage({
          progress: 100,
          hash: spark.end()
        })
      }else{
        progress += 100 / chunks.length
        self.postMessage({
          progress
        })
        // Read the next chunk.
        loadNext(count)
      }
    }
  }
  loadNext(0)
}
Option 2: compute the hash with requestIdleCallback, appending chunks only while the browser is idle.
async calculateHashIdle(){
  const chunks = this.chunks
  return new Promise(resolve=>{
    // Assumes SparkMD5 has been imported from 'spark-md5'.
    const spark = new SparkMD5.ArrayBuffer()
    let count = 0
    // Read one chunk into the incremental MD5 state.
    const appendToSpark = file=>{
      return new Promise(resolve=>{
        const reader = new FileReader()
        reader.readAsArrayBuffer(file)
        reader.onload = e=>{
          spark.append(e.target.result)
          resolve()
        }
      })
    }
    // Process chunks for as long as the current idle period allows.
    const workLoop = async deadline=>{
      while(count < chunks.length && deadline.timeRemaining() > 1){
        await appendToSpark(chunks[count].file)
        count++
        if(count < chunks.length){
          this.hashProgress = Number(((100 * count) / chunks.length).toFixed(2))
        }else{
          this.hashProgress = 100
          resolve(spark.end())
        }
      }
      // Not finished yet: wait for the next idle period.
      if(count < chunks.length){
        window.requestIdleCallback(workLoop)
      }
    }
    window.requestIdleCallback(workLoop)
  })
}
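requestIdleCallback is not implemented in every browser (Safari in particular), so a small shim may be needed; a minimal sketch, purely as an assumption on top of the code above:
// Fallback when requestIdleCallback is unavailable: approximate an idle
// deadline of ~50 ms per tick with setTimeout.
window.requestIdleCallback = window.requestIdleCallback || function (cb) {
  const start = Date.now()
  return setTimeout(() => {
    cb({
      didTimeout: false,
      timeRemaining () {
        return Math.max(0, 50 - (Date.now() - start))
      }
    })
  }, 1)
}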
Option 3: sampled hashing. Hash the first and last 2 MB in full, but take only three 2-byte samples (head, middle, tail) from every chunk in between. This trades a tiny risk of collision for a much faster hash.
async calculateHashSample(){
  return new Promise(resolve=>{
    const spark = new SparkMD5.ArrayBuffer()
    const reader = new FileReader()
    const file = this.file
    const size = file.size
    const offset = 2 * 1024 * 1024
    // The first 2 MB is always included in full.
    let chunks = [file.slice(0, offset)]
    let cur = offset
    while(cur < size){
      if(cur + offset >= size){
        // Last chunk: include it in full.
        chunks.push(file.slice(cur, cur + offset))
      }else{
        // Middle chunks: sample 2 bytes at the head, middle and tail.
        const mid = cur + offset / 2
        const end = cur + offset
        chunks.push(file.slice(cur, cur + 2))
        chunks.push(file.slice(mid, mid + 2))
        chunks.push(file.slice(end - 2, end))
      }
      cur += offset
    }
    reader.readAsArrayBuffer(new Blob(chunks))
    reader.onload = e=>{
      spark.append(e.target.result)
      this.hashProgress = 100
      resolve(spark.end())
    }
  })
}
Instant Upload and Resumable Upload
Before uploading, the client asks the server whether it already has the file (by hash). If it does, the upload is skipped entirely ("instant upload"); if only some chunks exist, just the missing ones are sent ("resumable upload").
In the upload entry point, after chunking and hashing:
const chunks = this.createFileChunk()
this.hash = await this.calculateHashSample()
// Ask the server whether the file (or some of its chunks) already exists.
const {data: {uploaded, uploadedList}} = await this.$http.post('/checkfile', {
  hash: this.hash,
  ext: this.file.name.split('.').pop()
})
if(uploaded){
  // The whole file is already on the server: instant upload.
  return this.$message.success('File uploaded instantly!')
}
this.chunks = chunks.map((chunk, index)=>{
  const name = this.hash + '-' + index
  return {
    hash: this.hash,
    name,
    index,
    chunk: chunk.file,
    // Chunks already on the server start at 100% progress.
    progress: uploadedList.indexOf(name) > -1 ? 100 : 0
  }
})
await this.uploadChunks(uploadedList)
async uploadChunks(uploadedList = []){
  // Only upload the chunks the server does not already have.
  const requests = this.chunks
    .filter(chunk=>uploadedList.indexOf(chunk.name) === -1)
    .map(chunk=>{
      const form = new FormData()
      form.append('chunk', chunk.chunk)
      form.append('hash', chunk.hash)
      form.append('name', chunk.name)
      return { form, index: chunk.index, error: 0 }
    })
  await this.sendRequest(requests)
  await this.mergeRequest()
}
Server side (an egg.js-style Node controller): report whether the full file already exists, and if not, which chunks have been uploaded so far.
async checkfile(){
  const {ctx} = this
  const {ext, hash} = ctx.request.body
  const filePath = path.resolve(this.config.UPLOAD_DIR, `${hash}.${ext}`)
  let uploaded = false
  let uploadedList = []
  if(fse.existsSync(filePath)){
    // The merged file already exists: instant upload.
    uploaded = true
  }else{
    // Otherwise list the chunks already stored under the hash directory.
    uploadedList = await this.getUploadedList(path.resolve(this.config.UPLOAD_DIR, hash))
  }
  this.success({
    uploaded,
    uploadedList
  })
}
async getUploadedList(dirpath){
  // Return the chunk names already saved for this hash (ignoring dotfiles).
  return fse.existsSync(dirpath)
    ? (await fse.readdir(dirpath)).filter(name=>name[0] !== '.')
    : []
}
Controlling Upload Concurrency
Firing every chunk request at once can exhaust the browser's connection pool, so the requests are drained by a small pool of concurrent workers (3 by default), and a failed chunk is retried up to 3 times before the whole upload is aborted.
async sendRequest(chunks, limit = 3){
  return new Promise((resolve, reject)=>{
    const len = chunks.length
    let count = 0      // number of chunks uploaded successfully
    let isStop = false // set when a chunk has failed too many times
    const start = async ()=>{
      if(isStop){
        return
      }
      const task = chunks.shift()
      if(task){
        const {form, index} = task
        try{
          await this.$http.post('/uploadFile', form, {
            onUploadProgress: progress=>{
              this.chunks[index].progress =
                Number(((progress.loaded / progress.total) * 100).toFixed(2))
            }
          })
          if(count == len - 1){
            // Last chunk finished: all uploads are done.
            resolve()
          }else{
            count++
            start()
          }
        }catch(err){
          // Mark the chunk as failed and retry it up to 3 times.
          this.chunks[index].progress = -1
          if(task.error < 3){
            task.error++
            chunks.unshift(task)
            start()
          }else{
            isStop = true
            reject()
          }
        }
      }
    }
    // Kick off `limit` parallel workers, slightly staggered.
    while(limit > 0){
      setTimeout(()=>{
        start()
      }, Math.random() * 2000)
      limit -= 1
    }
  })
}
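Since every chunk carries its own progress value, an overall progress bar can be derived from them; a minimal sketch (the uploadProgress computed property is an assumption, not part of the original):
computed: {
  // Size-weighted average of per-chunk progress; failed chunks (-1) count as 0.
  uploadProgress () {
    if (!this.file || this.chunks.length === 0) return 0
    const loaded = this.chunks
      .map(chunk => chunk.chunk.size * Math.max(chunk.progress, 0))
      .reduce((acc, cur) => acc + cur, 0)
    return Number((loaded / this.file.size).toFixed(2))
  }
}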
Server side: each chunk is saved into a directory named after the file hash.
async uploadFile(){
  const {ctx} = this
  const file = ctx.request.files[0]
  const {hash, name} = ctx.request.body
  const chunkPath = path.resolve(this.config.UPLOAD_DIR, hash)
  if(!fse.existsSync(chunkPath)){
    await fse.mkdir(chunkPath)
  }
  // Move the temp file produced by the multipart parser into the chunk directory.
  await fse.move(file.filepath, chunkPath + '/' + name)
  this.message('Chunk uploaded successfully!')
}
Merging Chunks
Once all chunks are on the server, the client asks it to stitch them back into the original file.
async mergeRequest () {
  // Tell the server to merge the chunks once they are all uploaded.
  await this.$http.post('/merge', {
    ext: this.file.name.split('.').pop(),
    size: CHUNK_SIZE,
    hash: this.hash
  })
}
async merge() {
  const {ext, size, hash} = this.ctx.request.body
  const filePath = path.resolve(this.config.UPLOAD_DIR, `${hash}.${ext}`)
  await this.ctx.service.tools.mergeFile(filePath, hash, size)
  this.success({
    url: `/public/${hash}.${ext}`
  })
}
async mergeFile(filePath, fileHash, size){
  const chunkDir = path.resolve(this.config.UPLOAD_DIR, fileHash)
  let chunks = await fse.readdir(chunkDir)
  // Chunk names look like `${hash}-${index}`; sort them by index before merging.
  chunks.sort((a, b)=> a.split('-')[1] - b.split('-')[1])
  chunks = chunks.map(cp=>path.resolve(chunkDir, cp))
  await this.mergeChunks(chunks, filePath, size, chunkDir)
}
async mergeChunks(files, dest, size, chunkDir){
  // Stream one chunk into the target file, then delete the chunk file.
  const pipeStream = (filePath, writeStream)=> new Promise(resolve=>{
    const readStream = fse.createReadStream(filePath)
    readStream.on('end', ()=>{
      fse.unlinkSync(filePath)
      resolve()
    })
    readStream.pipe(writeStream)
  })
  // Write all chunks in parallel; `start` positions each write stream at index * size.
  await Promise.all(
    files.map((file, index)=>
      pipeStream(file, fse.createWriteStream(dest, {
        start: index * size
      }))
    )
  )
  // All chunks have been consumed; remove the now-empty chunk directory.
  fse.rmdirSync(chunkDir)
}
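For completeness, a sketch of how the three endpoints might be wired up on the server, assuming an egg.js app and an upload controller (the names are assumptions; only the paths are taken from the front-end code above):
// app/router.js (sketch)
module.exports = app => {
  const { router, controller } = app
  router.post('/checkfile', controller.upload.checkfile)
  router.post('/uploadFile', controller.upload.uploadFile)
  router.post('/merge', controller.upload.merge)
}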