Transfer parameter object
package com.deju.provider.upload.domain;
import lombok.Data;
import org.springframework.web.multipart.MultipartFile;
@Data
public class MultipartFileParam {
//ID of the file-transfer task
private String taskId;
//index of the current chunk (1-based)
private int chunkNumber;
//size of each chunk
private long chunkSize;
//total number of chunks
private int totalChunks;
//unique identifier of the file (its MD5)
private String identifier;
//the chunk being transferred
private MultipartFile file;
}
Controller
@PostMapping("/chunkUpload")
public void fileChunkUpload(MultipartFileParam param, HttpServletRequest request, HttpServletResponse response) {
if (checkUploadedByMd5(param.getIdentifier(), response)) {
return;
}
sliceUploadService.fileChunkUpload(param, request, response);
}
/**
 * Checks whether the file has already been uploaded (instant upload by MD5).
 *
 * @param md5Value MD5 value of the file
 * @param response HTTP response, used to return the stored file path directly
 * @return true if the file already exists and the response has been written
 */
private boolean checkUploadedByMd5(String md5Value, HttpServletResponse response) {
MembersOrganAuthUpload upload = uploadService.lambdaQuery().eq(MembersOrganAuthUpload::getFileMd5, md5Value).one();
if (Objects.nonNull(upload) && StringUtils.isNotBlank(upload.getFilePath())) {
try {
response.setStatus(200);
response.getWriter().print(upload.getFilePath());
} catch (Exception e) {
response.setStatus(415);
} finally {
try {
response.getWriter().flush();
} catch (IOException ignored) {
//nothing more can be done if the flush itself fails
}
}
return true;
}
return false;
}
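The lookup above runs through a MyBatis-Plus IService (uploadService.lambdaQuery()) against an entity called MembersOrganAuthUpload, which the post never shows. Below is only a minimal sketch of the two fields the controller actually reads; the package, table name and any other columns are assumptions, not part of the original code.

package com.deju.provider.upload.domain;
import com.baomidou.mybatisplus.annotation.TableName;
import lombok.Data;
//minimal sketch only: package, table name and extra columns are assumptions
@Data
@TableName("members_organ_auth_upload")
public class MembersOrganAuthUpload {
//MD5 of the complete file, stored once the upload finishes
private String fileMd5;
//path under which the uploaded file was saved
private String filePath;
}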
Service
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.tomcat.util.http.fileupload.servlet.ServletFileUpload;
//MultipartFileParam, ServiceException and RuoYiConfig are project classes imported from elsewhere in the project
import org.springframework.stereotype.Service;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.UUID;
/**
* @author alpha.
* @className SliceUploadService
* @date 2023/12/4 10:39
*/
@Service
@Slf4j
public class SliceUploadService {
public void fileChunkUpload(MultipartFileParam param, HttpServletRequest request, HttpServletResponse response) {
String root = "<directory where uploaded files are stored>";
//validate the folder name: it must not contain special characters
File file = new File(root);
String path = file.getAbsolutePath();
response.setContentType("text/html;charset=UTF-8");
//response.setStatus is the contract with the front-end uploader plugin:
//200, 201, 202: the current chunk was uploaded successfully and will not be re-sent.
//404, 415, 500, 501: the current chunk failed and the whole file upload is cancelled.
//any other status code: something went wrong, but the chunk will be retried automatically.
try {
/**
 * Check whether the request is a multipart/form-data upload
 */
boolean isMultipart = ServletFileUpload.isMultipartContent(request);
if (!isMultipart) {
//this is where data is sent back to the front end; read it as "return data", details omitted
log.warn("unsupported form format");
response.setStatus(404);
response.getWriter().print("unsupported form format");//use the writer here as well, so the flush below does not mix writer and output stream
} else {
param.setTaskId(param.getIdentifier());
String absPath = chunkUploadByMappedByteBuffer(param, path);
if (StringUtils.isNotBlank(absPath)) {
response.setStatus(200);
//only the part of the path after the RuoYiConfig.SLICE_FLAG marker is returned to the client
String savePath = absPath.split(RuoYiConfig.SLICE_FLAG)[1];
response.getWriter().print(savePath);
}
}
response.getWriter().flush();
} catch (ServiceException e) {
//business failure (e.g. MD5 mismatch): tell the plugin to cancel this file's upload
response.setStatus(501);
} catch (Exception e) {
log.warn("file upload failed", e);
response.setStatus(415);
}
}
/**
 * Writes one chunk into the shared temp file at its offset; once every chunk has
 * arrived, verifies the MD5 and renames the temp file to its final name.
 *
 * @param param    chunk upload parameters
 * @param filePath directory the file is stored in
 * @return absolute path of the finished file, or null while chunks are still missing
 * @throws Exception
 */
private String chunkUploadByMappedByteBuffer(MultipartFileParam param, String filePath) throws Exception {
if (param.getTaskId() == null || "".equals(param.getTaskId())) {
param.setTaskId(UUID.randomUUID().toString());
}
/**
 * 1: create a temp file in the same directory as the final file
 * 2: create the directory first if it does not exist
 */
String fileName = param.getFile().getOriginalFilename();
String tempFileName = param.getTaskId() + fileName.substring(fileName.lastIndexOf(".")) + "_tmp";
File fileDir = new File(filePath);
if (!fileDir.exists()) {
fileDir.mkdirs();
}
File tempFile = new File(filePath, tempFileName);
//step 1: open the temp file for random access
RandomAccessFile raf = new RandomAccessFile(tempFile, "rw");
//step 2: get its file channel
FileChannel fileChannel = raf.getChannel();
//step 3: compute the write offset of this chunk, e.g. chunk 3 with a 10 MB chunk size starts at offset 20 MB
long position = (param.getChunkNumber() - 1) * param.getChunkSize();
//step 4: read the chunk content
byte[] fileData = param.getFile().getBytes();
//step 5: write the chunk at its offset
fileChannel.position(position);
fileChannel.write(ByteBuffer.wrap(fileData));
//fileChannel.map would be faster, but the mapped buffer is hard to release and can keep the temp file locked, so it is not used here
//MappedByteBuffer mappedByteBuffer = fileChannel.map(FileChannel.MapMode.READ_WRITE,position,fileData.length);
//step 6
//mappedByteBuffer.put(fileData);
//step 7
//freedMappedByteBuffer(mappedByteBuffer);
//Method method = FileChannelImpl.class.getDeclaredMethod("unmap", MappedByteBuffer.class);
//method.setAccessible(true);
//method.invoke(FileChannelImpl.class, mappedByteBuffer);
fileChannel.force(true);
fileChannel.close();
raf.close();
//step 8: record this chunk in the .conf progress file and check whether the whole file has arrived
boolean isComplete = checkUploadStatus(param, fileName, filePath);
String absPath = null;
if (isComplete) {
//all chunks received: compute the MD5, rename the temp file, then compare against the identifier sent by the client
String md5;
try (FileInputStream in = new FileInputStream(tempFile.getPath())) {
md5 = DigestUtils.md5Hex(in);
}
absPath = renameFile(tempFile, fileName);
if (StringUtils.isNotBlank(md5) && !md5.equals(param.getIdentifier())) {
//the uploaded content does not match the client's MD5: abort with a business exception
throw new ServiceException("file MD5 mismatch");
}
}
return absPath;
}
/**
 * Renames a file.
 *
 * @param toBeRenamed   the file to rename
 * @param toFileNewName the new file name
 * @return the full path of the renamed file
 */
public String renameFile(File toBeRenamed, String toFileNewName) {
//make sure the file to be renamed exists and is not a directory
if (!toBeRenamed.exists() || toBeRenamed.isDirectory()) {
//custom business exception
throw new ServiceException("file does not exist");
}
String p = toBeRenamed.getParent();
String realName = p + File.separatorChar + toFileNewName;
File newFile = new File(realName);
//rename the file and fail fast if the OS refuses the rename
if (!toBeRenamed.renameTo(newFile)) {
throw new ServiceException("file rename failed");
}
return realName;
}
/**
 * Records the current chunk in the .conf progress file and checks whether every chunk has been uploaded.
 *
 * @return true once all chunks of the file have arrived
 */
public boolean checkUploadStatus(MultipartFileParam param, String fileName, String filePath) throws IOException {
File confFile = new File(filePath, fileName + ".conf");
RandomAccessFile confAccessFile = new RandomAccessFile(confFile, "rw");
//the conf file holds exactly one byte per chunk
confAccessFile.setLength(param.getTotalChunks());
//seek to the byte that represents the current chunk
confAccessFile.seek(param.getChunkNumber() - 1);
//mark this chunk as uploaded by writing 127 (Byte.MAX_VALUE) at its position
confAccessFile.write(Byte.MAX_VALUE);
byte[] completeStatusList = FileUtils.readFileToByteArray(confFile);
confAccessFile.close();//must be closed, otherwise the file stays locked and cannot be deleted later
//how it works: the conf file is as long as the total number of chunks; every uploaded chunk writes 127 at its index,
//while positions still holding the default 0 are chunks that have not arrived yet.
//e.g. with 4 chunks and only chunks 1 and 3 uploaded, the conf bytes are [127, 0, 127, 0]
for (int i = 0; i < completeStatusList.length; i++) {
if (completeStatusList[i] != Byte.MAX_VALUE) {
return false;
}
}
//every chunk has been uploaded, so the conf file is no longer needed and is deleted
confFile.delete();
return true;
}
}
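The commented-out fileChannel.map() variant above calls a freedMappedByteBuffer helper that the post never shows. The sketch below is one possible implementation, following the reflection hint in those comments; it depends on the JDK-internal sun.nio.ch.FileChannelImpl#unmap method (JDK 8 era) and is an assumption, not part of the original service. It would sit inside SliceUploadService, where log is available via @Slf4j; the extra imports needed are java.lang.reflect.Method and java.nio.MappedByteBuffer.

//possible sketch of the helper referenced by the commented-out map() code (assumption, JDK 8 only)
private void freedMappedByteBuffer(MappedByteBuffer mappedByteBuffer) {
if (mappedByteBuffer == null) {
return;
}
try {
//FileChannelImpl.unmap(MappedByteBuffer) is private and static, hence the reflection
Class<?> fileChannelImpl = Class.forName("sun.nio.ch.FileChannelImpl");
Method unmap = fileChannelImpl.getDeclaredMethod("unmap", MappedByteBuffer.class);
unmap.setAccessible(true);
unmap.invoke(null, mappedByteBuffer);
} catch (Exception e) {
log.warn("failed to unmap MappedByteBuffer", e);
}
}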
Front end (Vue)
- Prerequisites (install the libraries; vue-simple-uploader also needs to be registered with Vue.use in main.js)
npm install --save vue-simple-uploader
npm install --save spark-md5
Core of the Vue component
<el-form-item label="Select file" prop="filePath">
<uploader :key="uploader_key" :options="options"
:autoStart="false"
class="uploader-example"
@file-success="onFileSuccess"
@file-error="onFileError"
@file-added="filesAdded"
>
<uploader-unsupport></uploader-unsupport>
<uploader-drop>
<uploader-btn :single="true">Select file</uploader-btn>
</uploader-drop>
<uploader-list></uploader-list>
</uploader>
</el-form-item>
//////////////
<script>
import SparkMD5 from 'spark-md5'
import {getToken} from "@/utils/auth";
export default {
name: 'Auth',
dicts: ['auth_ddl', 'upload_status', 'auth_file_type', 'sys_yes_no'],
props: {
msg: String
},
data() {
return {
////////////////////////////////////
uploader_key: new Date().getTime(),
options: {
target: window.location.origin + '/members-plus/sliceUpload/chunkUpload',
testChunks: false,//whether to probe the server for already-uploaded chunks before sending them
headers: {
Authorization: "Bearer " + getToken(),
}
},
//form model holding the returned file path(s); referenced by the methods below, other fields omitted
uploadForm: {
filePath: null
}
}
},
methods: {
onFileSuccess: function (rootFile, file, response, chunk) {
console.log(rootFile)
console.log(file)
console.log(chunk)
//the response body is the saved file path returned by the back end
if (this.uploadForm.filePath == null) {
this.uploadForm.filePath = response
} else {
this.uploadForm.filePath += "," + response
}
this.$modal.msgSuccess("upload succeeded")
},
onFileError: function (rootFile, file, response, chunk) {
console.log(rootFile)
console.log(file)
console.log(chunk)
this.$modal.msgError("error: " + response)
},
/**
 * Compute the file MD5; it drives both resumable uploads and instant upload
 * @param file
 */
computeMD5(file) {
//computing the MD5 of a large file takes a while, so show a loading indicator
const loading = this.$loading({
lock: true,
text: 'Computing MD5...',
spinner: 'el-icon-loading',
background: 'rgba(0, 0, 0, 0.7)'
});
let fileReader = new FileReader();
let time = new Date().getTime();
let blobSlice = File.prototype.slice || File.prototype.mozSlice || File.prototype.webkitSlice;
let currentChunk = 0;
const chunkSize = 10 * 1024 * 1000;//read the file in ~10 MB slices while hashing
let chunks = Math.ceil(file.size / chunkSize);
let spark = new SparkMD5.ArrayBuffer();
file.pause();
loadNext();
fileReader.onload = (e => {
spark.append(e.target.result);
if (currentChunk < chunks) {
currentChunk++;
loadNext();
// show the MD5 progress in real time
this.$nextTick(() => {
console.log('MD5 progress ' + ((currentChunk / chunks) * 100).toFixed(0) + '%')
})
} else {
let md5 = spark.end();
loading.close();
this.computeMD5Success(md5, file);
console.log(`MD5 finished: ${file.name} \nMD5: ${md5} \nchunks: ${chunks} size: ${file.size} took: ${new Date().getTime() - time} ms`);
}
});
fileReader.onerror = () => {
//arrow function so `this` still refers to the Vue component
this.$modal.msgError(`failed to read file ${file.name}, please check it`);
loading.close();
file.cancel();
};
function loadNext() {
let start = currentChunk * chunkSize;
let end = ((start + chunkSize) >= file.size) ? file.size : start + chunkSize;
fileReader.readAsArrayBuffer(blobSlice.call(file.file, start, end));
}
},
computeMD5Success(md5, file) {
file.uniqueIdentifier = md5;//use the MD5 as the file's unique identifier, sent to the server as `identifier`
file.resume();//start the actual upload
},
/**
 * Triggered after a file has been added
 * @param file
 * @param event
 */
filesAdded(file, event) {
//reject files whose name is already part of the uploaded path list
if (this.uploadForm.filePath && this.uploadForm.filePath.indexOf(file.name) !== -1) {
this.$modal.msgError("a file with this name already exists")
file.cancel();
return
}
this.computeMD5(file)
},
},
}
</script>
From: https://www.cnblogs.com/BoBo-B0BO/p/17880258.html