Chunked File Upload
2024/10/19
File upload is a common requirement, especially for large files. To improve upload efficiency and user experience, we can combine concurrent chunked uploads with instant upload (秒传). This article explains these techniques and provides a complete working example.
1. Approach
- File slicing: split the large file into chunks of 1 MB each.
- Concurrent upload: use a concurrency pool to cap the number of chunks uploading at once, so the server is not flooded with requests (see the standalone sketch after this list).
- Instant upload: before uploading, send the file's hash to the server; if the file already exists, report success immediately.
- Progress bar: show upload progress in real time for a better user experience.
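Before the full code, here is the concurrency-pool pattern from step 2 in isolation. This is a minimal sketch; `uploadChunk` stands in for any function that returns a Promise and is not part of the implementation below:

```js
// Minimal concurrency pool: at most MAX_POOL tasks in flight at any time.
const runPool = async (chunks, uploadChunk, MAX_POOL = 3) => {
  const pool = [];
  for (const chunk of chunks) {
    const task = uploadChunk(chunk);
    // Remove the task from the pool as soon as it settles
    const remove = () => pool.splice(pool.indexOf(task), 1);
    task.then(remove, remove);
    pool.push(task);
    if (pool.length >= MAX_POOL) {
      // Wait until any in-flight task finishes before queueing the next one
      await Promise.race(pool);
    }
  }
  // Wait for the remaining tasks
  await Promise.all(pool);
};
```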
2. Implementation
The following is a complete client-side implementation.

```js
import request from "@/utils/http";
import { message } from "ant-design-vue";
import axios from "axios";
import BigNumber from "big-number";
import SparkMD5 from "spark-md5";
const useUpload = () => {
  const SIZE = 1024 * 1024 * 1; // chunk size: 1 MB
  const MAX_POOL = 3; // maximum number of concurrent uploads
  const POOL = []; // concurrency pool
  let totalSize = 0; // total file size in bytes
  let filename = ""; // file name
  let fileChunks = []; // file chunks
  let progressArr = []; // per-chunk upload progress
  let cancelFuncArr = []; // axios cancel functions, one per chunk
  let uploader; // options object passed in by the ant-design-vue Upload component

  const setProgress = (data) => {
    console.log("____progress____", data);
    uploader?.onProgress({ percent: data });
    if (data >= 100) {
      setTimeout(() => {
        message.success("Upload successful");
        uploader?.onSuccess();
      }, 500);
    }
  };
  // Update the overall progress bar: progressArr[i] holds loaded bytes * 100
  // for chunk i, so the sum divided by totalSize is an integer percentage
  const handleUploadProgress = (progressEvent, chunkIndex) => {
    if (progressEvent.total) {
      progressArr[chunkIndex] = progressEvent.loaded * 100;
      const curTotal = progressArr.reduce(
        (accumulator, currentValue) => accumulator + currentValue,
        0,
      );
      setProgress(Number(BigNumber(curTotal).div(BigNumber(totalSize))));
    }
  };
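  // For chunks the server already has (instant-upload / resume cases), credit
  // their full size to the progress tally without re-uploading them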
  const handleFinishedUploadProgress = (size, chunkIndex) => {
    progressArr[chunkIndex] = size * 100;
    const curTotal = progressArr.reduce(
      (accumulator, currentValue) => accumulator + currentValue,
      0,
    );
    setProgress(Number(BigNumber(curTotal).div(BigNumber(totalSize))));
  };
  // Entry point: receives the customRequest options from the Upload component
  const handleFileChange = async (info) => {
    uploader = info;
    const file = info.file;
    console.log(`1 file`, file);
    if (!file.size) {
      info.onError(new Error("File is empty"));
      return message.error("File is empty");
    }
    fileChunks = [];
    cancelFuncArr = [];
    let chunkIndex = 0;
    totalSize = file.size;
    filename = file.name;
    const spark = new SparkMD5.ArrayBuffer();
    // Slice the file and feed each slice into the incremental MD5 hash
    for (let cur = 0; cur < file.size; cur += SIZE) {
      fileChunks.push({
        chunkIndex: chunkIndex++,
        chunk: file.slice(cur, cur + SIZE),
      });
      spark.append(await file.slice(cur, cur + SIZE).arrayBuffer());
    }
    console.log(`2 fileChunks`, SIZE, file.size, fileChunks);
    const hash = spark.end();
    progressArr = [];
    handleFileUpload(hash);
  };
  const handleFileUpload = async (hash) => {
    const verifyRes = await verifyUpload(filename, hash).catch((e) => {
      console.error(e);
    });
    if (verifyRes !== undefined) {
      if (typeof verifyRes === "boolean") {
        if (verifyRes) {
          // The server already has the complete file: instant upload
          console.log("Instant upload");
          setProgress(100);
        } else {
          // Nothing stored yet: upload every chunk
          POOL.length = 0;
          sliceChunks(
            hash,
            fileChunks.map(() => 0),
          );
        }
      } else {
        // Partially uploaded: the server returns the stored size of each chunk
        POOL.length = 0;
        sliceChunks(hash, verifyRes);
      }
    } else {
      message.error("File verification failed");
      uploader?.onError();
    }
  };
  // Ask the server whether this file (or part of it) already exists
  const verifyUpload = (filename, hash) => {
    return request({
      url: "/upload/verify",
      method: "POST",
      data: { filename, hash },
      headers: { "Content-Type": "application/json" },
    });
  };
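  // Upload the chunks through a bounded concurrency pool. chunksSize[i] is the
  // byte count the server already holds for chunk i (0 if none), so chunks
  // whose stored size already matches are skipped and only counted as progress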
  const sliceChunks = async (hash, chunksSize) => {
    for (let i = 0; i < fileChunks.length; i++) {
      const fileChunk = fileChunks[i];
      const formData = new FormData();
      formData.append("filename", filename);
      formData.append("chunkIndex", String(fileChunk.chunkIndex));
      formData.append("hash", hash);
      formData.append("file", fileChunk.chunk);
      if (chunksSize[i] !== fileChunk.chunk.size) {
        // A matching size means the chunk is already on the server;
        // only upload the chunks whose size differs
        const uploadTask = uploadFile(formData, i);
        uploadTask.then(() => handleTask(uploadTask));
        POOL.push(uploadTask);
        if (POOL.length === MAX_POOL) {
          // Only continue the loop (and queue a new task) after one of the
          // pooled tasks has finished
          await Promise.race(POOL);
        }
      } else {
        handleFinishedUploadProgress(chunksSize[i], i);
      }
    }
    Promise.all(POOL).then(() => {
      mergeFile(filename, hash);
    });
  };
  const uploadFile = (data, chunkIndex) => {
    return request({
      url: "/upload/upload",
      method: "POST",
      data,
      onUploadProgress: (progressEvent) =>
        handleUploadProgress(progressEvent, chunkIndex),
      cancelToken: new axios.CancelToken((cancelFunc) => {
        cancelFuncArr[chunkIndex] = cancelFunc;
      }),
    })
      .then(() => {
        console.log("Chunk uploaded");
      })
      .catch((err) => {
        // A cancelled request is not an error from the user's point of view
        if (axios.isCancel(err)) return;
        message.error(err?.message ?? "Upload failed");
        uploader?.onError();
      });
  };
  const handleTask = (uploadTask) => {
    // Remove the settled task from the concurrency pool
    const index = POOL.findIndex((t) => t === uploadTask);
    POOL.splice(index, 1);
  };
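  // Once every chunk is on the server, ask it to assemble the final file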
  const mergeFile = (filename, hash) => {
    if (!fileChunks.length) {
      return;
    }
    request({
      url: "/upload/merge",
      method: "POST",
      data: { filename, hash },
      headers: { "Content-Type": "application/json" },
    })
      .then(() => {
        console.log("Merge successful");
      })
      .catch(() => {
        message.error("Merge failed");
        uploader?.onError();
      });
  };
  // Cancel all in-flight chunk uploads and reset the progress bar
  const handleStop = () => {
    cancelFuncArr.forEach((cancelFunc) => {
      cancelFunc();
    });
    fileChunks = [];
    setProgress(0);
  };

  return {
    handleFileChange,
    handleStop,
  };
};
export default useUpload;
```
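The code above assumes three server endpoints. Their implementation is out of scope here, but the contract the client relies on, inferred from `handleFileUpload` and `sliceChunks`, looks like this:

```js
// Server contract assumed by the client code above (inferred, not normative):
//
// POST /upload/verify   body: { filename, hash }
//   -> true       the complete file already exists: instant upload
//   -> false      nothing stored yet: upload every chunk
//   -> number[]   bytes already stored for each chunk; the client re-uploads
//                 only the chunks whose stored size differs (resume)
//
// POST /upload/upload   multipart/form-data: filename, chunkIndex, hash, file
//   stores one chunk; any 2xx response counts as success
//
// POST /upload/merge    body: { filename, hash }
//   concatenates the stored chunks in chunkIndex order into the final file
```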
3. Code Walkthrough
- File slicing: in `handleFileChange`, the file is split into 1 MB chunks and its MD5 hash is computed.
- Instant-upload check: in `handleFileUpload`, the `verifyUpload` function sends the file name and hash to the server to check whether the file already exists.
- Concurrent upload: in `sliceChunks`, a concurrency pool caps the number of chunks uploading at once, and the chunks are merged once the upload completes.
- Progress bar: `handleUploadProgress` and `handleFinishedUploadProgress` compute and update the upload progress.
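Finally, a minimal usage sketch. The composable expects the options object that ant-design-vue's Upload component passes to `customRequest` (`{ file, onProgress, onSuccess, onError }`); the import path for the hook is an assumption:

```vue
<template>
  <!-- customRequest hands handleFileChange the { file, onProgress, ... } object -->
  <a-upload :custom-request="handleFileChange" :show-upload-list="true">
    <a-button>Select file</a-button>
  </a-upload>
  <a-button @click="handleStop">Cancel upload</a-button>
</template>

<script setup>
import useUpload from "@/hooks/useUpload"; // hypothetical path to the hook above
const { handleFileChange, handleStop } = useUpload();
</script>
```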