大文件上传会耗费大量的时间,而且中途有可能上传失败。这时我们需要前端和后端配合来解决这个问题。

解决步骤:

  1. 文件分片,减少每次请求耗费的时间,如果某次请求失败可以单独重新上传,而不是从头开始
  2. 通知服务端合并文件分片
  3. 控制并发的请求数量,避免浏览器内存溢出
  4. 当因为网络或者其他原因导致某次请求失败,我们重新发送请求

文件的分片与合并

在JavaScript中,File对象是 `Blob` 对象的子类,该对象包含一个重要的方法 slice,通过该方法我们可以这样分割二进制文件:

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Document</title>
    <script src="https://cdn.bootcdn.net/ajax/libs/axios/0.24.0/axios.min.js"></script>
</head>
<body>
    <input type="file" multiple="multiple" id="fileInput" />
    <button onclick="SliceUpload()">上传</button>
    <script>
        /**
         * Upload the selected file in fixed-size chunks, then ask the server
         * to merge them.
         *
         * FIX: the body uses `await`, so the function must be declared
         * `async` — the original plain `function` declaration is a
         * SyntaxError and the script would not even parse.
         */
        async function SliceUpload() {
            const file = document.getElementById('fileInput').files[0]
            if (!file) return
            // Split the file into chunks via Blob.slice
            let size = 1024 * 50; // 50KB chunk size
            let fileChunks = [];
            let index = 0;        // chunk sequence number
            for (let cur = 0; cur < file.size; cur += size) {
                fileChunks.push({
                    hash: index++,
                    chunk: file.slice(cur, cur + size),
                });
            }
            // Fire one upload request per chunk (unbounded parallelism;
            // see the concurrency-controlled version below)
            const uploadList = fileChunks.map((item) => {
                let formData = new FormData();
                formData.append("filename", file.name);
                formData.append("hash", item.hash);
                formData.append("chunk", item.chunk);
                return axios({
                    method: "post",
                    url: "/upload",
                    data: formData,
                });
            });
            await Promise.all(uploadList);
            // All chunks uploaded — tell the server to merge them
            await axios({
                method: "get",
                url: "/merge",
                params: {
                    filename: file.name,
                },
            });
            console.log("Upload to complete");
        }
    </script>
</body>
</html>

并发控制

如果文件很大,这样切分出的分片会很多,浏览器短时间内就会发起大量的请求,可能会导致内存耗尽,所以要进行并发控制。

这里我们结合 Promise.race() 方法控制并发请求的数量,避免浏览器内存溢出。

// Chunked upload with a bounded concurrency pool.
async function SliceUpload() {
    const file = document.getElementById('fileInput').files[0]
    if (!file) return
    // Split the file into chunks via Blob.slice
    let size = 1024 * 50; // 50KB chunk size
    let fileChunks = [];
    let index = 0;        // chunk sequence number
    for (let cur = 0; cur < file.size; cur += size) {
        fileChunks.push({
            hash: index++,
            chunk: file.slice(cur, cur + size),
        });
    }
    let pool = []; // concurrent pool of in-flight requests
    let max = 3;   // maximum concurrency
    for (let i = 0; i < fileChunks.length; i++) {
        let item = fileChunks[i];
        let formData = new FormData();
        formData.append("filename", file.name);
        formData.append("hash", item.hash);
        formData.append("chunk", item.chunk);
        // Upload this chunk
        let task = axios({
            method: "post",
            url: "/upload",
            data: formData,
        });
        task.then(() => {
            // Remove the settled request from the pool.
            // FIX: splice needs an explicit deleteCount of 1 — the original
            // `pool.splice(index)` truncated everything from `index` to the
            // end, silently dropping still-pending requests and breaking
            // the concurrency limit.
            let index = pool.findIndex((t) => t === task);
            pool.splice(index, 1);
        });
        // Park the request in the pool; when the pool is full, wait for
        // the first request to settle before launching another.
        pool.push(task);
        if (pool.length === max) {
            await Promise.race(pool);
        }
    }
    // All chunks uploaded — tell the server to merge them
    await axios({
        method: "get",
        url: "/merge",
        params: {
            filename: file.name,
        },
    });
    console.log("Upload to complete");
}

使代码可复用(失败的分片自动重新上传)

// Chunked upload with a bounded concurrency pool and automatic retry:
// failed chunks are collected and re-uploaded in a recursive pass until
// none remain, then the server is asked to merge.
function SliceUpload() {
    const file = document.getElementById('fileInput').files[0]
    if (!file) return
    // Split the file into chunks via Blob.slice
    let size = 1024 * 50; // chunk size (50KB)
    let fileChunks = [];
    let index = 0;        // chunk sequence number
    for (let cur = 0; cur < file.size; cur += size) {
        fileChunks.push({
            hash: index++,
            chunk: file.slice(cur, cur + size),
        });
    }
    // Upload the given chunk list; recurses with the failure list until
    // everything has succeeded, then triggers the merge.
    // NOTE(review): a chunk that fails persistently retries forever —
    // consider capping the number of retry passes.
    const uploadFileChunks = async function(list){
        if(list.length === 0){
            // All chunks uploaded — tell the server to merge them
            await axios({
                method: 'get',
                url: '/merge',
                params: {
                    filename: file.name
                }
            });
            console.log('Upload to complete')
            return
        }
        let pool = []       // concurrent pool of in-flight requests
        let max = 3         // maximum concurrency
        let finish = 0      // number of settled requests
        let failList = []   // chunks that failed this pass
        for(let i=0;i<list.length;i++){
            let item = list[i]
            let formData = new FormData()
            formData.append('filename', file.name)
            formData.append('hash', item.hash)
            formData.append('chunk', item.chunk)

            let task = axios({
                method: 'post',
                url: '/upload',
                data: formData
            })
            task.then((data)=>{
                // Remove the settled request from the pool.
                // FIX: splice needs an explicit deleteCount of 1 — the
                // original `pool.splice(index)` truncated the pool from
                // `index` onward, dropping still-pending requests.
                let index = pool.findIndex(t=> t===task)
                pool.splice(index, 1)
            }).catch(()=>{
                failList.push(item)
            }).finally(()=>{
                finish++
                // Once every request in this pass has settled, retry
                // whatever failed (empty list ends the recursion).
                if(finish===list.length){
                    uploadFileChunks(failList)
                }
            })
            pool.push(task)
            if(pool.length === max){
                await Promise.race(pool)
            }
        }
    }
    uploadFileChunks(fileChunks)
}

服务端接口实现

const express = require('express')
const multiparty = require('multiparty')
const fs = require('fs')
const path = require('path')
const { Buffer } = require('buffer')

// Final merged-file directory
const STATIC_FILES = path.join(__dirname, './static/files')
// Temporary per-file chunk directory
const STATIC_TEMPORARY = path.join(__dirname, './static/temporary')

const server = express()
// Static file hosting
server.use(express.static(path.join(__dirname, './dist')))

// Chunk-upload endpoint: stores each chunk under a directory named after
// the original file, using the chunk's sequence number as the file name.
// FIX: the template literals were corrupted to the literal text
// `$(unknown)` — every occurrence must interpolate `${filename}`,
// otherwise all chunks land in one shared directory and merging is wrong.
server.post('/upload', (req, res) => {
    const form = new multiparty.Form();
    form.parse(req, function(err, fields, files) {
        let filename = fields.filename[0]
        let hash = fields.hash[0]
        let chunk = files.chunk[0]
        let dir = `${STATIC_TEMPORARY}/${filename}`
        try {
            // recursive:true also creates STATIC_TEMPORARY if missing
            if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true })
            const buffer = fs.readFileSync(chunk.path)
            const ws = fs.createWriteStream(`${dir}/${hash}`)
            ws.write(buffer)
            ws.close()
            res.send(`${filename}-${hash} Section uploaded successfully`)
        } catch (error) {
            console.error(error)
            res.status(500).send(`${filename}-${hash} Section uploading failed`)
        }
    })
})

// Merge endpoint: concatenates the chunks 0..n-1 (read by index, matching
// the sequence numbers assigned by the client) into the final file.
server.get('/merge', async (req, res) => {
    const { filename } = req.query
    try {
        let len = 0
        const bufferList = fs.readdirSync(`${STATIC_TEMPORARY}/${filename}`).map((hash, index) => {
            const buffer = fs.readFileSync(`${STATIC_TEMPORARY}/${filename}/${index}`)
            len += buffer.length
            return buffer
        });
        // Merge chunk buffers into one file
        const buffer = Buffer.concat(bufferList, len);
        const ws = fs.createWriteStream(`${STATIC_FILES}/${filename}`)
        ws.write(buffer);
        ws.close();
        res.send(`Section merge completed`);
    } catch (error) {
        console.error(error);
        // FIX: the original only logged, leaving the client request
        // hanging forever; report the failure instead.
        res.status(500).send(`Section merge failed`);
    }
})

server.listen(3000, _ => {
    console.log('http://localhost:3000/')
})