Cause: when uploading a file, if the file is too large the request may time out. In that case the file needs to be split into small chunks so that each individual request transfers less data and finishes sooner.

The flow chart

Server-side dependencies

koa: HTTP middleware framework

koa-router: Koa routing middleware

koa-body: Koa body-parsing middleware, used to parse POST content

fs-extra: Node.js file system extension

koa-static: Koa static resource middleware, used to serve static resource requests
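
These packages can be installed with npm; a minimal sketch, assuming only the five dependencies listed above (no versions pinned):

npm install koa koa-router koa-body fs-extra koa-static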

The directory structure

  • index.html - the HTML page with the upload function
  • upload - stores the final merged large file
  • temp - temporarily stores the uploaded chunks
  • server.js - the service
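
Based on the paths used in the server code below (static files are served from a public directory and merged files are written to public/upload), the assumed project layout is roughly:

├── public
│   ├── index.html
│   └── upload
├── temp
└── server.js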

What to implement

Step 1 – Normal upload

The upload page


      
<!DOCTYPE html>
<html lang="zh-CN">

<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=0">
  <meta http-equiv="X-UA-Compatible" content="ie=edge">
  <title>Document</title>
  <!-- Introduce axios -->
  <script src="https://unpkg.com/axios/dist/axios.min.js"></script>
</head>

<body>
  <input type="file" id="btnFile">
  <input type="button" value="Upload" onclick="upload()">
  <script>
    let btnFile = document.querySelector('#btnFile')

    function upload() {
      // Get the uploaded file
      const file = btnFile.files[0]
      const formData = new FormData()
      formData.append('file', file)
      axios.post('/upload', formData).then(res => {
        console.log(res)
      })
    }
  </script>
</body>

</html>
(Result screenshot)

The server side

const path = require('path')
const Koa = require('koa')
const Router = require('koa-router')
const koaBody = require('koa-body')
const source = require('koa-static')

const app = new Koa()
const router = new Router()

// Process static resources
app.use(source(path.resolve(__dirname, 'public')))


// Parse request bodies (multipart for file uploads)
app.use(koaBody({
  multipart: true,
  formidable: {
    uploadDir: path.resolve(__dirname, 'temp'), // where uploaded files are stored
    keepExtensions: true,
    maxFieldsSize: 2 * 1024 * 1024
  }
}))

// File upload
router.post('/upload', async ctx => {
  ctx.body = 'File uploaded successfully'
})

app.use(router.routes()).use(router.allowedMethods())

app.listen(3000, () => console.log('Server running on port 3000'))

Start the service and upload the file
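
A minimal sketch of starting it, assuming the entry file is server.js as listed in the directory structure:

node server.js

Then open the upload page in the browser, choose a file, and click Upload.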

(Result screenshot)

The uploaded file appears in the temp directory

Step 2 – Chunked upload

Add the chunking logic to the upload script

Set the size of each chunk -> split the file into chunks of that size and rename them -> upload the chunks recursively


      
<!DOCTYPE html>
<html lang="zh-CN">

<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=0">
  <meta http-equiv="X-UA-Compatible" content="ie=edge">
  <title>Document</title>
  <link type="text/css" rel="stylesheet" href="">
  <script type="text/javascript" src=""></script>
  <script src="https://unpkg.com/axios/dist/axios.min.js"></script>
</head>

<body>
  <input type="file" id="btnFile">
  <input type="button" value="Upload" onclick="upload(0)">
  <script>
    let btnFile = document.querySelector('#btnFile')

    // Size of each slice
    const chunkSize = 1024 * 1024 * 2

    function upload(index /* index of the current chunk */) {
      // Get the uploaded file
      const file = btnFile.files[0]
      // [file name, file extension]
      const [ fname, fext ] = file.name.split('.')
      // Start byte of the current chunk
      const start = index * chunkSize
      if (start >= file.size) { // Stop the recursion once the end of the file is reached
        return
      }
      const blob = file.slice(start, start + chunkSize)
      // Give each chunk a name
      const blobName = `${fname}.${index}.${fext}`
      const blobFile = new File([blob], blobName)

      const formData = new FormData()
      formData.append('file', blobFile)
      axios.post('/upload', formData).then(res => {
        console.log(res)
        // Upload the next chunk recursively
        upload(++index)
      })
    }
  </script>
</body>

</html>

The server organizes the chunks

Receive a chunk -> create a temporary directory for the large file -> move the chunk from temp into that temporary directory

const path = require('path')
const Koa = require('koa')
const Router = require('koa-router')
const koaBody = require('koa-body')
const fse = require('fs-extra')
const source = require('koa-static')

const app = new Koa()
const router = new Router()

// Process static resources
app.use(source(path.resolve(__dirname, 'public')))

// Directory address for uploading files
const UPLOAD_DIR = path.resolve(__dirname, 'public/upload')

// Parse request bodies (multipart for file uploads)
app.use(koaBody({
  multipart: true,
  formidable: {
    uploadDir: path.resolve(__dirname, 'temp'), // where uploaded files are stored
    keepExtensions: true,
    maxFieldsSize: 2 * 1024 * 1024
  }
}))

// File upload
router.post('/upload', async ctx => {
  // koa-body binds the parsed files to ctx.request.files
  const file = ctx.request.files.file
  // [ name, index, ext ] - split the chunk name
  const fileNameArr = file.name.split('.')
  // The directory that holds the chunks of this file
  const chunkDir = `${UPLOAD_DIR}/${fileNameArr[0]}`
  if (!fse.existsSync(chunkDir)) {
    // Create a temporary directory for the large file if it does not exist yet
    await fse.mkdirs(chunkDir)
  }
  // Each chunk is stored under its index (chunks are named originalName.index.ext)
  const dPath = path.join(chunkDir, fileNameArr[1])

  // Move the chunk from temp into the large file's temporary directory
  await fse.move(file.path, dPath, { overwrite: true })
  ctx.body = 'File uploaded successfully'
})
})

app.use(router.routes()).use(router.allowedMethods())

app.listen(3000, () => console.log('Server running on port 3000'))
(Result screenshot)

Step 3 – Merge the chunks

When the upload is complete, the client sends a merge request to the server


      
<!DOCTYPE html>
<html lang="zh-CN">

<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=0">
  <meta http-equiv="X-UA-Compatible" content="ie=edge">
  <title>Document</title>
  <link type="text/css" rel="stylesheet" href="">
  <script type="text/javascript" src=""></script>
  <script src="https://unpkg.com/axios/dist/axios.min.js"></script>
</head>

<body>
  <input type="file" id="btnFile">
  <input type="button" value="Upload" onclick="upload(0)">
  <script>
    let btnFile = document.querySelector('#btnFile')

    // Block size
    const chunkSize = 1024 * 1024 * 2

    function upload(index) {
      // Get the uploaded file
      const file = btnFile.files[0]
      const [ fname, fext ] = file.name.split('.')
      // Start byte of the current chunk
      const start = index * chunkSize
      if (start >= file.size) { // All chunks have been uploaded
        // Ask the server to merge them
        merge(file.name)
        return
      }
      const blob = file.slice(start, start + chunkSize)
      const blobName = `${fname}.${index}.${fext}`
      const blobFile = new File([blob], blobName)

      const formData = new FormData()
      formData.append('file', blobFile)
      axios.post('/upload', formData).then(res => {
        console.log(res)
        upload(++index)
      })
    }

    function merge(name) {
      axios.post('/merge', { name: name }).then(res => {
        console.log(res)
      })
    }
  </script>
</body>

</html>

After receiving the merge request, the server merges the chunks

Read the chunks in the file's temporary directory in order and merge them into one file -> delete the temporary directory -> return the address of the merged file

const path = require('path')
const Koa = require('koa')
const Router = require('koa-router')
const koaBody = require('koa-body')
const fse = require('fs-extra')
const source = require('koa-static')

const app = new Koa()
const router = new Router()

// Process static resources
app.use(source(path.resolve(__dirname, 'public')))

const UPLOAD_DIR = path.resolve(__dirname, 'public/upload')

// Parse request bodies (multipart for file uploads)
app.use(koaBody({
  multipart: true,
  // encoding: 'gzip', // enabling compression makes /merge return an error
  formidable: {
    uploadDir: path.resolve(__dirname, 'temp'), // where uploaded files are stored
    keepExtensions: true,
    maxFieldsSize: 2 * 1024 * 1024
  }
}))

// File upload
router.post('/upload', async ctx => {
  const file = ctx.request.files.file
  // [ name, index, ext ]
  const fileNameArr = file.name.split('.')
  // The directory that holds the chunks of this file
  const chunkDir = `${UPLOAD_DIR}/${fileNameArr[0]}`
  if (!fse.existsSync(chunkDir)) {
    // Create the directory if it does not exist yet
    await fse.mkdirs(chunkDir)
  }
  // Chunks are named originalName.index.ext, so fileNameArr[1] is the chunk index
  const dPath = path.join(chunkDir, fileNameArr[1])
  await fse.move(file.path, dPath, { overwrite: true })
  ctx.body = 'File uploaded successfully'
})

// Merge files
router.post('/merge', async ctx => {
  const { name } = ctx.request.body
  const fname = name.split('.')[0]

  const chunkDir = path.join(UPLOAD_DIR, fname)
  const chunks = await fse.readdir(chunkDir)

  // Append the chunks to the final file in index order
  chunks.sort((a, b) => a - b).forEach(chunkPath => {
    fse.appendFileSync(
      path.join(UPLOAD_DIR, name),
      fse.readFileSync(`${chunkDir}/${chunkPath}`)
    )
  })

  // Delete the temporary folder
  fse.removeSync(chunkDir)
  // Return the file address
  ctx.body = { msg: 'Merge successful', url: `http://localhost:3000/upload/${name}` }
})

app.use(router.routes()).use(router.allowedMethods())

app.listen(3000, () => console.log('Server running on port 3000'))
(Result screenshot)

Visiting the returned URL retrieves the merged file, which koa-static serves from public/upload.
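
For example, for a hypothetical upload named demo.mp4, the /merge response looks roughly like this, and the url can be opened directly in the browser:

{ msg: 'Merge successful', url: 'http://localhost:3000/upload/demo.mp4' }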