mustafa.salaheldin

Reputation: 73

Autodesk Forge and 416 (Requested Range Not Satisfiable)

I'm trying to send a Revit file to my Bucket chunk by chunk. My Revit file is almost 13 MB. Here is my code:

function handleFileSelect(evt) {
    var files = evt.target.files; 
    var file = files[0];

    var segmentSize = 1024 * 1024 * 5; //5 MB
    var startingByte = 0;
    var endingByte = startingByte + segmentSize - 1;
    var segments = Math.ceil(file.size / segmentSize);
    var session = Math.floor(100000000 + Math.random() * -900000000);

    for (var i = 0; i < segments; i++)
    {
        var blob = file.slice(startingByte, endingByte);
        var url = 'https://developer.api.autodesk.com/oss/v2/buckets/' + 'linked_model' + '/objects/' + file.name + '/resumable';
        //console.log(url);
        var contentRange = 'bytes ' + startingByte + '-' + endingByte + '/' + file.size;

        $.ajax({
            type: 'PUT',
            url: url,
            data: blob,
            headers: {
                'Authorization':'Bearer ' + token,
                'Content-Type':'application/octet-stream',
                'Content-Range': contentRange,
                'Session-Id': session
            },
            crossDomain: true,
            processData: false,
            success: function (data) {
                console.log(i);
                startingByte = endingByte + 1;
                endingByte = startingByte + segmentSize - 1;
            },
            error: function (XMLHttpRequest, textStatus, errorThrown) {
                alert("Status: " + textStatus); alert("Error: " + errorThrown);
                console.log(startingByte);
                console.log(endingByte);
                console.log(file.size);
            }
        });
    }
}

It gives me the error: 416 (Requested Range Not Satisfiable)

Can anyone help?

Upvotes: 0

Views: 458

Answers (1)

Felipe

Reputation: 4375

I had the same 416 error, but in my case the issue was that I was trying to upload chunks smaller than 2 MB, which the resumable endpoint does not allow (except for the last chunk).

When I increased the chunk size to 5 MB, it started to work. I just wrote a blog article about it: https://forge.autodesk.com/blog/nailing-large-files-uploads-forge-resumable-api
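
For reference, here is a minimal sketch in plain Node.js of how the Content-Range values can be computed so that every chunk except the last stays at or above that 2 MB minimum. The names planChunks and MIN_CHUNK are illustrative, not part of the Forge SDK:

const MIN_CHUNK = 2 * 1024 * 1024 // 2 MB minimum for non-final chunks

function planChunks (fileSize, chunkSize = 5 * 1024 * 1024) {
  if (chunkSize < MIN_CHUNK) {
    throw new Error('chunk size must be at least 2 MB')
  }
  const ranges = []
  for (let start = 0; start < fileSize; start += chunkSize) {
    // the end byte is inclusive in the Content-Range header
    const end = Math.min(fileSize, start + chunkSize) - 1
    ranges.push('bytes ' + start + '-' + end + '/' + fileSize)
  }
  return ranges
}

// A 13 MB file split into 5 MB chunks:
// [ 'bytes 0-5242879/13631488',
//   'bytes 5242880-10485759/13631488',
//   'bytes 10485760-13631487/13631488' ]
console.log(planChunks(13 * 1024 * 1024))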

Below is the core piece of code that handles chunking and uploading (in Node.js).

By the way, I strongly discourage you from performing this kind of operation client-side as your snippet does: it means you have to pass a write-access token to the web page, which compromises the security of your app. You should first upload the file to your server and then securely upload it to Forge from there, as described in the post and in my sample.
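
To make that concrete, here is a minimal sketch of the server-side flow, assuming an Express app with multer handling the browser upload. The /api/upload route, the svc instance exposing the uploadObjectChunked method shown below, and getToken are placeholders for your own app:

const express = require('express')
const multer = require('multer')

const app = express()

// files land on the server's disk first, then get pushed to Forge
const upload = multer({ dest: 'uploads/' })

// hypothetical route: the browser POSTs the Revit file here and the
// server forwards it to Forge with its own token, never exposing it
app.post('/api/upload', upload.single('model'), async (req, res) => {
  try {
    const result = await svc.uploadObjectChunked(
      getToken,               // server-side token helper
      'linked_model',         // bucketKey
      req.file.originalname,  // objectKey
      { path: req.file.path, size: req.file.size })

    res.json(result)
  } catch (err) {
    res.status(500).json({ error: err.message })
  }
})

app.listen(3000)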

/////////////////////////////////////////////////////////
// Uploads object to bucket using resumable endpoint
//
// Assumes, from the surrounding class: fs (the built-in
// Node.js module), eachLimit from the async package,
// this.guid() returning a unique id, and this._objectsAPI
// being a forge-apis ObjectsApi instance
/////////////////////////////////////////////////////////
uploadObjectChunked (getToken, bucketKey, objectKey,
                     file, opts = {}) {

  return new Promise((resolve, reject) => {

    const chunkSize = opts.chunkSize || 5 * 1024 * 1024

    const nbChunks = Math.ceil(file.size / chunkSize)

    const chunksMap = Array.from({
      length: nbChunks
    }, (e, i) => i)

    // generates a unique session ID
    const sessionId = this.guid()

    // prepare the upload tasks
    const uploadTasks = chunksMap.map((chunkIdx) => {

      const start = chunkIdx * chunkSize

      const end = Math.min(
          file.size, (chunkIdx + 1) * chunkSize) - 1

      const range = `bytes ${start}-${end}/${file.size}`

      const length = end - start + 1

      const readStream =
        fs.createReadStream(file.path, {
          start, end: end
        })

      const run = async () => {

        const token = await getToken()

        return this._objectsAPI.uploadChunk(
          bucketKey, objectKey,
          length, range, sessionId,
          readStream, {},
          {autoRefresh: false}, token)
      }

      return {
        chunkIndex: chunkIdx,
        run
      }
    })

    let progress = 0

    // runs the upload tasks asynchronously in parallel;
    // the number of simultaneous uploads is defined by
    // opts.concurrentUploads
    eachLimit(uploadTasks, opts.concurrentUploads || 3,
      (task, callback) => {

        task.run().then((res) => {

          if (opts.onProgress) {

            progress += 100.0 / nbChunks

            opts.onProgress ({
              progress: Math.round(progress * 100) / 100,
              chunkIndex: task.chunkIndex
            })
          }

          callback ()

        }, (err) => {

          console.log('error')
          console.log(err)

          callback(err)
        })

    }, (err) => {

        if (err) {

          return reject(err)
        }

        return resolve({
          fileSize: file.size,
          bucketKey,
          objectKey,
          nbChunks
        })
    })
  })
}
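
To illustrate how this method might be called, here is a usage sketch; svc (the instance exposing uploadObjectChunked), getTwoLeggedToken, and the file path are placeholders, not part of the snippet above:

const fs = require('fs')

async function main () {
  const filePath = 'models/house.rvt' // hypothetical local file

  const res = await svc.uploadObjectChunked(
    () => getTwoLeggedToken(['data:create', 'data:write']),
    'linked_model',                      // bucketKey
    'house.rvt',                         // objectKey
    { path: filePath, size: fs.statSync(filePath).size },
    {
      chunkSize: 5 * 1024 * 1024,        // 5 MB, above the 2 MB minimum
      concurrentUploads: 3,
      onProgress: ({ progress, chunkIndex }) => {
        console.log('chunk ' + chunkIndex + ' uploaded: ' + progress + '%')
      }
    })

  console.log(res.nbChunks + ' chunks sent to ' +
    res.bucketKey + '/' + res.objectKey)
}

main()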

Upvotes: 1
