From f0b4eb0d65b856fb26f10edb611690dd641028c3 Mon Sep 17 00:00:00 2001
From: Ferdinand Thiessen
Date: Fri, 21 Feb 2025 16:00:50 +0100
Subject: [PATCH] fix(uploader): Correctly (re)set progress on upload

On the upload event we should also update the uploaded bytes for chunked
uploads. Moreover, we need to reset the uploaded bytes when a retry
occurs, as the progress bar otherwise shows the wrong ETA.

Signed-off-by: Ferdinand Thiessen
---
 __tests__/utils/upload.spec.ts              |  41 +-
 .../components/UploadPicker/progress.cy.ts  | 359 ++++++++++++++++++
 lib/components/UploadPicker.vue             |   3 +-
 lib/uploader.ts                             |  69 +++-
 lib/utils/upload.ts                         |  54 ++-
 5 files changed, 482 insertions(+), 44 deletions(-)
 create mode 100644 cypress/components/UploadPicker/progress.cy.ts

diff --git a/__tests__/utils/upload.spec.ts b/__tests__/utils/upload.spec.ts
index 3c8f62e3..1bc62a2f 100644
--- a/__tests__/utils/upload.spec.ts
+++ b/__tests__/utils/upload.spec.ts
@@ -1,4 +1,4 @@
-/**
+/*!
  * SPDX-FileCopyrightText: 2022 Nextcloud GmbH and Nextcloud contributors
  * SPDX-License-Identifier: AGPL-3.0-or-later
  */
@@ -109,7 +109,7 @@ describe('Upload data', () => {
 		const blob = new Blob([new ArrayBuffer(50 * 1024 * 1024)])
 		const signal = new AbortController().signal
 		const onUploadProgress = vi.fn()
-		await uploadData(url, blob, signal, onUploadProgress)
+		await uploadData(url, blob, { signal, onUploadProgress })
 
 		expect(onUploadProgress).toHaveBeenCalledTimes(1)
 		expect(axiosMock.request).toHaveBeenCalledTimes(1)
@@ -125,6 +125,7 @@
 			'axios-retry': {
 				retries: 5,
 				retryDelay: expect.any(Function),
+				onRetry: expect.any(Function),
 			},
 		})
 	})
@@ -137,7 +138,7 @@
 		const signal = new AbortController().signal
 		const onUploadProgress = vi.fn()
-		await uploadData(url, data, signal, onUploadProgress)
+		await uploadData(url, data, { signal, onUploadProgress })
 
 		expect(onUploadProgress).toHaveBeenCalledTimes(1)
 		expect(axiosMock.request).toHaveBeenCalledTimes(1)
@@ -154,6 +155,7 @@
 			'axios-retry': {
 				retries: 5,
 				retryDelay: expect.any(Function),
+				onRetry: expect.any(Function),
 			},
 		})
 	})
@@ -165,7 +167,7 @@
 		const blob = new Blob([new ArrayBuffer(50 * 1024 * 1024)])
 		const signal = new AbortController().signal
 		const onUploadProgress = vi.fn()
-		await uploadData(url, blob, signal, onUploadProgress, url)
+		await uploadData(url, blob, { signal, onUploadProgress, destinationFile: url })
 
 		expect(onUploadProgress).toHaveBeenCalledTimes(1)
 		expect(axiosMock.request).toHaveBeenCalledTimes(1)
@@ -182,6 +184,35 @@
 			'axios-retry': {
 				retries: 5,
 				retryDelay: expect.any(Function),
+				onRetry: expect.any(Function),
 			},
 		})
 	})
+
+	test('Upload data stream with callback', async () => {
+		axiosMock.request = vi.fn((config: any) => Promise.resolve(config?.onUploadProgress()))
+
+		const url = 'https://cloud.example.com/remote.php/dav/files/test/image.jpg'
+		const blob = new Blob([new ArrayBuffer(50 * 1024 * 1024)])
+		const signal = new AbortController().signal
+		const onUploadProgress = vi.fn()
+		const onUploadRetry = vi.fn()
+		await uploadData(url, blob, { signal, onUploadProgress, onUploadRetry })
+
+		expect(onUploadProgress).toHaveBeenCalledTimes(1)
+		expect(axiosMock.request).toHaveBeenCalledWith({
+			method: 'PUT',
+			url,
+			data: blob,
+			signal,
+			onUploadProgress,
+			headers: {
+				'Content-Type': 'application/octet-stream',
+			},
+			'axios-retry': {
+				retries: 5,
+				retryDelay: expect.any(Function),
+				onRetry: onUploadRetry,
+			},
+		})
+	})
@@ -199,7 +230,7 @@
 		// Cancel after 400ms
 		setTimeout(() => controller.abort(), 400)
 		try {
-			await uploadData(url, data, controller.signal, onUploadProgress)
+			await uploadData(url, data, { signal: controller.signal, onUploadProgress })
 		} catch (error) {
 			expect(onUploadProgress).toHaveBeenCalledTimes(1)
 			expect(axiosMock.request).toHaveBeenCalledTimes(1)
diff --git a/cypress/components/UploadPicker/progress.cy.ts b/cypress/components/UploadPicker/progress.cy.ts
new file mode 100644
index 00000000..1d246112
--- /dev/null
+++ b/cypress/components/UploadPicker/progress.cy.ts
@@ -0,0 +1,359 @@
+/*!
+ * SPDX-FileCopyrightText: 2025 Nextcloud GmbH and Nextcloud contributors
+ * SPDX-License-Identifier: AGPL-3.0-or-later
+ */
+import { Folder, Permission } from '@nextcloud/files'
+import { generateRemoteUrl } from '@nextcloud/router'
+import { UploadPicker, getUploader } from '../../../lib/index.ts'
+
+let state: string | undefined
+
+before(() => {
+	cy.window().then((win) => {
+		state = win.document.body.innerHTML
+	})
+})
+
+/**
+ * Reset the inner body of the document to remove any previous state of the uploader.
+ */
+function resetDocument(): void {
+	if (state) {
+		cy.window().then((win) => {
+			win.document.body.innerHTML = state!
+		})
+	}
+}
+
+/**
+ * Throttle the upload speed using the browser API.
+ * @param speed upload speed in bytes per second. -1 for unlimited.
+ */
+function throttleUpload(speed: number) {
+	Cypress.automation('remote:debugger:protocol', {
+		command: 'Network.emulateNetworkConditions',
+		params: {
+			offline: speed === 0,
+			latency: 0,
+			downloadThroughput: -1,
+			uploadThroughput: Math.max(speed, -1),
+		},
+	})
+}
+
+describe('UploadPicker: progress handling', () => {
+	afterEach(() => {
+		resetDocument()
+		throttleUpload(-1)
+	})
+
+	beforeEach(() => {
+		// Make sure we reset the destination
+		// so other tests do not interfere
+		const propsData = {
+			destination: new Folder({
+				id: 56,
+				owner: 'user',
+				source: generateRemoteUrl('dav/files/user'),
+				permissions: Permission.ALL,
+				root: '/files/user',
+			}),
+		}
+
+		// Mount picker
+		cy.mount(UploadPicker, {
+			propsData,
+		}).as('uploadPicker')
+
+		// Check and init aliases
+		cy.get('[data-cy-upload-picker] [data-cy-upload-picker-input]').as('input').should('exist')
+		cy.get('[data-cy-upload-picker] [data-cy-upload-picker-progress]').as('progress').should('exist')
+		cy.get('[data-cy-upload-picker] [data-cy-upload-picker-progress-label]').as('progressLabel').should('exist')
+	})
+
+	it('has increasing progress bar during non-chunked upload', () => {
+		// Start in paused mode
+		const uploader = getUploader()
+		uploader.pause()
+
+		cy.get('@input').attachFile({
+			// file of 5 MiB
+			fileContent: new Blob([new ArrayBuffer(5 * 1024 * 1024)]),
+			fileName: 'file.txt',
+			mimeType: 'text/plain',
+			encoding: 'utf8',
+			lastModified: new Date().getTime(),
+		})
+
+		// promise used to time the request response
+		const { promise, resolve } = Promise.withResolvers()
+		cy.intercept('PUT', '/remote.php/dav/files/user/file.txt', (rq) => {
+			rq.reply({ statusCode: 201 })
+			rq.on('response', async () => {
+				await promise
+			})
+		}).as('upload')
+
+		// 1 MiB/s meaning the upload will take 5 seconds
+		throttleUpload(1024 * 1024)
+
+		// See there is no progress yet
+		cy.get('@progress')
+			.should('be.visible')
+			.should('have.value', 0)
+		cy.get('@progressLabel')
+			.should('contain.text', 'paused')
+			// start the uploader
+			.then(() => uploader.start())
+
+		// See the upload has started
+		cy.get('@progressLabel')
+			.should('contain.text', 'estimating time')
+
+		cy.get('@progress', { timeout: 2000 })
+			.should((el) => expect(el.val()).to.be.greaterThan(10))
+			.and((el) => expect(el.val()).to.be.lessThan(30))
+		cy.get('@progress', { timeout: 1500 })
+			.should((el) => expect(el.val()).to.be.greaterThan(30))
+			.and((el) => expect(el.val()).to.be.lessThan(50))
+		cy.get('@progress', { timeout: 1500 })
+			.should((el) => expect(el.val()).to.be.greaterThan(50))
+			.and((el) => expect(el.val()).to.be.lessThan(70))
+		cy.get('@progress', { timeout: 1500 })
+			.should((el) => expect(el.val()).to.be.greaterThan(70))
+			.and((el) => expect(el.val()).to.be.lessThan(90))
+
+		cy.get('@progress')
+			.should('have.value', 90)
+			.then(() => resolve()) // resolve the promise
+		// now the progress should be 100 meaning the progress bar is hidden
+		cy.get('@progress')
+			.should('not.be.visible')
+	})
+
+	it('has increasing progress bar for chunked upload', () => {
+		// Maximum time the responses can take
+		Cypress.config({ defaultCommandTimeout: 7000 })
+
+		const { promise: promiseChunk1, resolve: resolveChunk1 } = Promise.withResolvers()
+		const { promise: promiseChunk2, resolve: resolveChunk2 } = Promise.withResolvers()
+		cy.intercept('MKCOL', '/remote.php/dav/uploads/user/*', (rq) => {
+			rq.reply({ statusCode: 201 })
+		}).as('mkdir')
+		cy.intercept('PUT', '/remote.php/dav/uploads/user/*/*', (rq) => {
+			rq.reply({ statusCode: 201 })
+			rq.on('response', async () => await (rq.url.endsWith('/1') ? promiseChunk1 : promiseChunk2))
+		}).as('uploadBig')
+		cy.intercept('MOVE', '/remote.php/dav/uploads/user/*/.file', (rq) => {
+			rq.reply({ statusCode: 201 })
+		}).as('move')
+
+		// Start in paused mode
+		const uploader = getUploader()
+		uploader.pause()
+
+		// 3 MiB/s meaning the upload will take 5 seconds
+		throttleUpload(3 * 1024 * 1024)
+
+		cy.get('@input').attachFile({
+			// file of 15 MiB so it is chunked into 10 MiB and 5 MiB
+			fileContent: new Blob([new ArrayBuffer(15 * 1024 * 1024)]),
+			fileName: 'file.txt',
+			mimeType: 'text/plain',
+			encoding: 'utf8',
+			lastModified: new Date().getTime(),
+		})
+
+		// See there is no progress yet
+		cy.get('@progress')
+			.should('be.visible')
+			.should('have.value', 0)
+		cy.get('@progressLabel')
+			.should('contain.text', 'paused')
+			// start the uploader
+			.then(() => uploader.start())
+
+		// See the upload has started
+		cy.get('@progressLabel')
+			.should((el) => expect(el.text()).to.match(/(estimating time|few seconds left)/))
+
+		// MKCOL was successful so the upload can begin
+		cy.wait('@mkdir')
+
+		// ~20% per second
+		cy.get('@progress', { timeout: 2000 })
+			.should((el) => expect(el.val()).to.be.greaterThan(10))
+			.and((el) => expect(el.val()).to.be.lessThan(30))
+		cy.get('@progress', { timeout: 1500 })
+			.should((el) => expect(el.val()).to.be.greaterThan(30))
+			.and((el) => expect(el.val()).to.be.lessThan(50))
+		cy.get('@progress', { timeout: 1500 })
+			.should((el) => expect(el.val()).to.be.greaterThan(50))
+			.and((el) => expect(el.val()).to.be.lessThan(70))
+		cy.get('@progress', { timeout: 1500 })
+			.should((el) => expect(el.val()).to.be.greaterThan(70))
+			.and((el) => expect(el.val()).to.be.lessThan(90))
+
+		cy.get('@progress')
+			.should('have.value', 90)
+			// Now the upload (sending) is done. Resolving chunk 1 will increase the value to 97% (or 95% if we resolve only chunk 2)
+			.then(() => resolveChunk1())
+		cy.get('@progress')
+			.should('have.value', 97)
+			.then(() => resolveChunk2())
+		// now the progress should be 100 meaning the progress bar is hidden
+		cy.get('@progress')
+			.should('not.be.visible')
+	})
+})
+
+describe('UploadPicker: reset progress on retry', () => {
+	afterEach(() => {
+		resetDocument()
+		throttleUpload(-1)
+	})
+
+	beforeEach(() => {
+		cy.window()
+			.then((win) => {
+				// Internal global variable
+				(win as any)._oc_capabilities = { files: { chunked_upload: { max_parallel_count: 1 } } }
+			})
+
+		// Make sure we reset the destination
+		// so other tests do not interfere
+		const propsData = {
+			destination: new Folder({
+				id: 56,
+				owner: 'user',
+				source: generateRemoteUrl('dav/files/user'),
+				permissions: Permission.ALL,
+				root: '/files/user',
+			}),
+		}
+
+		// Start the uploader paused
+		getUploader(false, true).pause()
+
+		// Mount picker
+		cy.mount(UploadPicker, {
+			propsData,
+		}).as('uploadPicker')
+
+		// Check and init aliases
+		cy.get('[data-cy-upload-picker] [data-cy-upload-picker-input]').as('input').should('exist')
+		cy.get('[data-cy-upload-picker] [data-cy-upload-picker-progress]').as('progress').should('exist')
+		cy.get('[data-cy-upload-picker] [data-cy-upload-picker-progress-label]').as('progressLabel').should('exist')
+	})
+
+	it('non chunked request', () => {
+		let destroyRequest = true
+		cy.intercept('PUT', '/remote.php/dav/files/user/file.txt', (rq) => {
+			if (destroyRequest) {
+				rq.reply({ statusCode: 504 })
+				destroyRequest = false
+			} else {
+				rq.reply({ statusCode: 201 })
+			}
+		}).as('upload')
+
+		throttleUpload(2 * 1024 * 1024)
+
+		cy.get('@input').attachFile({
+			// file of 5 MiB
+			fileContent: new Blob([new ArrayBuffer(5 * 1024 * 1024)]),
+			fileName: 'file.txt',
+			mimeType: 'text/plain',
+			encoding: 'utf8',
+			lastModified: new Date().getTime(),
+		})
+
+		// See there is no progress yet
+		cy.get('@progress')
+			.should('be.visible')
+			.should('have.value', 0)
+		cy.get('@progressLabel')
+			.should('contain.text', 'paused')
+			// start the uploader
+			.then(() => getUploader().start())
+
+		// See the upload has started
+		cy.get('@progressLabel')
+			.should('contain.text', 'estimating time')
+
+		cy.get('@progress', { timeout: 2000 })
+			.should((el) => expect(el.val()).to.be.greaterThan(10))
+			.and((el) => expect(el.val()).to.be.lessThan(40))
+		cy.get('@progress', { timeout: 1500 })
+			.should((el) => expect(el.val()).to.be.greaterThan(50))
+
+		cy.wait('@upload')
+
+		// see progress is reset
+		cy.get('@progress', { timeout: 1000 })
+			.should((el) => expect(el.val()).to.be.lessThan(25))
+
+		cy.get('@progress', { timeout: 4000 })
+			// see the second request / retry increasing the progress again
+			.should((el) => expect(el.val()).to.be.greaterThan(26))
+	})
+
+	it('only failed chunk is reset on retry', () => {
+		let retryChunk = true
+		cy.intercept('MKCOL', '/remote.php/dav/uploads/user/*', (rq) => {
+			rq.reply({ statusCode: 201 })
+		}).as('mkdir')
+		cy.intercept('MOVE', '/remote.php/dav/uploads/user/*/.file', (rq) => {
+			rq.reply({ statusCode: 201 })
+		}).as('move')
+		cy.intercept('PUT', '/remote.php/dav/uploads/user/*/*', (rq) => {
+			if (rq.url.endsWith('/2')) {
+				if (retryChunk) {
+					rq.reply({ statusCode: 504, delay: 1000 })
+					retryChunk = false
+					return
+				}
+			}
+			rq.reply({ statusCode: 201, delay: 500 })
+		}).as('upload')
+
+		cy.get('@input').attachFile({
+			// file of 25 MiB so that we get 3 chunks
+			fileContent: new Blob([new ArrayBuffer(25 * 1024 * 1024)]),
+			fileName: 'file.txt',
+			mimeType: 'text/plain',
+			encoding: 'utf8',
+			lastModified: new Date().getTime(),
+		})
+
+		throttleUpload(6 * 1024 * 1024)
+
+		// See there is no progress yet
+		cy.get('@progress')
+			.should('be.visible')
+			.should('have.value', 0)
+		cy.get('@progressLabel')
+			.should('contain.text', 'paused')
+			// start the uploader
+			.then(() => getUploader().start())
+
+		// chunks: 40% 40% 20%
+		// See we get the progress of chunk 1
+		cy.get('@progress')
+			.should('have.value', 40)
+		// See we get some progress on chunk 2
+		cy.get('@progress')
+			.should((el) => expect(el.val()).to.be.greaterThan(75))
+		// See the progress is reset to 40% (first chunk)
+		cy.get('@progress')
+			.should('have.value', 40)
+		// See we succeed with uploading
+		cy.get('@progress', { timeout: 5000 })
+			.should((el) => expect(el.val()).to.be.greaterThan(75))
+		// And succeed
+		cy.get('@progress')
+			.should('not.be.visible')
+		cy.get('@upload.all')
+			.should('have.length', 4) // 3 chunks + 1 retry
+	})
+})
diff --git a/lib/components/UploadPicker.vue b/lib/components/UploadPicker.vue
index bc8621a0..fb2cedc8 100644
--- a/lib/components/UploadPicker.vue
+++ b/lib/components/UploadPicker.vue
@@ -108,10 +108,11 @@
 		<!-- Progressbar -->
 		<div v-show="isUploading"
 			class="upload-picker__progress">
 			<NcProgressBar :aria-label="t('Upload progress')"
+				data-cy-upload-picker-progress
 				:error="hasFailure"
 				:value="progress"
 				size="medium" />
-			<p>
+			<p data-cy-upload-picker-progress-label>
 				{{ timeLeft }}
 			</p>
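The lib/uploader.ts hunks below implement the per-chunk accounting that the tests above exercise: progress events are only credited at 90% (the server has not yet acknowledged those bytes), the missing 10% is added once the request completes, and a retry takes the chunk's intermediate bytes back out of the total. A minimal standalone sketch of that bookkeeping, with plain variables standing in for the library's Upload object and updateStats() (names here are illustrative, not the library's API):

// Sketch of the chunk progress bookkeeping (illustrative names, not the library API).
let uploaded = 0 // mirrors Upload.uploaded across all chunks
const updateStats = () => console.debug(`uploaded ${uploaded} bytes`)

function trackChunk(chunkSize: number) {
	// bytes credited for this chunk so far, from progress events only
	let chunkBytes = 0
	return {
		// progress event: credit only 90% until the server acknowledges the chunk
		onUploadProgress(bytes: number): void {
			const progressBytes = bytes * 0.9
			chunkBytes += progressBytes
			uploaded += progressBytes
			updateStats()
		},
		// retry: the previous attempt failed, so take its credited bytes back out
		onUploadRetry(): void {
			uploaded -= chunkBytes
			chunkBytes = 0
			updateStats()
		},
		// completion: swap the intermediate 90% credit for the real chunk size
		onDone(): void {
			uploaded += chunkSize - chunkBytes
			updateStats()
		},
	}
}

The rollback in onUploadRetry matters because axios-retry re-sends the whole request, so bytes credited for the failed attempt would otherwise be counted twice.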
diff --git a/lib/uploader.ts b/lib/uploader.ts
index 59bfa3ae..b4bcdf28 100644
--- a/lib/uploader.ts
+++ b/lib/uploader.ts
@@ -472,22 +472,46 @@
 			// Init request queue
 			const request = () => {
+				// bytes uploaded on this chunk (as upload.uploaded tracks all chunks)
+				let chunkBytes = 0
 				return uploadData(
 					`${tempUrl}/${chunk + 1}`,
 					blob,
-					upload.signal,
-					() => this.updateStats(),
-					encodedDestinationFile,
 					{
-						...this._customHeaders,
-						'X-OC-Mtime': Math.floor(file.lastModified / 1000),
-						'OC-Total-Length': file.size,
-						'Content-Type': 'application/octet-stream',
+						signal: upload.signal,
+						destinationFile: encodedDestinationFile,
+						retries,
+						onUploadProgress: ({ bytes }) => {
+							// Only count 90% of the bytes as the request is not yet processed by the server;
+							// we set the remaining 10% when the request has finished (server responded).
+							const progressBytes = bytes * 0.9
+							chunkBytes += progressBytes
+							upload.uploaded += progressBytes
+							this.updateStats()
+						},
+						onUploadRetry: () => {
+							// The current try failed, so reset the stats for this chunk,
+							// meaning remove the already counted chunk bytes from the stats
+							upload.uploaded -= chunkBytes
+							chunkBytes = 0
+							this.updateStats()
+						},
+						headers: {
+							...this._customHeaders,
+							'X-OC-Mtime': Math.floor(file.lastModified / 1000),
+							'OC-Total-Length': file.size,
+							'Content-Type': 'application/octet-stream',
+						},
 					},
-					retries,
 				)
 					// Update upload progress on chunk completion
-					.then(() => { upload.uploaded = upload.uploaded + maxChunkSize })
+					.then(() => {
+						// The request is fully done, so we uploaded the full chunk:
+						// first remove the intermediate chunkBytes from the progress events
+						// and then add the real full size
+						upload.uploaded += bufferEnd - bufferStart - chunkBytes
+						this.updateStats()
+					})
 					.catch((error) => {
 						if (error?.response?.status === 507) {
 							logger.error('Upload failed, not enough space on the server or quota exceeded. Cancelling the remaining chunks', { error, upload })
@@ -555,20 +579,27 @@
 				upload.response = await uploadData(
 					encodedDestinationFile,
 					blob,
-					upload.signal,
-					(event) => {
-						upload.uploaded = upload.uploaded + event.bytes
-						this.updateStats()
-					},
-					undefined,
 					{
-						...this._customHeaders,
-						'X-OC-Mtime': Math.floor(file.lastModified / 1000),
-						'Content-Type': file.type,
+						signal: upload.signal,
+						onUploadProgress: ({ bytes }) => {
+							// As these are only the sent bytes, not the processed ones, we only count 90%.
+							// When the upload is finished (server acknowledged the upload) the remaining 10% will be correctly set.
+							upload.uploaded += bytes * 0.9
+							this.updateStats()
+						},
+						onUploadRetry: () => {
+							upload.uploaded = 0
+							this.updateStats()
+						},
+						headers: {
+							...this._customHeaders,
+							'X-OC-Mtime': Math.floor(file.lastModified / 1000),
+							'Content-Type': file.type,
+						},
 					},
 				)
 
-				// Update progress
+				// Update progress - now we set the uploaded size to 100% of the file size
 				upload.uploaded = upload.size
 				this.updateStats()
diff --git a/lib/utils/upload.ts b/lib/utils/upload.ts
index c94251f2..b0b4ce9c 100644
--- a/lib/utils/upload.ts
+++ b/lib/utils/upload.ts
@@ -14,25 +14,40 @@ axiosRetry(axios, { retries: 0 })
 
 type UploadData = Blob | (() => Promise<Blob>)
 
+interface UploadDataOptions {
+	/** The abort signal */
+	signal: AbortSignal
+	/** Upload progress event callback */
+	onUploadProgress?: (event: AxiosProgressEvent) => void
+	/** Request retry callback (e.g. network error on the previous try) */
+	onUploadRetry?: () => void
+	/** The final destination file (for chunked uploads) */
+	destinationFile?: string
+	/** Additional headers */
+	headers?: Record<string, string>
+	/** Number of retries */
+	retries?: number
+}
+
 /**
  * Upload some data to a given path
  * @param url the url to upload to
  * @param uploadData the data to upload
- * @param signal the abort signal
- * @param onUploadProgress the progress callback
- * @param destinationFile the final destination file (often used for chunked uploads)
- * @param headers additional headers
- * @param retries number of retries
+ * @param uploadOptions upload options
  */
-export const uploadData = async function(
+export async function uploadData(
 	url: string,
 	uploadData: UploadData,
-	signal: AbortSignal,
-	onUploadProgress:(event: AxiosProgressEvent) => void = () => {},
-	destinationFile: string | undefined = undefined,
-	headers: Record<string, string> = {},
-	retries: number = 5,
+	uploadOptions: UploadDataOptions,
 ): Promise<AxiosResponse> {
+	const options = {
+		headers: {},
+		onUploadProgress: () => {},
+		onUploadRetry: () => {},
+		retries: 5,
+		...uploadOptions,
+	}
+
 	let data: Blob
 
 	// If the upload data is a blob, we can directly use it
@@ -44,25 +59,26 @@
 	}
 
 	// Helps the server to know what to do with the file afterwards (e.g. chunked upload)
-	if (destinationFile) {
-		headers.Destination = destinationFile
+	if (options.destinationFile) {
+		options.headers.Destination = options.destinationFile
 	}
 
 	// If no content type is set, we default to octet-stream
-	if (!headers['Content-Type']) {
-		headers['Content-Type'] = 'application/octet-stream'
+	if (!options.headers['Content-Type']) {
+		options.headers['Content-Type'] = 'application/octet-stream'
 	}
 
 	return await axios.request({
 		method: 'PUT',
 		url,
 		data,
-		signal,
-		onUploadProgress,
-		headers,
+		signal: options.signal,
+		onUploadProgress: options.onUploadProgress,
+		headers: options.headers,
 		'axios-retry': {
-			retries,
+			retries: options.retries,
 			retryDelay: (retryCount: number, error: AxiosError) => exponentialDelay(retryCount, error, 1000),
+			onRetry: options.onUploadRetry,
 		},
 	})
 }
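For consumers of uploadData, the visible change is the options-object signature with the new onUploadRetry callback. A hedged usage sketch (the URL is a placeholder, and the import path is an assumption based on the repository layout, since uploadData lives in lib/utils/upload.ts):

import { uploadData } from './lib/utils/upload.ts' // hypothetical import path

const controller = new AbortController()
const blob = new Blob([new ArrayBuffer(1024)], { type: 'application/octet-stream' })

// Before: uploadData(url, blob, signal, onUploadProgress, destinationFile, headers, retries)
// After: everything past the payload moves into a single options object.
await uploadData('https://cloud.example.com/remote.php/dav/files/test/file.txt', blob, {
	signal: controller.signal,
	onUploadProgress: ({ bytes }) => {
		// axios reports the bytes sent since the previous progress event
		console.debug(`sent ${bytes} more bytes`)
	},
	// New: invoked by axios-retry before a failed attempt is re-sent,
	// allowing callers to roll back optimistically counted progress.
	onUploadRetry: () => console.debug('attempt failed, progress rolled back'),
	retries: 5,
})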